// Typed wrapper around Arguments used by C++ builtins. Depending on the
// BuiltinExtraArguments template parameter, the argument frame may carry the
// called JSFunction as a trailing extra slot (NEEDS_CALLED_FUNCTION).
// NOTE(review): this chunk is an incomplete extraction -- method headers and
// closing braces are missing between the numbered lines; code left untouched.
48 template <BuiltinExtraArguments extra_args>
49 class BuiltinArguments :
public Arguments {
// Forwards the raw (length, arguments) pair to the Arguments base.
51 BuiltinArguments(
int length,
Object** arguments)
52 : Arguments(length, arguments) { }
// Raw slot access, forwarded to Arguments::operator[].
54 Object*& operator[] (
int index) {
56 return Arguments::operator[](index);
// Typed handle access, forwarded to Arguments::at<S>.
59 template <
class S> Handle<S> at(
int index) {
61 return Arguments::at<S>(index);
// The receiver always occupies argument slot 0.
64 Handle<Object> receiver() {
65 return Arguments::at<Object>(0);
// With NEEDS_CALLED_FUNCTION the called JSFunction is the last slot.
68 Handle<JSFunction> called_function() {
70 return Arguments::at<JSFunction>(Arguments::length() - 1);
77 return Arguments::length();
// Verify: at minimum the receiver must be present.
83 ASSERT(Arguments::length() >= 1);
// NEEDS_CALLED_FUNCTION specialization: the visible argument count
// excludes the trailing called-function slot.
92 int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length()
const {
93 return Arguments::length() - 1;
// NEEDS_CALLED_FUNCTION specialization: receiver + called function.
98 void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
100 ASSERT(Arguments::length() >= 2);
// Declares a per-builtin typedef <name>ArgumentsType for the builtin's
// extra-arguments spec, so the BUILTIN macros below can name it.
107 #define DEF_ARG_TYPE(name, spec) \
108 typedef BuiltinArguments<spec> name##ArgumentsType;
// Debug-mode BUILTIN(name): declares Builtin_Impl_<name>, defines the
// Builtin_<name> trampoline that re-wraps the raw (length, Object**) pair
// into <name>ArgumentsType, then opens the impl definition.
// NOTE(review): interior macro lines (likely an args.Verify() call and the
// trampoline's closing brace) are missing from this extraction -- confirm
// against the original before editing.
129 #define BUILTIN(name) \
130 MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
131 name##ArgumentsType args, Isolate* isolate); \
132 MUST_USE_RESULT static MaybeObject* Builtin_##name( \
133 int args_length, Object** args_object, Isolate* isolate) { \
134 name##ArgumentsType args(args_length, args_object); \
136 return Builtin_Impl_##name(args, isolate); \
138 MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
139 name##ArgumentsType args, Isolate* isolate)
141 #else // For release mode.
// Release-mode BUILTIN(name): same trampoline shape as the debug variant
// but without verification. Note the differently-cased Builtin_impl##name.
// NOTE(review): the trampoline's closing brace line is missing from this
// extraction.
143 #define BUILTIN(name) \
144 static MaybeObject* Builtin_impl##name( \
145 name##ArgumentsType args, Isolate* isolate); \
146 static MaybeObject* Builtin_##name( \
147 int args_length, Object** args_object, Isolate* isolate) { \
148 name##ArgumentsType args(args_length, args_object); \
149 return Builtin_impl##name(args, isolate); \
151 static MaybeObject* Builtin_impl##name( \
152 name##ArgumentsType args, Isolate* isolate)
// Returns whether the current builtin was invoked as a constructor, by
// inspecting the caller frame of the top-most (exit) stack frame.
// NOTE(review): interior lines are missing (the frame lookup feeding
// 'frame', the 'marker' read, and the final return comparing the two
// results) -- code text left untouched.
157 static inline bool CalledAsConstructor(Isolate* isolate) {
161 StackFrameIterator it(isolate);
162 ASSERT(it.frame()->is_exit());
165 bool reference_result = frame->is_construct();
// A construct frame is identified by the CONSTRUCT stack-frame marker smi.
176 const Smi* kConstructMarker =
Smi::FromInt(StackFrame::CONSTRUCT);
178 bool result = (marker == kConstructMarker);
// Orphaned return statements -- presumably the bodies of the Illegal and
// EmptyFunction builtins (both return undefined); their BUILTIN headers
// were lost in extraction. TODO(review): confirm against the original.
189 return isolate->heap()->undefined_value();
194 return isolate->heap()->undefined_value();
// Moves a range of doubles between (possibly identical) FixedDoubleArrays;
// no-op when len == 0.
// NOTE(review): the copy call itself is truncated here -- only one of its
// arguments (the source pointer) survived extraction.
198 static void MoveDoubleElements(FixedDoubleArray* dst,
200 FixedDoubleArray* src,
203 if (len == 0)
return;
205 src->data_start() + src_index,
// Fills dst[from..to) with the hole sentinel in one pointer-memset.
// Copy-on-write arrays must never be written in place, hence the assert.
210 static void FillWithHoles(Heap* heap, FixedArray* dst,
int from,
int to) {
211 ASSERT(dst->map() != heap->fixed_cow_array_map());
212 MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
// Double-array overload: holes are per-slot NaN patterns, so they must be
// written element by element rather than with a pointer memset.
216 static void FillWithHoles(FixedDoubleArray* dst,
int from,
int to) {
217 for (
int i = from; i < to; i++) {
218 dst->set_the_hole(i);
// Trims 'to_trim' elements off the front of a fixed array in place by
// writing a filler object over the vacated prefix and re-synthesizing the
// array header (map + length) at the new start address, then notifying the
// marker and heap profiler of the "move".
// NOTE(review): extraction dropped many interior lines (entry_size
// computation, former_start declaration, the zap loop body, the new_elms
// cast and final return); code text left untouched.
223 static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
224 FixedArrayBase* elms,
226 Map*
map = elms->map();
228 if (elms->IsFixedArray()) {
233 ASSERT(elms->map() != heap->fixed_cow_array_map());
237 ASSERT(!heap->lo_space()->Contains(elms));
245 const int len = elms->length();
248 elms->IsFixedArray() &&
249 !heap->new_space()->Contains(elms)) {
// Zap the trimmed-off slots so stale pointers are not mistaken for roots.
253 Object** zap =
reinterpret_cast<Object**
>(elms->address());
255 for (
int i = 1; i < to_trim; i++) {
262 heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
// Rebuild the array header (map word, then smi length) at the new start.
264 int new_start_index = to_trim * (entry_size /
kPointerSize);
265 former_start[new_start_index] =
map;
266 former_start[new_start_index + 1] =
Smi::FromInt(len - to_trim);
270 int size_delta = to_trim * entry_size;
271 Address new_start = elms->address() + size_delta;
272 heap->marking()->TransferMark(elms->address(), new_start);
275 FixedArrayBase* new_elms =
// Report the in-place shift as an object move so heap snapshots stay valid.
277 HeapProfiler*
profiler = heap->isolate()->heap_profiler();
278 if (profiler->is_tracking_object_moves()) {
279 profiler->ObjectMoveEvent(elms->address(),
// Returns true when Array.prototype and Object.prototype both have empty
// element backing stores and the chain terminates at null -- i.e. the fast
// array builtins need not consult the prototype chain for elements.
// NOTE(review): a line reassigning array_proto to its prototype (between
// the two elements checks) appears to be missing from this extraction,
// which is why the same-looking check occurs twice; code left untouched.
287 static bool ArrayPrototypeHasNoElements(Heap* heap,
288 Context* native_context,
289 JSObject* array_proto) {
292 if (array_proto->elements() != heap->empty_fixed_array())
return false;
294 Object* proto = array_proto->GetPrototype();
295 if (proto == heap->null_value())
return false;
297 if (array_proto != native_context->initial_object_prototype())
return false;
298 if (array_proto->elements() != heap->empty_fixed_array())
return false;
299 return array_proto->GetPrototype()->IsNull();
// Returns the receiver's writable fast-elements backing store, or a null
// handle when the fast path cannot be used (non-JSArray, observed, or
// non-extensible receiver). When 'args' is supplied, also inspects the
// arguments about to be added (starting at first_added_arg) so the caller
// can transition the elements kind before writing.
// NOTE(review): extraction dropped interior lines (the 'array' cast, the
// COW->writable copy, the kind-transition logic after the scan loop); code
// text left untouched.
305 static inline Handle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
307 Handle<Object> receiver,
309 int first_added_arg) {
310 if (!receiver->IsJSArray())
return Handle<FixedArrayBase>::null();
// Observed arrays must go through the generic (JS) path for notifications.
312 if (array->map()->is_observed())
return Handle<FixedArrayBase>::null();
313 if (!array->map()->is_extensible())
return Handle<FixedArrayBase>::null();
314 Handle<FixedArrayBase> elms(array->elements());
315 Heap* heap = isolate->heap();
316 Map* map = elms->map();
317 if (map == heap->fixed_array_map()) {
318 if (args ==
NULL || array->HasFastObjectElements())
return elms;
319 }
else if (map == heap->fixed_cow_array_map()) {
321 if (args ==
NULL || array->HasFastObjectElements())
return elms;
322 }
else if (map == heap->fixed_double_array_map()) {
323 if (args ==
NULL)
return elms;
330 int args_length = args->length();
331 if (first_added_arg >= args_length)
return handle(array->elements());
333 ElementsKind origin_kind = array->map()->elements_kind();
// Scan the incoming arguments (stored downwards on the stack, hence the
// reversed pointer arithmetic) to decide the target elements kind.
336 int arg_count = args->length() - first_added_arg;
337 Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
338 for (
int i = 0; i < arg_count; i++) {
339 Object* arg = arguments[i];
340 if (arg->IsHeapObject()) {
341 if (arg->IsHeapNumber()) {
349 if (target_kind != origin_kind) {
351 return handle(array->elements());
// Gate for builtins that move elements wholesale (shift/slice/splice/...):
// only allowed under --clever-optimizations and when the Array prototype
// chain is element-free (so skipped holes cannot resolve on a prototype).
// NOTE(review): the parameter list tail and the array_proto initializer
// are truncated in this extraction.
358 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
360 if (!FLAG_clever_optimizations)
return false;
361 Context* native_context = heap->isolate()->context()->native_context();
362 JSObject* array_proto =
365 ArrayPrototypeHasNoElements(heap, native_context, array_proto);
// Fragment of CallJsBuiltin (its signature line was lost in extraction):
// the generic fallback that looks up the named builtin on the builtins
// object, copies the C++ args (minus receiver) into a handle vector, and
// invokes it as a JS call.
372 BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
373 HandleScope handleScope(isolate);
375 Handle<Object> js_builtin =
376 GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
// Skip slot 0 (the receiver) when copying arguments.
379 int argc = args.length() - 1;
380 ScopedVector<Handle<Object> > argv(argc);
381 for (
int i = 0; i < argc; ++i) {
382 argv[i] = args.at<
Object>(i + 1);
384 bool pending_exception;
// Fragment of BUILTIN(ArrayPush): appends the call arguments to a fast
// array, growing the backing store with headroom (1.5x + 16) when needed;
// falls back to the JS ArrayPush builtin when fast elements are
// unavailable. The two halves below handle object elements and double
// elements respectively.
// NOTE(review): the BUILTIN header, several declarations (elms, mode,
// index) and the length-update/return lines are missing from this
// extraction; code left untouched.
400 EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
401 if (elms_obj.
is_null())
return CallJsBuiltin(isolate,
"ArrayPush", args);
404 ASSERT(!array->map()->is_observed());
411 int len =
Smi::cast(array->length())->value();
412 int to_add = args.length() - 1;
420 int new_length = len + to_add;
// Grow with headroom: new_length * 1.5 + 16.
422 if (new_length > elms->length()) {
424 int capacity = new_length + (new_length >> 1) + 16;
426 isolate->factory()->NewUninitializedFixedArray(capacity);
439 for (
int index = 0; index < to_add; index++) {
440 elms->set(index + len, args[index + 1], mode);
443 if (*elms != array->elements()) {
444 array->set_elements(*elms);
// Double-elements variant of the same push sequence.
451 int len =
Smi::cast(array->length())->value();
452 int elms_len = elms_obj->length();
454 int to_add = args.length() - 1;
462 int new_length = len + to_add;
466 if (new_length > elms_len) {
468 int capacity = new_length + (new_length >> 1) + 16;
469 new_elms = isolate->factory()->NewFixedDoubleArray(capacity);
485 for (index = 0; index < to_add; index++) {
486 Object* arg = args[index + 1];
487 new_elms->set(index + len, arg->
Number());
490 if (*new_elms != array->elements()) {
491 array->set_elements(*new_elms);
// Thin adapter: unwraps the handles and forwards to
// ElementsAccessor::HasElement, passing NULL when no backing store handle
// was supplied. (Parameter lines for receiver/holder/key were lost in
// extraction.)
502 static bool ElementsAccessorHasElementWrapper(
503 ElementsAccessor* accessor,
508 return accessor->HasElement(*receiver, *holder, key,
509 backing_store.is_null() ?
NULL : *backing_store);
// Fragment of BUILTIN(ArrayPop): removes and returns the last element of a
// fast array (undefined for an empty array), falling back to the JS
// ArrayPop builtin when fast elements are unavailable.
// NOTE(review): the BUILTIN header, accessor/element declarations, and the
// length-store/return tail are missing from this extraction.
517 EnsureJSArrayWithWritableFastElements(isolate, receiver,
NULL, 0);
518 if (elms_obj.
is_null())
return CallJsBuiltin(isolate,
"ArrayPop", args);
521 ASSERT(!array->map()->is_observed());
523 int len =
Smi::cast(array->length())->value();
524 if (len == 0)
return isolate->heap()->undefined_value();
527 int new_length = len - 1;
// Only fetch the element if it actually exists (it may be a hole).
529 if (ElementsAccessorHasElementWrapper(
530 accessor, array, array, new_length, elms_obj)) {
531 element = accessor->
Get(
532 array, array, new_length, elms_obj);
// Fragment of BUILTIN(ArrayShift): removes and returns the first element.
// Prefers left-trimming the backing store in place (LeftTrimFixedArray);
// otherwise memmoves the elements down by one and holes the last slot,
// with separate object/double paths. Falls back to JS when fast element
// moving is not allowed.
// NOTE(review): BUILTIN header, the 'first' read, the trim-eligibility
// condition, and the length-store/return tail are missing here.
547 Heap* heap = isolate->heap();
550 EnsureJSArrayWithWritableFastElements(isolate, receiver,
NULL, 0);
552 !IsJSArrayFastElementMovingAllowed(heap,
554 return CallJsBuiltin(isolate,
"ArrayShift", args);
557 ASSERT(!array->map()->is_observed());
559 int len =
Smi::cast(array->length())->value();
560 if (len == 0)
return heap->undefined_value();
// A hole in slot 0 surfaces to JS as undefined.
566 if (first->IsTheHole()) {
567 first = isolate->factory()->undefined_value();
571 array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
574 if (elms_obj->IsFixedArray()) {
578 elms->set(len - 1, heap->the_hole_value());
581 MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
582 elms->set_the_hole(len - 1);
// Fragment of BUILTIN(ArrayUnshift): prepends the call arguments to a
// fast smi/object-elements array, growing the store (1.5x + 16 headroom)
// or memmoving the existing elements up, then writing the new elements at
// the front. Falls back to JS otherwise.
// NOTE(review): BUILTIN header, the move-up of existing elements, and the
// length-store/return tail are missing from this extraction.
595 Heap* heap = isolate->heap();
598 EnsureJSArrayWithWritableFastElements(isolate, receiver,
NULL, 0);
600 !IsJSArrayFastElementMovingAllowed(heap,
602 return CallJsBuiltin(isolate,
"ArrayUnshift", args);
605 ASSERT(!array->map()->is_observed());
// Unshift of double elements is not handled on the fast path.
606 if (!array->HasFastSmiOrObjectElements()) {
607 return CallJsBuiltin(isolate,
"ArrayUnshift", args);
611 int len =
Smi::cast(array->length())->value();
612 int to_add = args.length() - 1;
613 int new_length = len + to_add;
621 if (new_length > elms->length()) {
623 int capacity = new_length + (new_length >> 1) + 16;
625 isolate->factory()->NewUninitializedFixedArray(capacity);
634 array->set_elements(*elms);
643 for (
int i = 0; i < to_add; i++) {
644 elms->set(i, args[i + 1], mode);
// Fragment of BUILTIN(ArraySlice): fast-path slice over a JSArray or a
// sloppy-arguments object with fast elements. Clamps the start/end
// arguments per ES5.1 15.4.4.10 (negative indices count from the end, NaN
// becomes 0, out-of-int-range bails to JS), verifies every element in
// [k, final) exists (a hole would need prototype lookup), then copies the
// range into a freshly allocated result array.
// NOTE(review): BUILTIN header and many interior lines (the 'object'
// binding, smi fast-cases for arg1/arg2, the CopyElements call head) are
// missing from this extraction; code left untouched.
655 Heap* heap = isolate->heap();
659 if (receiver->IsJSArray()) {
661 if (!IsJSArrayFastElementMovingAllowed(heap, *array)) {
662 return CallJsBuiltin(isolate,
"ArraySlice", args);
665 if (array->HasFastElements()) {
666 elms =
handle(array->elements());
668 return CallJsBuiltin(isolate,
"ArraySlice", args);
671 len =
Smi::cast(array->length())->value();
// Non-array path: accept only objects with the sloppy-arguments map.
675 Handle<Map> arguments_map(isolate->context()->native_context()->
676 sloppy_arguments_boilerplate()->map());
678 bool is_arguments_object_with_fast_elements =
679 receiver->IsJSObject() &&
681 if (!is_arguments_object_with_fast_elements) {
682 return CallJsBuiltin(isolate,
"ArraySlice", args);
686 if (object->HasFastElements()) {
687 elms =
handle(object->elements());
689 return CallJsBuiltin(isolate,
"ArraySlice", args);
693 if (!len_obj->IsSmi()) {
694 return CallJsBuiltin(isolate,
"ArraySlice", args);
// An arguments object's length can exceed its backing store; bail then.
697 if (len > elms->length()) {
698 return CallJsBuiltin(isolate,
"ArraySlice", args);
705 int n_arguments = args.length() - 1;
710 int relative_start = 0;
711 int relative_end = len;
712 if (n_arguments > 0) {
716 }
else if (arg1->IsHeapNumber()) {
718 if (start < kMinInt || start >
kMaxInt) {
719 return CallJsBuiltin(isolate,
"ArraySlice", args);
721 relative_start =
std::isnan(start) ? 0 :
static_cast<int>(start);
722 }
else if (!arg1->IsUndefined()) {
723 return CallJsBuiltin(isolate,
"ArraySlice", args);
725 if (n_arguments > 1) {
729 }
else if (arg2->IsHeapNumber()) {
731 if (end < kMinInt || end >
kMaxInt) {
732 return CallJsBuiltin(isolate,
"ArraySlice", args);
734 relative_end =
std::isnan(end) ? 0 :
static_cast<int>(end);
735 }
else if (!arg2->IsUndefined()) {
736 return CallJsBuiltin(isolate,
"ArraySlice", args);
// ES5.1 15.4.4.10 steps 5-7: clamp start/end into [0, len].
742 int k = (relative_start < 0) ?
Max(len + relative_start, 0)
743 :
Min(relative_start, len);
746 int final = (relative_end < 0) ?
Max(len + relative_end, 0)
747 :
Min(relative_end, len);
750 int result_len =
Max(
final - k, 0);
// Any missing element forces the generic path (prototype lookups).
756 for (
int i = k; i <
final; i++) {
757 if (!ElementsAccessorHasElementWrapper(
758 accessor,
object,
object, i, elms)) {
765 }
else if (!receiver->IsJSArray()) {
766 return CallJsBuiltin(isolate,
"ArraySlice", args);
771 isolate->factory()->NewJSArray(kind, result_len, result_len);
774 if (result_len == 0)
return *result_array;
778 handle(result_array->elements()), 0, result_len, elms);
779 return *result_array;
// Fragment of BUILTIN(ArraySplice): fast-path splice per ES5.1 15.4.4.12.
// Clamps start/deleteCount, copies the deleted range into a new result
// array, then closes the gap (shrink case: left-trim or memmove + fill
// with holes, with separate object/double paths) or opens one (grow case:
// reallocate with 1.5x + 16 headroom or memmove up) before writing the
// inserted items.
// NOTE(review): BUILTIN header and many interior lines (arg1/arg2 smi
// fast-cases, elements_kind, trim-vs-move heuristics, length store) are
// missing from this extraction; code left untouched.
785 Heap* heap = isolate->heap();
788 EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
790 !IsJSArrayFastElementMovingAllowed(heap,
792 return CallJsBuiltin(isolate,
"ArraySplice", args);
795 ASSERT(!array->map()->is_observed());
797 int len =
Smi::cast(array->length())->value();
799 int n_arguments = args.length() - 1;
801 int relative_start = 0;
802 if (n_arguments > 0) {
806 }
else if (arg1->IsHeapNumber()) {
808 if (start < kMinInt || start >
kMaxInt) {
809 return CallJsBuiltin(isolate,
"ArraySplice", args);
811 relative_start =
std::isnan(start) ? 0 :
static_cast<int>(start);
812 }
else if (!arg1->IsUndefined()) {
813 return CallJsBuiltin(isolate,
"ArraySplice", args);
// Clamp the start index into [0, len] (negatives count from the end).
816 int actual_start = (relative_start < 0) ?
Max(len + relative_start, 0)
817 :
Min(relative_start, len);
// With one argument, everything from actual_start onward is deleted.
824 int actual_delete_count;
825 if (n_arguments == 1) {
826 ASSERT(len - actual_start >= 0);
827 actual_delete_count = len - actual_start;
830 if (n_arguments > 1) {
835 return CallJsBuiltin(isolate,
"ArraySplice", args);
838 actual_delete_count =
Min(
Max(value, 0), len - actual_start);
843 int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
844 int new_length = len - actual_delete_count + item_count;
848 return CallJsBuiltin(isolate,
"ArraySplice", args);
// Everything deleted: detach the store entirely.
851 if (new_length == 0) {
853 elms_obj, elements_kind, actual_delete_count);
854 array->set_elements(heap->empty_fixed_array());
860 isolate->factory()->NewJSArray(elements_kind,
862 actual_delete_count);
864 if (actual_delete_count > 0) {
869 handle(result_array->elements()), 0, actual_delete_count, elms_obj);
872 bool elms_changed =
false;
// Shrink case: close the gap by trimming from the left or moving down.
873 if (item_count < actual_delete_count) {
876 ((actual_start + item_count) <
877 (len - actual_delete_count - actual_start));
879 const int delta = actual_delete_count - item_count;
881 if (elms_obj->IsFixedDoubleArray()) {
884 MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
893 elms_obj =
handle(LeftTrimFixedArray(heap, *elms_obj, delta));
897 if (elms_obj->IsFixedDoubleArray()) {
900 MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
901 FillWithHoles(*elms, len - delta, len);
906 FillWithHoles(heap, *elms, len - delta, len);
911 if (elms_obj->IsFixedDoubleArray()) {
914 MoveDoubleElements(*elms, actual_start + item_count,
915 *elms, actual_start + actual_delete_count,
916 (len - actual_delete_count - actual_start));
917 FillWithHoles(*elms, new_length, len);
922 actual_start + actual_delete_count,
923 (len - actual_delete_count - actual_start));
924 FillWithHoles(heap, *elms, new_length, len);
927 }
// Grow case: reallocate with headroom or shift the tail upward.
else if (item_count > actual_delete_count) {
934 if (new_length > elms->length()) {
936 int capacity = new_length + (new_length >> 1) + 16;
938 isolate->factory()->NewUninitializedFixedArray(capacity);
944 if (actual_start > 0) {
951 new_elms, actual_start + item_count,
959 actual_start + actual_delete_count,
960 (len - actual_delete_count - actual_start));
// Write the inserted items (args slots 3..) into the opened gap.
966 for (
int k = actual_start; k < actual_start + item_count; k++) {
967 Object* arg = args[3 + k - actual_start];
978 for (
int k = actual_start; k < actual_start + item_count; k++) {
979 elms->set(k, args[3 + k - actual_start], mode);
984 array->set_elements(*elms_obj);
989 return *result_array;
// Fragment of BUILTIN(ArrayConcat): fast-path concat. Requires an
// element-free Array prototype chain and all-JSArray arguments; scans the
// arguments to compute the combined length and the widest elements kind
// (tracking holey/double-ness), then allocates the result and bulk-copies
// each argument's elements via its ElementsAccessor.
// NOTE(review): BUILTIN header and interior lines (result_len overflow
// checks, kind-widening logic, storage/accessor setup, 'j' advance) are
// missing from this extraction; code left untouched.
995 Heap* heap = isolate->heap();
999 if (!ArrayPrototypeHasNoElements(heap, *native_context, *array_proto)) {
1000 return CallJsBuiltin(isolate,
"ArrayConcat", args);
1005 int n_arguments = args.length();
1008 bool has_double =
false;
1009 bool is_holey =
false;
1010 for (
int i = 0; i < n_arguments; i++) {
1012 if (!arg->IsJSArray() ||
1015 return CallJsBuiltin(isolate,
"ArrayConcat", args);
1027 return CallJsBuiltin(isolate,
"ArrayConcat", args);
// Widen the accumulated elements kind to cover this argument.
1034 elements_kind = arg_kind;
1047 isolate->factory()->NewJSArray(elements_kind,
1051 if (result_len == 0)
return *result_array;
1056 for (
int i = 0; i < n_arguments; i++) {
1058 int len =
Smi::cast(array->length())->value();
1061 accessor->CopyElements(array, 0, from_kind, storage, j, len);
1068 return *result_array;
// Fragment of the strict-mode poison pill builtin: unconditionally throws
// the "strict_poison_pill" TypeError. (The BUILTIN header was lost in
// extraction.)
1078 return isolate->Throw(*isolate->factory()->NewTypeError(
1079 "strict_poison_pill", HandleVector<Object>(
NULL, 0)));
// Walks up the (hidden-)prototype chain looking for the first object that
// the given FunctionTemplateInfo is a template for; returns null_value if
// none is found.
// NOTE(review): the 'object' parameter line and the proto/hidden-prototype
// fetch between the checks are missing from this extraction.
1090 static inline Object* FindHidden(Heap* heap,
1092 FunctionTemplateInfo* type) {
1093 if (type->IsTemplateFor(
object))
return object;
1095 if (proto->IsJSObject() &&
1097 return FindHidden(heap, proto, type);
1099 return heap->null_value();
// Checks an API call's receiver and arguments against the function
// template's signature. Returns the holder object on success, null_value
// on failure; an undefined signature accepts anything. Arguments failing
// their per-argument template check are replaced with undefined.
// NOTE(review): interior lines (recv/sig/holder bindings, the per-argument
// template check, and the final return) are missing from this extraction.
1109 static inline Object* TypeCheck(Heap* heap,
1112 FunctionTemplateInfo*
info) {
1115 if (!recv->IsJSObject())
return heap->null_value();
1116 Object* sig_obj = info->signature();
// No signature: every receiver is acceptable.
1117 if (sig_obj->IsUndefined())
return recv;
1120 Object* recv_type = sig->receiver();
1122 if (!recv_type->IsUndefined()) {
1124 if (holder == heap->null_value())
return heap->null_value();
1126 Object* args_obj = sig->args();
1128 if (args_obj->IsUndefined())
return holder;
// Only check as many arguments as were actually passed.
1130 int length = args->length();
1131 if (argc <= length) length = argc - 1;
1132 for (
int i = 0; i < length; i++) {
1133 Object* argtype = args->get(i);
1134 if (argtype->IsUndefined())
continue;
// Arguments are laid out downwards from argv.
1135 Object** arg = &argv[-1 - i];
1138 if (current == heap->null_value()) current = heap->undefined_value();
// Shared implementation of the HandleApiCall builtins. Configures the
// receiver from the function template if needed, patches a sloppy-mode
// undefined receiver to the global receiver, type-checks receiver and
// arguments against the template signature (throwing an
// "illegal invocation" TypeError on failure), then invokes the embedder's
// v8::FunctionCallback and unwraps its return value. For construct calls a
// non-object callback result is replaced by the receiver.
// NOTE(review): interior lines (the function header itself at 1146, the
// pending-exception bailout, 'recv' binding, callback invocation and
// 'value' binding) are missing from this extraction; code left untouched.
1145 template <
bool is_construct>
1147 BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
1148 ASSERT(is_construct == CalledAsConstructor(isolate));
1149 Heap* heap = isolate->heap();
1151 HandleScope scope(isolate);
1152 Handle<JSFunction>
function = args.called_function();
1153 ASSERT(function->shared()->IsApiFunction());
1155 FunctionTemplateInfo* fun_data =
function->shared()->get_api_func_data();
1157 Handle<FunctionTemplateInfo> desc(fun_data, isolate);
1158 bool pending_exception =
false;
1159 isolate->factory()->ConfigureInstance(
1160 desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
1161 ASSERT(isolate->has_pending_exception() == pending_exception);
// Sloppy non-native functions see the global receiver instead of
// undefined.
1166 SharedFunctionInfo* shared =
function->shared();
1167 if (shared->strict_mode() ==
SLOPPY && !shared->native()) {
1170 if (recv->IsUndefined()) {
1171 args[0] =
function->context()->global_object()->global_receiver();
1175 Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
1177 if (raw_holder->IsNull()) {
// Signature check failed: throw rather than invoke the callback.
1179 Handle<Object>
obj =
1180 isolate->factory()->NewTypeError(
1182 return isolate->Throw(*obj);
1185 Object* raw_call_data = fun_data->call_code();
1186 if (!raw_call_data->IsUndefined()) {
1188 Object* callback_obj = call_data->callback();
1190 v8::ToCData<v8::FunctionCallback>(callback_obj);
1191 Object* data_obj = call_data->data();
1195 ASSERT(raw_holder->IsJSObject());
1197 FunctionCallbackArguments custom(isolate,
// An empty embedder return value means undefined.
1206 if (value.IsEmpty()) {
1207 result = heap->undefined_value();
1209 result = *
reinterpret_cast<Object**
>(*value);
1210 result->VerifyApiCallResultType();
// Construct calls must yield an object; otherwise return the receiver.
1214 if (!is_construct || result->IsJSObject())
return result;
1217 return *args.receiver();
// Bodies of BUILTIN(HandleApiCall) and BUILTIN(HandleApiCallConstruct):
// both delegate to HandleApiCallHelper with the is_construct flag. (Their
// BUILTIN headers were lost in extraction.)
1222 return HandleApiCallHelper<false>(args, isolate);
1227 return HandleApiCallHelper<true>(args, isolate);
// Handles a call to a non-function API object that has an instance call
// handler: fetches the handler's v8::FunctionCallback from the
// constructor's function template, invokes it, and unwraps the result
// (undefined when the embedder returned nothing).
// NOTE(review): interior lines (isolate parameter, obj/constructor/handler
// bindings, callback invocation, and the construct-call result handling
// and return) are missing from this extraction; code left untouched.
1234 MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
1236 bool is_construct_call,
1237 BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
1240 ASSERT(!CalledAsConstructor(isolate));
1241 Heap* heap = isolate->heap();
1250 ASSERT(obj->map()->has_instance_call_handler());
1252 ASSERT(constructor->shared()->IsApiFunction());
1254 constructor->shared()->get_api_func_data()->instance_call_handler();
1255 ASSERT(!handler->IsUndefined());
1257 Object* callback_obj = call_data->callback();
1259 v8::ToCData<v8::FunctionCallback>(callback_obj);
1265 LOG(isolate, ApiObjectAccess(
"call non-function", obj));
1267 FunctionCallbackArguments custom(isolate,
// An empty embedder return value means undefined.
1275 if (value.IsEmpty()) {
1276 result = heap->undefined_value();
1278 result = *
reinterpret_cast<Object**
>(*value);
1279 result->VerifyApiCallResultType();
// Bodies of BUILTIN(HandleApiCallAsFunction) and
// BUILTIN(HandleApiCallAsConstructor): delegate with is_construct_call
// false/true respectively. (BUILTIN headers lost in extraction.)
1291 return HandleApiCallAsFunctionOrConstructor(isolate,
false, args);
1298 return HandleApiCallAsFunctionOrConstructor(isolate,
true, args);
// One-line generator stubs for the Load/KeyedLoad/Store/KeyedStore IC
// builtins; each presumably forwards to the corresponding
// LoadIC/KeyedLoadIC/StoreIC/KeyedStoreIC::Generate* routine, matching
// the DebugBreak stubs below. NOTE(review): every body was lost in
// extraction -- only the signatures remain; code left untouched.
1302 static void Generate_LoadIC_Miss(MacroAssembler* masm) {
1307 static void Generate_LoadIC_Normal(MacroAssembler* masm) {
1312 static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
1317 static void Generate_LoadIC_Slow(MacroAssembler* masm) {
1322 static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
1327 static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
1332 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
1337 static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
1342 static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
1347 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
1352 static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
1357 static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
1362 static void Generate_StoreIC_Slow(MacroAssembler* masm) {
1367 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
1372 static void Generate_StoreIC_Normal(MacroAssembler* masm) {
1377 static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
1382 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
1387 static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
1392 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
1397 static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
1402 static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
1407 static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
1412 static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
1417 static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
1422 static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
1427 #ifdef ENABLE_DEBUGGER_SUPPORT
// Debugger-support generator stubs: each builtin simply forwards to the
// corresponding Debug::Generate* routine. (Closing braces were dropped by
// the extraction; code text left untouched.)
1428 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
1429 Debug::GenerateLoadICDebugBreak(masm);
1433 static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
1434 Debug::GenerateStoreICDebugBreak(masm);
1438 static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
1439 Debug::GenerateKeyedLoadICDebugBreak(masm);
1443 static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
1444 Debug::GenerateKeyedStoreICDebugBreak(masm);
1448 static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
1449 Debug::GenerateCompareNilICDebugBreak(masm);
1453 static void Generate_Return_DebugBreak(MacroAssembler* masm) {
1454 Debug::GenerateReturnDebugBreak(masm);
1458 static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
1459 Debug::GenerateCallFunctionStubDebugBreak(masm);
1463 static void Generate_CallFunctionStub_Recording_DebugBreak(
1464 MacroAssembler* masm) {
1465 Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
1469 static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
1470 Debug::GenerateCallConstructStubDebugBreak(masm);
1474 static void Generate_CallConstructStub_Recording_DebugBreak(
1475 MacroAssembler* masm) {
1476 Debug::GenerateCallConstructStubRecordDebugBreak(masm);
1480 static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
1481 Debug::GenerateSlotDebugBreak(masm);
1485 static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
1486 Debug::GeneratePlainReturnLiveEdit(masm);
1490 static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
1491 Debug::GenerateFrameDropperLiveEdit(masm);
// Constructor: start uninitialized with zeroed code-pointer and name
// tables; SetUp() populates them later.
1496 Builtins::Builtins() : initialized_(
false) {
1497 memset(builtins_, 0,
sizeof(builtins_[0]) * builtin_count);
1498 memset(names_, 0,
sizeof(names_[0]) * builtin_count);
// Destructor (body lost in extraction) followed by the static builtin
// tables: C entry points, JS builtin names/arg counts (all generated from
// the builtin list macros), and the once-guarded function-table accessor.
1502 Builtins::~Builtins() {
1506 #define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
1507 Address const Builtins::c_functions_[cfunction_count] = {
1512 #define DEF_JS_NAME(name, ignore) #name,
1513 #define DEF_JS_ARGC(ignore, argc) argc,
1514 const char*
const Builtins::javascript_names_[id_count] = {
1518 int const Builtins::javascript_argc_[id_count] = {
1533 #define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
// Lazily initializes the shared builtin function table exactly once.
1538 CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
// Fills the BuiltinDesc table: a sentinel entry at builtin_count, then one
// descriptor per builtin, emitted by expanding the builtin list with the
// DEF_FUNCTION_PTR_* macros below (C builtins use Generate_Adaptor plus a
// c_code entry point; assembly and handler builtins use their Generate_*
// routine directly).
// NOTE(review): the macro tails (the ++functions advance and closing
// lines) are missing from this extraction; no comments are inserted
// between backslash-continued lines to avoid corrupting the macros.
1555 void Builtins::InitBuiltinFunctionTable() {
1560 functions[builtin_count].
name = builtin_count;
1564 #define DEF_FUNCTION_PTR_C(aname, aextra_args) \
1565 functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
1566 functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
1567 functions->s_name = #aname; \
1568 functions->name = c_##aname; \
1569 functions->flags = Code::ComputeFlags(Code::BUILTIN); \
1570 functions->extra_args = aextra_args; \
1573 #define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
1574 functions->generator = FUNCTION_ADDR(Generate_##aname); \
1575 functions->c_code = NULL; \
1576 functions->s_name = #aname; \
1577 functions->name = k##aname; \
1578 functions->flags = Code::ComputeFlags(Code::kind, \
1581 functions->extra_args = NO_EXTRA_ARGUMENTS; \
1584 #define DEF_FUNCTION_PTR_H(aname, kind) \
1585 functions->generator = FUNCTION_ADDR(Generate_##aname); \
1586 functions->c_code = NULL; \
1587 functions->s_name = #aname; \
1588 functions->name = k##aname; \
1589 functions->flags = Code::ComputeHandlerFlags(Code::kind); \
1590 functions->extra_args = NO_EXTRA_ARGUMENTS; \
1598 #undef DEF_FUNCTION_PTR_C
1599 #undef DEF_FUNCTION_PTR_A
// Generates (or leaves for deserialization) the code object for every
// builtin: runs each table entry's generator into a stack buffer, turns
// the result into a Code object, logs/profiles it, optionally disassembles
// it under --print-builtin-code, and records the code pointer and name.
// NOTE(review): many interior lines (MacroAssembler setup, CodeDesc, the
// allocation-retry loop, PROFILE/GDBJIT hookup) are missing from this
// extraction; code text left untouched.
1603 void Builtins::SetUp(
Isolate* isolate,
bool create_heap_objects) {
// Scratch buffer for generated code; the union forces alignment.
1617 union {
int force_alignment;
byte buffer[16*
KB]; } u;
1621 for (
int i = 0; i < builtin_count; i++) {
1622 if (create_heap_objects) {
1626 Generator g = FUNCTION_CAST<Generator>(functions[i].
generator);
1631 g(&masm, functions[i].name, functions[i].extra_args);
1641 { MaybeObject* maybe_code =
1643 if (!maybe_code->ToObject(&code)) {
1650 CodeCreateEvent(Logger::BUILTIN_TAG,
1652 functions[i].s_name));
1654 functions[i].s_name,
1656 builtins_[i] =
code;
1657 #ifdef ENABLE_DISASSEMBLER
1658 if (FLAG_print_builtin_code) {
1660 PrintF(trace_scope.file(),
"Builtin: %s\n", functions[i].
s_name);
1661 Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file());
1662 PrintF(trace_scope.file(),
"\n");
// Deserialization path: the snapshot will fill builtins_ in later.
1667 builtins_[i] =
NULL;
1669 names_[i] = functions[i].
s_name;
1673 initialized_ =
true;
// Marks the builtins table as uninitialized again.
1677 void Builtins::TearDown() {
1678 initialized_ =
false;
// GC support: visits every builtin code pointer as a strong root.
1682 void Builtins::IterateBuiltins(ObjectVisitor* v) {
1683 v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
// Fragment of a builtin-table walker (its enclosing function header was
// lost in extraction) -- presumably Builtins::Lookup or similar, scanning
// every builtin's Code object. TODO(review): confirm against the original.
1690 for (
int i = 0; i < builtin_count; i++) {
1691 Code* entry = Code::cast(builtins_[i]);
// StackCheck builtin: tail-calls the StackGuard runtime function.
1706 void Builtins::Generate_StackCheck(MacroAssembler* masm) {
1707 masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
// Accessor macros: each defines Builtins::<name>() returning a location
// Handle<Code> into the builtins_ table for the C / assembly / handler
// builtin respectively. NOTE(review): the closing-brace lines of these
// macros are missing from this extraction; no comments are inserted
// between backslash-continued lines to avoid corrupting them.
1711 #define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
1712 Handle<Code> Builtins::name() { \
1713 Code** code_address = \
1714 reinterpret_cast<Code**>(builtin_address(k##name)); \
1715 return Handle<Code>(code_address); \
1717 #define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
1718 Handle<Code> Builtins::name() { \
1719 Code** code_address = \
1720 reinterpret_cast<Code**>(builtin_address(k##name)); \
1721 return Handle<Code>(code_address); \
1723 #define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
1724 Handle<Code> Builtins::name() { \
1725 Code** code_address = \
1726 reinterpret_cast<Code**>(builtin_address(k##name)); \
1727 return Handle<Code>(code_address); \
1733 #undef DEFINE_BUILTIN_ACCESSOR_C
1734 #undef DEFINE_BUILTIN_ACCESSOR_A
static void GenerateSloppyArguments(MacroAssembler *masm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
BuiltinDesc functions_[Builtins::builtin_count+1]
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static const int kMaxLength
static void EnsureCanContainElements(Handle< JSObject > object, Object **elements, uint32_t count, EnsureElementsMode mode)
static Object *& Object_at(Address addr)
static CallHandlerInfo * cast(Object *obj)
bool IsHoleyElementsKind(ElementsKind kind)
#define BUILTINS_LIST_JS(V)
#define PROFILE(IsolateGetter, Call)
ElementsKind GetPackedElementsKind(ElementsKind holey_kind)
#define BUILTIN_LIST_H(V)
#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)
bool is_hidden_prototype()
void PrintF(const char *format,...)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
CodeTracer * GetCodeTracer()
void(* FunctionCallback)(const FunctionCallbackInfo< Value > &info)
static Smi * FromInt(int value)
bool IsFastObjectElementsKind(ElementsKind kind)
#define LOG(isolate, Call)
static void GenerateMiss(MacroAssembler *masm)
virtual MUST_USE_RESULT Handle< Object > Get(Handle< Object > receiver, Handle< JSObject > holder, uint32_t key, Handle< FixedArrayBase > backing_store=Handle< FixedArrayBase >::null())=0
static Handle< T > cast(Handle< S > that)
BuiltinDesc * functions()
void CallOnce(OnceType *once, NoArgFunction init_func)
kSerializedDataOffset Object
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including on console Map counters to a file Enable debugger compile events enable GDBJIT enable GDBJIT interface for all code objects dump only objects containing this substring stress the GC compactor to flush out pretty print source code print source AST function name where to insert a breakpoint print scopes for builtins trace contexts operations print stuff during garbage collection report code statistics after GC report handles after GC trace cache state transitions print interface inference details prints when objects are turned into dictionaries report heap spill statistics along with trace isolate state changes trace regexp bytecode execution Minimal Log all events to the log file Log API events to the log file Log heap samples on garbage collection for the hp2ps tool log positions Log suspect operations Used with turns on browser compatible mode for profiling v8 Specify the name of the log file Enable low level linux profiler Enable perf linux profiler(experimental annotate support).") DEFINE_string(gc_fake_mmap
static Failure * Exception()
#define ASSERT(condition)
virtual MUST_USE_RESULT Handle< Object > SetLength(Handle< JSArray > holder, Handle< Object > new_length)=0
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)
Handle< Object > GetProperty(Handle< JSReceiver > obj, const char *name)
#define RETURN_IF_EMPTY_HANDLE(isolate, call)
MUST_USE_RESULT MaybeObject * EnsureWritableFastElements()
ArrayStorageAllocationMode
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
static Object ** RawField(HeapObject *obj, int offset)
static Smi * cast(Object *object)
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind)
static void GenerateGeneric(MacroAssembler *masm, StrictMode strict_mode)
Handle< Object > CodeObject()
static void TransitionElementsKind(Handle< JSObject > object, ElementsKind to_kind)
HANDLE HANDLE LPSTACKFRAME64 StackFrame
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static void GenerateInitialize(MacroAssembler *masm)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static Address c_entry_fp(ThreadLocalTop *thread)
static const int kCallerFPOffset
Local< Value > GetPrototype()
#define BUILTIN_FUNCTION_TABLE_INIT
void GetCode(CodeDesc *desc)
static void GeneratePreMonomorphic(MacroAssembler *masm)
static void GenerateGeneric(MacroAssembler *masm)
static Address & Address_at(Address addr)
static void GenerateMiss(MacroAssembler *masm)
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, ElementsKind to_kind)
static void MemMove(void *dest, const void *src, size_t size)
static FunctionTemplateInfo * cast(Object *obj)
static void GenerateLoadViaGetterForDeopt(MacroAssembler *masm)
#define DEF_FUNCTION_PTR_C(aname, aextra_args)
static const int kMarkerOffset
static void GenerateSlow(MacroAssembler *masm)
LargeObjectSpace * lo_space()
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static void GenerateSloppyArguments(MacroAssembler *masm)
bool IsFastSmiOrObjectElementsKind(ElementsKind kind)
static ElementsAccessor * ForKind(ElementsKind elements_kind)
static const int kHeaderSize
#define DEF_ENUM_C(name, ignore)
MUST_USE_RESULT MaybeObject * CreateCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false, bool crankshafted=false, int prologue_offset=Code::kPrologueOffsetNotSet)
static void GenerateSlow(MacroAssembler *masm)
bool Contains(HeapObject *obj)
static const int kMapOffset
static const int kCopyToEndAndInitializeToHole
#define BUILTIN_LIST_DEBUG_A(V)
static HeapNumber * cast(Object *obj)
#define DEF_FUNCTION_PTR_H(aname, kind)
static const int kLengthOffset
static Handle< Object > Call(Isolate *isolate, Handle< Object > callable, Handle< Object > receiver, int argc, Handle< Object > argv[], bool *pending_exception, bool convert_receiver=false)
Handle< T > handle(T *t, Isolate *isolate)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function info
static const int kArgumentsLengthIndex
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
ElementsKind GetInitialFastElementsKind()
static void GenerateString(MacroAssembler *masm)
void MoveElements(FixedArray *array, int dst_index, int src_index, int len)
#define DEF_JS_ARGC(ignore, argc)
static Handle< T > null()
void MemsetPointer(T **dest, U *value, int counter)
#define ASSERT_EQ(v1, v2)
static HeapObject * FromAddress(Address address)
static FixedArray * cast(Object *obj)
static void GenerateNormal(MacroAssembler *masm)
virtual void CopyElements(Handle< JSObject > source_holder, uint32_t source_start, ElementsKind source_kind, Handle< FixedArrayBase > destination, uint32_t destination_start, int copy_size, Handle< FixedArrayBase > source=Handle< FixedArrayBase >::null())=0
static void GenerateIndexedInterceptor(MacroAssembler *masm)
bool IsFastHoleyElementsKind(ElementsKind kind)
static Handle< Object > GetElement(Isolate *isolate, Handle< Object > object, uint32_t index)
static const int kMaxLength
#define DEF_FUNCTION_PTR_A(aname, kind, state, extra)
Vector< Handle< Object > > HandleVector(v8::internal::Handle< T > *elms, int length)
#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra)
static void GenerateMiss(MacroAssembler *masm)
bool CanMoveObjectStart(HeapObject *object)
#define DEF_ARG_TYPE(name, spec)
static SignatureInfo * cast(Object *obj)
static FixedArrayBase * cast(Object *object)
static void GenerateStoreViaSetterForDeopt(MacroAssembler *masm)
static const int kMaxValue
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
BuiltinExtraArguments extra_args
#define BUILTIN_LIST_C(V)
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static void GenerateInitialize(MacroAssembler *masm)
#define DEF_JS_NAME(name, ignore)
static JSObject * cast(Object *obj)
static void GeneratePreMonomorphic(MacroAssembler *masm)
#define BUILTIN_LIST_A(V)
bool IsFastDoubleElementsKind(ElementsKind kind)
static void GenerateNormal(MacroAssembler *masm)
static void GenerateMiss(MacroAssembler *masm)
static JSFunction * cast(Object *obj)