V8 3.25.30 (Node 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
builtins.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "api.h"
31 #include "arguments.h"
32 #include "bootstrapper.h"
33 #include "builtins.h"
34 #include "cpu-profiler.h"
35 #include "gdb-jit.h"
36 #include "ic-inl.h"
37 #include "heap-profiler.h"
38 #include "mark-compact.h"
39 #include "stub-cache.h"
40 #include "vm-state-inl.h"
41 
42 namespace v8 {
43 namespace internal {
44 
45 namespace {
46 
47 // Arguments object passed to C++ builtins.
// Arguments object passed to C++ builtins.
//
// Wraps the raw (length, arguments) pair handed to a builtin and adds
// bounds-checked accessors.  The extra_args template parameter records
// whether hidden extra arguments (currently only the called function)
// were pushed behind the regular ones.
template <BuiltinExtraArguments extra_args>
class BuiltinArguments : public Arguments {
 public:
  BuiltinArguments(int length, Object** arguments)
      : Arguments(length, arguments) { }

  // Bounds-checked raw element access (check is debug-only via ASSERT).
  Object*& operator[] (int index) {
    ASSERT(index < length());
    return Arguments::operator[](index);
  }

  // Bounds-checked, handlified element access.
  template <class S> Handle<S> at(int index) {
    ASSERT(index < length());
    return Arguments::at<S>(index);
  }

  // The receiver is always passed as argument 0.
  Handle<Object> receiver() {
    return Arguments::at<Object>(0);
  }

  // The called function occupies the last slot; only legal when the
  // template was instantiated with NEEDS_CALLED_FUNCTION (enforced at
  // compile time by the STATIC_ASSERT).
  Handle<JSFunction> called_function() {
    STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
    return Arguments::at<JSFunction>(Arguments::length() - 1);
  }

  // Gets the total number of arguments including the receiver (but
  // excluding extra arguments).
  int length() const {
    STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
    return Arguments::length();
  }

#ifdef DEBUG
  void Verify() {
    // Check we have at least the receiver.
    ASSERT(Arguments::length() >= 1);
  }
#endif
};
87 
88 
89 // Specialize BuiltinArguments for the called function extra argument.
90 
// NEEDS_CALLED_FUNCTION specialization: the hidden called-function slot
// at the end is not part of the user-visible argument count.
template <>
int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
  return Arguments::length() - 1;
}
95 
#ifdef DEBUG
// NEEDS_CALLED_FUNCTION specialization of the debug sanity check.
template <>
void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
  // Check we have at least the receiver and the called function.
  ASSERT(Arguments::length() >= 2);
  // Make sure cast to JSFunction succeeds.
  called_function();
}
#endif
105 
106 
// Declares a typedef <name>ArgumentsType for each builtin, so the
// BUILTIN macro below can name the matching BuiltinArguments
// instantiation.
#define DEF_ARG_TYPE(name, spec) \
  typedef BuiltinArguments<spec> name##ArgumentsType;
// NOTE(review): the source listing is missing the line that expands this
// macro over the builtin list (original line 109) — restore it before
// compiling.
#undef DEF_ARG_TYPE
111 
112 } // namespace
113 
114 // ----------------------------------------------------------------------------
115 // Support macro for defining builtins in C++.
116 // ----------------------------------------------------------------------------
117 //
118 // A builtin function is defined by writing:
119 //
120 // BUILTIN(name) {
121 // ...
122 // }
123 //
124 // In the body of the builtin function the arguments can be accessed
125 // through the BuiltinArguments object args.
126 
127 #ifdef DEBUG
128 
129 #define BUILTIN(name) \
130  MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
131  name##ArgumentsType args, Isolate* isolate); \
132  MUST_USE_RESULT static MaybeObject* Builtin_##name( \
133  int args_length, Object** args_object, Isolate* isolate) { \
134  name##ArgumentsType args(args_length, args_object); \
135  args.Verify(); \
136  return Builtin_Impl_##name(args, isolate); \
137  } \
138  MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
139  name##ArgumentsType args, Isolate* isolate)
140 
141 #else // For release mode.
142 
143 #define BUILTIN(name) \
144  static MaybeObject* Builtin_impl##name( \
145  name##ArgumentsType args, Isolate* isolate); \
146  static MaybeObject* Builtin_##name( \
147  int args_length, Object** args_object, Isolate* isolate) { \
148  name##ArgumentsType args(args_length, args_object); \
149  return Builtin_impl##name(args, isolate); \
150  } \
151  static MaybeObject* Builtin_impl##name( \
152  name##ArgumentsType args, Isolate* isolate)
153 #endif
154 
155 
#ifdef DEBUG
// Returns true when the JS frame below the current exit frame is a
// construct frame.  Computes the answer two ways — once via a full
// StackFrameIterator and once via raw frame-pointer arithmetic — and
// asserts that they agree.
static inline bool CalledAsConstructor(Isolate* isolate) {
  // Reference computation: walk past the exit frame with a full stack
  // frame iterator and ask the next frame directly.
  StackFrameIterator iterator(isolate);
  ASSERT(iterator.frame()->is_exit());
  iterator.Advance();
  bool reference_result = iterator.frame()->is_construct();

  // Fast computation.  fp is known to point at an exit frame, so the
  // relevant part of ExitFrame::ComputeCallerState can be inlined here.
  Address exit_fp = Isolate::c_entry_fp(isolate->thread_local_top());
  const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
  Address caller_fp = Memory::Address_at(exit_fp + kCallerOffset);
  // This inlines the part of StackFrame::ComputeType that reads the
  // frame marker.  Note that StackFrame::ComputeType has been
  // specialized for each architecture, so if any one of them changes
  // this code has to be changed as well.
  const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
  const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
  Object* frame_marker = Memory::Object_at(caller_fp + kMarkerOffset);
  bool fast_result = (frame_marker == kConstructMarker);

  ASSERT_EQ(fast_result, reference_result);
  return fast_result;
}
#endif
183 
184 
185 // ----------------------------------------------------------------------------
186 
// Placeholder builtin that must never actually be invoked.
BUILTIN(Illegal) {
  UNREACHABLE();
  return isolate->heap()->undefined_value();  // Make compiler happy.
}
191 
192 
// Callable no-op builtin; always returns undefined.
BUILTIN(EmptyFunction) {
  return isolate->heap()->undefined_value();
}
196 
197 
198 static void MoveDoubleElements(FixedDoubleArray* dst,
199  int dst_index,
200  FixedDoubleArray* src,
201  int src_index,
202  int len) {
203  if (len == 0) return;
204  OS::MemMove(dst->data_start() + dst_index,
205  src->data_start() + src_index,
206  len * kDoubleSize);
207 }
208 
209 
// Overwrites dst[from..to) with the one-true hole sentinel.  Must not
// be applied to copy-on-write arrays (checked below).
static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
  ASSERT(dst->map() != heap->fixed_cow_array_map());
  MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
}
214 
215 
216 static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
217  for (int i = from; i < to; i++) {
218  dst->set_the_hole(i);
219  }
220 }
221 
222 
// Trims the first |to_trim| entries off |elms| in place by moving the
// object's start forward in memory: the vacated words become a filler
// object and the map/length header is rewritten at the new start.
// Returns the relocated array.  Only valid in new and paged space (see
// the ASSERTs below).
static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
                                          FixedArrayBase* elms,
                                          int to_trim) {
  Map* map = elms->map();
  int entry_size;
  if (elms->IsFixedArray()) {
    entry_size = kPointerSize;
  } else {
    entry_size = kDoubleSize;  // FixedDoubleArray.
  }
  ASSERT(elms->map() != heap->fixed_cow_array_map());
  // For now this trick is only applied to fixed arrays in new and paged space.
  // In large object space the object's start must coincide with chunk
  // and thus the trick is just not applicable.
  ASSERT(!heap->lo_space()->Contains(elms));
  // NOTE(review): the source listing is missing original lines 239-241
  // here.

  Object** former_start = HeapObject::RawField(elms, 0);

  const int len = elms->length();

  if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
      elms->IsFixedArray() &&
      !heap->new_space()->Contains(elms)) {
    // If we are doing a big trim in old space then we zap the space that was
    // formerly part of the array so that the GC (aided by the card-based
    // remembered set) won't find pointers to new-space there.
    Object** zap = reinterpret_cast<Object**>(elms->address());
    zap++;  // Header of filler must be at least one word so skip that.
    for (int i = 1; i < to_trim; i++) {
      *zap++ = Smi::FromInt(0);
    }
  }
  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);

  // Rewrite the map and length words at the object's new start.
  int new_start_index = to_trim * (entry_size / kPointerSize);
  former_start[new_start_index] = map;
  former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);

  // Maintain marking consistency for HeapObjectIterator and
  // IncrementalMarking.
  int size_delta = to_trim * entry_size;
  Address new_start = elms->address() + size_delta;
  heap->marking()->TransferMark(elms->address(), new_start);
  heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);

  // NOTE(review): the right-hand side of this initialization (original
  // line 276, presumably a FixedArrayBase cast of the object at the new
  // start address) is missing from the listing.
  FixedArrayBase* new_elms =
  // Notify the heap profiler that the array moved.
  HeapProfiler* profiler = heap->isolate()->heap_profiler();
  if (profiler->is_tracking_object_moves()) {
    profiler->ObjectMoveEvent(elms->address(),
                              new_elms->address(),
                              new_elms->Size());
  }
  return new_elms;
}
285 
286 
287 static bool ArrayPrototypeHasNoElements(Heap* heap,
288  Context* native_context,
289  JSObject* array_proto) {
290  // This method depends on non writability of Object and Array prototype
291  // fields.
292  if (array_proto->elements() != heap->empty_fixed_array()) return false;
293  // Object.prototype
294  Object* proto = array_proto->GetPrototype();
295  if (proto == heap->null_value()) return false;
296  array_proto = JSObject::cast(proto);
297  if (array_proto != native_context->initial_object_prototype()) return false;
298  if (array_proto->elements() != heap->empty_fixed_array()) return false;
299  return array_proto->GetPrototype()->IsNull();
300 }
301 
302 
// Returns empty handle if not applicable.
// Qualifies the receiver for the C++ fast paths: it must be a plain,
// unobserved, extensible JSArray with fast elements, and (when args is
// non-NULL) its elements kind may be transitioned so the arguments in
// args[first_added_arg..] fit without further conversion.
// NOTE(review): the source listing is missing original line 304 here.
static inline Handle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
    Isolate* isolate,
    Handle<Object> receiver,
    Arguments* args,
    int first_added_arg) {
  if (!receiver->IsJSArray()) return Handle<FixedArrayBase>::null();
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  if (array->map()->is_observed()) return Handle<FixedArrayBase>::null();
  if (!array->map()->is_extensible()) return Handle<FixedArrayBase>::null();
  Handle<FixedArrayBase> elms(array->elements());
  Heap* heap = isolate->heap();
  Map* map = elms->map();
  if (map == heap->fixed_array_map()) {
    if (args == NULL || array->HasFastObjectElements()) return elms;
  } else if (map == heap->fixed_cow_array_map()) {
    // NOTE(review): missing original line 320 here (presumably the
    // copy-on-write backing store is made writable first).
    if (args == NULL || array->HasFastObjectElements()) return elms;
  } else if (map == heap->fixed_double_array_map()) {
    if (args == NULL) return elms;
  } else {
    // NOTE(review): missing original line 325 here (presumably
    // "return Handle<FixedArrayBase>::null();").
  }

  // Need to ensure that the arguments passed in args can be contained in
  // the array.
  int args_length = args->length();
  if (first_added_arg >= args_length) return handle(array->elements());

  ElementsKind origin_kind = array->map()->elements_kind();
  ASSERT(!IsFastObjectElementsKind(origin_kind));
  ElementsKind target_kind = origin_kind;
  int arg_count = args->length() - first_added_arg;
  // Note the subtraction: the added arguments are addressed relative to
  // args->arguments() with negative offsets.
  Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
  for (int i = 0; i < arg_count; i++) {
    Object* arg = arguments[i];
    if (arg->IsHeapObject()) {
      if (arg->IsHeapNumber()) {
        target_kind = FAST_DOUBLE_ELEMENTS;
      } else {
        // Any non-number heap object forces generic FAST_ELEMENTS.
        target_kind = FAST_ELEMENTS;
        break;
      }
    }
  }
  if (target_kind != origin_kind) {
    JSObject::TransitionElementsKind(array, target_kind);
    return handle(array->elements());
  }
  return elms;
}
355 
356 
357 // TODO(ishell): Handlify when all Array* builtins are handlified.
358 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
359  JSArray* receiver) {
360  if (!FLAG_clever_optimizations) return false;
361  Context* native_context = heap->isolate()->context()->native_context();
362  JSObject* array_proto =
363  JSObject::cast(native_context->array_function()->prototype());
364  return receiver->GetPrototype() == array_proto &&
365  ArrayPrototypeHasNoElements(heap, native_context, array_proto);
366 }
367 
368 
369 MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
370  Isolate* isolate,
371  const char* name,
372  BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
373  HandleScope handleScope(isolate);
374 
375  Handle<Object> js_builtin =
376  GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
377  name);
378  Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
379  int argc = args.length() - 1;
380  ScopedVector<Handle<Object> > argv(argc);
381  for (int i = 0; i < argc; ++i) {
382  argv[i] = args.at<Object>(i + 1);
383  }
384  bool pending_exception;
385  Handle<Object> result = Execution::Call(isolate,
386  function,
387  args.receiver(),
388  argc,
389  argv.start(),
390  &pending_exception);
391  if (pending_exception) return Failure::Exception();
392  return *result;
393 }
394 
395 
// Array.prototype.push fast path.  Appends the arguments to a fast
// smi/object or double elements array, growing the backing store by
// 1.5x + 16 when needed; falls back to the JS builtin otherwise.
BUILTIN(ArrayPush) {
  HandleScope scope(isolate);
  Handle<Object> receiver = args.receiver();
  Handle<FixedArrayBase> elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
  if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPush", args);

  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());

  ElementsKind kind = array->GetElementsKind();

  if (IsFastSmiOrObjectElementsKind(kind)) {
    // NOTE(review): missing original line 409 here (presumably
    // "Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);").

    int len = Smi::cast(array->length())->value();
    int to_add = args.length() - 1;
    if (to_add == 0) {
      // Nothing to append; length is unchanged.
      return Smi::FromInt(len);
    }
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
    ASSERT(to_add <= (Smi::kMaxValue - len));

    int new_length = len + to_add;

    if (new_length > elms->length()) {
      // New backing storage is needed.
      int capacity = new_length + (new_length >> 1) + 16;
      Handle<FixedArray> new_elms =
          isolate->factory()->NewUninitializedFixedArray(capacity);

      ElementsAccessor* accessor = array->GetElementsAccessor();
      accessor->CopyElements(
          Handle<JSObject>::null(), 0, kind, new_elms, 0,
      // NOTE(review): missing original line 431 here (the copy call's
      // trailing arguments).

      elms = new_elms;
    }

    // Add the provided values.
    // NOTE(review): missing original line 437 here (presumably
    // "DisallowHeapAllocation no_gc;" for the write-barrier mode below).
    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
    for (int index = 0; index < to_add; index++) {
      elms->set(index + len, args[index + 1], mode);
    }

    if (*elms != array->elements()) {
      array->set_elements(*elms);
    }

    // Set the length.
    array->set_length(Smi::FromInt(new_length));
    return Smi::FromInt(new_length);
  } else {
    // Double-elements path.
    int len = Smi::cast(array->length())->value();
    int elms_len = elms_obj->length();

    int to_add = args.length() - 1;
    if (to_add == 0) {
      return Smi::FromInt(len);
    }
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
    ASSERT(to_add <= (Smi::kMaxValue - len));

    int new_length = len + to_add;

    Handle<FixedDoubleArray> new_elms;

    if (new_length > elms_len) {
      // New backing storage is needed.
      int capacity = new_length + (new_length >> 1) + 16;
      new_elms = isolate->factory()->NewFixedDoubleArray(capacity);

      ElementsAccessor* accessor = array->GetElementsAccessor();
      accessor->CopyElements(
          Handle<JSObject>::null(), 0, kind, new_elms, 0,
      // NOTE(review): missing original line 474 here (the copy call's
      // trailing arguments).

    } else {
      // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
      // empty_fixed_array.
      new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
    }

    // Add the provided values.
    // NOTE(review): missing original line 483 here.
    int index;
    for (index = 0; index < to_add; index++) {
      Object* arg = args[index + 1];
      new_elms->set(index + len, arg->Number());
    }

    if (*new_elms != array->elements()) {
      array->set_elements(*new_elms);
    }

    // Set the length.
    array->set_length(Smi::FromInt(new_length));
    return Smi::FromInt(new_length);
  }
}
499 
500 
// TODO(ishell): Temporary wrapper until handlified.
// Adapts the raw-pointer ElementsAccessor::HasElement interface for
// handle-based callers; a null backing-store handle is forwarded as
// NULL.
static bool ElementsAccessorHasElementWrapper(
    ElementsAccessor* accessor,
    Handle<Object> receiver,
    Handle<JSObject> holder,
    uint32_t key,
    // NOTE(review): missing original line 507 here (the final
    // parameter, presumably "Handle<FixedArrayBase> backing_store) {").
  return accessor->HasElement(*receiver, *holder, key,
                              backing_store.is_null() ? NULL : *backing_store);
}
511 
512 
513 BUILTIN(ArrayPop) {
514  HandleScope scope(isolate);
515  Handle<Object> receiver = args.receiver();
516  Handle<FixedArrayBase> elms_obj =
517  EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
518  if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPop", args);
519 
520  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
521  ASSERT(!array->map()->is_observed());
522 
523  int len = Smi::cast(array->length())->value();
524  if (len == 0) return isolate->heap()->undefined_value();
525 
526  ElementsAccessor* accessor = array->GetElementsAccessor();
527  int new_length = len - 1;
528  Handle<Object> element;
529  if (ElementsAccessorHasElementWrapper(
530  accessor, array, array, new_length, elms_obj)) {
531  element = accessor->Get(
532  array, array, new_length, elms_obj);
533  } else {
534  Handle<Object> proto(array->GetPrototype(), isolate);
535  element = Object::GetElement(isolate, proto, len - 1);
536  }
537  RETURN_IF_EMPTY_HANDLE(isolate, element);
538  RETURN_IF_EMPTY_HANDLE(isolate,
539  accessor->SetLength(
540  array, handle(Smi::FromInt(new_length), isolate)));
541  return *element;
542 }
543 
544 
// Array.prototype.shift fast path: removes and returns the first
// element, either by left-trimming the backing store in place or by
// moving the remaining elements down one slot.
BUILTIN(ArrayShift) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  Handle<FixedArrayBase> elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
  if (elms_obj.is_null() ||
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
    return CallJsBuiltin(isolate, "ArrayShift", args);
  }
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());

  int len = Smi::cast(array->length())->value();
  if (len == 0) return heap->undefined_value();

  // Get first element
  ElementsAccessor* accessor = array->GetElementsAccessor();
  Handle<Object> first = accessor->Get(receiver, array, 0, elms_obj);
  RETURN_IF_EMPTY_HANDLE(isolate, first);
  if (first->IsTheHole()) {
    first = isolate->factory()->undefined_value();
  }

  // NOTE(review): this negation looks inverted — LeftTrimFixedArray
  // moves the object's start, which ArraySplice below guards with the
  // POSITIVE CanMoveObjectStart check.  Verify against upstream V8.
  if (!heap->CanMoveObjectStart(*elms_obj)) {
    array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
  } else {
    // Shift the elements.
    if (elms_obj->IsFixedArray()) {
      // NOTE(review): missing original lines 575-576 here (presumably
      // the cast of elms_obj to a Handle<FixedArray> elms).
      heap->MoveElements(*elms, 0, 1, len - 1);
      elms->set(len - 1, heap->the_hole_value());
    } else {
      // NOTE(review): missing original line 580 here (presumably the
      // cast of elms_obj to a Handle<FixedDoubleArray> elms).
      MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
      elms->set_the_hole(len - 1);
    }
  }

  // Set the length.
  array->set_length(Smi::FromInt(len - 1));

  return *first;
}
591 
592 
// Array.prototype.unshift fast path: shifts the existing elements up
// by the number of new arguments and writes the new values at the
// front.  Only smi/object fast-elements arrays are handled in C++.
BUILTIN(ArrayUnshift) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  Handle<FixedArrayBase> elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
  if (elms_obj.is_null() ||
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
    return CallJsBuiltin(isolate, "ArrayUnshift", args);
  }
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());
  if (!array->HasFastSmiOrObjectElements()) {
    return CallJsBuiltin(isolate, "ArrayUnshift", args);
  }
  // NOTE(review): missing original line 609 here (presumably
  // "Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);").

  int len = Smi::cast(array->length())->value();
  int to_add = args.length() - 1;
  int new_length = len + to_add;
  // Currently fixed arrays cannot grow too big, so
  // we should never hit this case.
  ASSERT(to_add <= (Smi::kMaxValue - len));

  JSObject::EnsureCanContainElements(array, &args, 1, to_add,
  // NOTE(review): missing original line 619 here (the call's trailing
  // argument list).

  if (new_length > elms->length()) {
    // New backing storage is needed.
    int capacity = new_length + (new_length >> 1) + 16;
    Handle<FixedArray> new_elms =
        isolate->factory()->NewUninitializedFixedArray(capacity);

    ElementsKind kind = array->GetElementsKind();
    ElementsAccessor* accessor = array->GetElementsAccessor();
    accessor->CopyElements(
        Handle<JSObject>::null(), 0, kind, new_elms, to_add,
    // NOTE(review): missing original line 631 here (the copy call's
    // trailing arguments).

    elms = new_elms;
    array->set_elements(*elms);
  } else {
    // NOTE(review): missing original line 636 here.
    heap->MoveElements(*elms, to_add, 0, len);
  }

  // Add the provided values.
  // NOTE(review): missing original line 641 here (presumably
  // "DisallowHeapAllocation no_gc;" for the write-barrier mode below).
  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < to_add; i++) {
    elms->set(i, args[i + 1], mode);
  }

  // Set the length.
  array->set_length(Smi::FromInt(new_length));
  return Smi::FromInt(new_length);
}
651 
652 
// Array.prototype.slice fast path.  Also handles the very common
// Array.prototype.slice.call(arguments, ...) idiom.  Falls back to the
// JS builtin whenever the receiver or the range arguments are not
// simple.
BUILTIN(ArraySlice) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  // NOTE(review): missing original line 657 here (presumably
  // "Handle<FixedArrayBase> elms;").
  int len = -1;
  if (receiver->IsJSArray()) {
    Handle<JSArray> array = Handle<JSArray>::cast(receiver);
    if (!IsJSArrayFastElementMovingAllowed(heap, *array)) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }

    if (array->HasFastElements()) {
      elms = handle(array->elements());
    } else {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }

    len = Smi::cast(array->length())->value();
  } else {
    // Array.slice(arguments, ...) is quite a common idiom (notably more
    // than 50% of invocations in Web apps). Treat it in C++ as well.
    Handle<Map> arguments_map(isolate->context()->native_context()->
        sloppy_arguments_boilerplate()->map());

    bool is_arguments_object_with_fast_elements =
        receiver->IsJSObject() &&
        Handle<JSObject>::cast(receiver)->map() == *arguments_map;
    if (!is_arguments_object_with_fast_elements) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
    Handle<JSObject> object = Handle<JSObject>::cast(receiver);

    if (object->HasFastElements()) {
      elms = handle(object->elements());
    } else {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
    // The arguments object's length lives in an in-object property.
    Handle<Object> len_obj(
        object->InObjectPropertyAt(Heap::kArgumentsLengthIndex), isolate);
    if (!len_obj->IsSmi()) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
    len = Handle<Smi>::cast(len_obj)->value();
    if (len > elms->length()) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
  }

  Handle<JSObject> object = Handle<JSObject>::cast(receiver);

  ASSERT(len >= 0);
  int n_arguments = args.length() - 1;

  // Note carefully chosen defaults---if argument is missing,
  // it's undefined which gets converted to 0 for relative_start
  // and to len for relative_end.
  int relative_start = 0;
  int relative_end = len;
  if (n_arguments > 0) {
    Handle<Object> arg1 = args.at<Object>(1);
    if (arg1->IsSmi()) {
      relative_start = Handle<Smi>::cast(arg1)->value();
    } else if (arg1->IsHeapNumber()) {
      double start = Handle<HeapNumber>::cast(arg1)->value();
      if (start < kMinInt || start > kMaxInt) {
        // Out of int range: take the generic path.
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
      relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
    } else if (!arg1->IsUndefined()) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
    if (n_arguments > 1) {
      Handle<Object> arg2 = args.at<Object>(2);
      if (arg2->IsSmi()) {
        relative_end = Handle<Smi>::cast(arg2)->value();
      } else if (arg2->IsHeapNumber()) {
        double end = Handle<HeapNumber>::cast(arg2)->value();
        if (end < kMinInt || end > kMaxInt) {
          return CallJsBuiltin(isolate, "ArraySlice", args);
        }
        relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
      } else if (!arg2->IsUndefined()) {
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
    }
  }

  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
  int k = (relative_start < 0) ? Max(len + relative_start, 0)
                               : Min(relative_start, len);

  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
  int final = (relative_end < 0) ? Max(len + relative_end, 0)
                                 : Min(relative_end, len);

  // Calculate the length of result array.
  int result_len = Max(final - k, 0);

  ElementsKind kind = object->GetElementsKind();
  if (IsHoleyElementsKind(kind)) {
    // If the slice contains no holes, the result can use the packed
    // variant of the elements kind.
    bool packed = true;
    // NOTE(review): missing original line 755 here (presumably the
    // declaration of the ElementsAccessor* accessor used below).
    for (int i = k; i < final; i++) {
      if (!ElementsAccessorHasElementWrapper(
          accessor, object, object, i, elms)) {
        packed = false;
        break;
      }
    }
    if (packed) {
      kind = GetPackedElementsKind(kind);
    } else if (!receiver->IsJSArray()) {
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
  }

  Handle<JSArray> result_array =
      isolate->factory()->NewJSArray(kind, result_len, result_len);

  // NOTE(review): missing original line 773 here.
  if (result_len == 0) return *result_array;

  ElementsAccessor* accessor = object->GetElementsAccessor();
  accessor->CopyElements(Handle<JSObject>::null(), k, kind,
                         handle(result_array->elements()), 0, result_len, elms);
  return *result_array;
}
781 
782 
// Array.prototype.splice fast path: deletes actual_delete_count
// elements starting at actual_start, inserts the provided items, and
// returns a fresh array holding the removed elements.
BUILTIN(ArraySplice) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  Handle<FixedArrayBase> elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
  if (elms_obj.is_null() ||
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
    return CallJsBuiltin(isolate, "ArraySplice", args);
  }
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());

  int len = Smi::cast(array->length())->value();

  int n_arguments = args.length() - 1;

  // ToInteger(start), restricted to the int range; anything else takes
  // the generic path.
  int relative_start = 0;
  if (n_arguments > 0) {
    Handle<Object> arg1 = args.at<Object>(1);
    if (arg1->IsSmi()) {
      relative_start = Handle<Smi>::cast(arg1)->value();
    } else if (arg1->IsHeapNumber()) {
      double start = Handle<HeapNumber>::cast(arg1)->value();
      if (start < kMinInt || start > kMaxInt) {
        return CallJsBuiltin(isolate, "ArraySplice", args);
      }
      relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
    } else if (!arg1->IsUndefined()) {
      return CallJsBuiltin(isolate, "ArraySplice", args);
    }
  }
  int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
                                          : Min(relative_start, len);

  // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
  // given as a request to delete all the elements from the start.
  // And it differs from the case of undefined delete count.
  // This does not follow ECMA-262, but we do the same for
  // compatibility.
  int actual_delete_count;
  if (n_arguments == 1) {
    ASSERT(len - actual_start >= 0);
    actual_delete_count = len - actual_start;
  } else {
    int value = 0;  // ToInteger(undefined) == 0
    if (n_arguments > 1) {
      Object* arg2 = args[2];
      if (arg2->IsSmi()) {
        value = Smi::cast(arg2)->value();
      } else {
        return CallJsBuiltin(isolate, "ArraySplice", args);
      }
    }
    actual_delete_count = Min(Max(value, 0), len - actual_start);
  }

  ElementsKind elements_kind = array->GetElementsKind();

  int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
  int new_length = len - actual_delete_count + item_count;

  // For double mode we do not support changing the length.
  if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
    return CallJsBuiltin(isolate, "ArraySplice", args);
  }

  if (new_length == 0) {
    // Everything is removed: hand the old backing store to the result
    // and leave the receiver empty.
    Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
        elms_obj, elements_kind, actual_delete_count);
    array->set_elements(heap->empty_fixed_array());
    array->set_length(Smi::FromInt(0));
    return *result;
  }

  Handle<JSArray> result_array =
      isolate->factory()->NewJSArray(elements_kind,
                                     actual_delete_count,
                                     actual_delete_count);

  // Copy the deleted elements into the result array.
  if (actual_delete_count > 0) {
    // NOTE(review): missing original line 865 here.
    ElementsAccessor* accessor = array->GetElementsAccessor();
    accessor->CopyElements(
        Handle<JSObject>::null(), actual_start, elements_kind,
        handle(result_array->elements()), 0, actual_delete_count, elms_obj);
  }

  bool elms_changed = false;
  if (item_count < actual_delete_count) {
    // Shrink the array.
    const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
                            ((actual_start + item_count) <
                             (len - actual_delete_count - actual_start));
    if (trim_array) {
      const int delta = actual_delete_count - item_count;

      if (elms_obj->IsFixedDoubleArray()) {
        // NOTE(review): missing original lines 882-883 here (cast of
        // elms_obj to a Handle<FixedDoubleArray> elms).
        MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
      } else {
        // NOTE(review): missing original lines 886-887 here (cast of
        // elms_obj to a Handle<FixedArray> elms).
        heap->MoveElements(*elms, delta, 0, actual_start);
      }

      if (heap->CanMoveObjectStart(*elms_obj)) {
        // On the fast path we move the start of the object in memory.
        elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
      } else {
        // This is the slow path. We are going to move the elements to the left
        // by copying them. For trimmed values we store the hole.
        if (elms_obj->IsFixedDoubleArray()) {
          // NOTE(review): missing original lines 898-899 here.
          MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
          FillWithHoles(*elms, len - delta, len);
        } else {
          // NOTE(review): missing original lines 903-904 here.
          heap->MoveElements(*elms, 0, delta, len - delta);
          FillWithHoles(heap, *elms, len - delta, len);
        }
      }
      elms_changed = true;
    } else {
      if (elms_obj->IsFixedDoubleArray()) {
        // NOTE(review): missing original lines 912-913 here.
        MoveDoubleElements(*elms, actual_start + item_count,
                           *elms, actual_start + actual_delete_count,
                           (len - actual_delete_count - actual_start));
        FillWithHoles(*elms, new_length, len);
      } else {
        // NOTE(review): missing original lines 919-920 here.
        heap->MoveElements(*elms, actual_start + item_count,
                           actual_start + actual_delete_count,
                           (len - actual_delete_count - actual_start));
        FillWithHoles(heap, *elms, new_length, len);
      }
    }
  } else if (item_count > actual_delete_count) {
    // NOTE(review): missing original line 928 here (presumably
    // "Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);").
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
    ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));

    // Check if array need to grow.
    if (new_length > elms->length()) {
      // New backing storage is needed.
      int capacity = new_length + (new_length >> 1) + 16;
      Handle<FixedArray> new_elms =
          isolate->factory()->NewUninitializedFixedArray(capacity);
      // NOTE(review): missing original line 940 here.

      ElementsKind kind = array->GetElementsKind();
      ElementsAccessor* accessor = array->GetElementsAccessor();
      if (actual_start > 0) {
        // Copy the part before actual_start as is.
        accessor->CopyElements(
            Handle<JSObject>::null(), 0, kind, new_elms, 0, actual_start, elms);
      }
      accessor->CopyElements(
          Handle<JSObject>::null(), actual_start + actual_delete_count, kind,
          new_elms, actual_start + item_count,
      // NOTE(review): missing original line 952 here (the copy call's
      // trailing arguments).

      elms_obj = new_elms;
      elms_changed = true;
    } else {
      // NOTE(review): missing original line 957 here.
      heap->MoveElements(*elms, actual_start + item_count,
                         actual_start + actual_delete_count,
                         (len - actual_delete_count - actual_start));
    }
  }

  // Write the inserted items into the gap.
  if (IsFastDoubleElementsKind(elements_kind)) {
    // NOTE(review): missing original line 965 here (cast of elms_obj to
    // a Handle<FixedDoubleArray> elms).
    for (int k = actual_start; k < actual_start + item_count; k++) {
      Object* arg = args[3 + k - actual_start];
      if (arg->IsSmi()) {
        elms->set(k, Smi::cast(arg)->value());
      } else {
        elms->set(k, HeapNumber::cast(arg)->value());
      }
    }
  } else {
    // NOTE(review): missing original lines 975-976 here (cast of
    // elms_obj plus, presumably, a DisallowHeapAllocation scope for the
    // write-barrier mode below).
    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
    for (int k = actual_start; k < actual_start + item_count; k++) {
      elms->set(k, args[3 + k - actual_start], mode);
    }
  }

  if (elms_changed) {
    array->set_elements(*elms_obj);
  }
  // Set the length.
  array->set_length(Smi::FromInt(new_length));

  return *result_array;
}
991 
992 
993 BUILTIN(ArrayConcat) {
994  HandleScope scope(isolate);
995  Heap* heap = isolate->heap();
996  Handle<Context> native_context(isolate->context()->native_context());
997  Handle<JSObject> array_proto(
998  JSObject::cast(native_context->array_function()->prototype()));
999  if (!ArrayPrototypeHasNoElements(heap, *native_context, *array_proto)) {
1000  return CallJsBuiltin(isolate, "ArrayConcat", args);
1001  }
1002 
1003  // Iterate through all the arguments performing checks
1004  // and calculating total length.
1005  int n_arguments = args.length();
1006  int result_len = 0;
1007  ElementsKind elements_kind = GetInitialFastElementsKind();
1008  bool has_double = false;
1009  bool is_holey = false;
1010  for (int i = 0; i < n_arguments; i++) {
1011  Handle<Object> arg = args.at<Object>(i);
1012  if (!arg->IsJSArray() ||
1013  !Handle<JSArray>::cast(arg)->HasFastElements() ||
1014  Handle<JSArray>::cast(arg)->GetPrototype() != *array_proto) {
1015  return CallJsBuiltin(isolate, "ArrayConcat", args);
1016  }
1017  int len = Smi::cast(Handle<JSArray>::cast(arg)->length())->value();
1018 
1019  // We shouldn't overflow when adding another len.
1020  const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
1021  STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
1022  USE(kHalfOfMaxInt);
1023  result_len += len;
1024  ASSERT(result_len >= 0);
1025 
1026  if (result_len > FixedDoubleArray::kMaxLength) {
1027  return CallJsBuiltin(isolate, "ArrayConcat", args);
1028  }
1029 
1030  ElementsKind arg_kind = Handle<JSArray>::cast(arg)->map()->elements_kind();
1031  has_double = has_double || IsFastDoubleElementsKind(arg_kind);
1032  is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
1033  if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
1034  elements_kind = arg_kind;
1035  }
1036  }
1037 
1038  if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
1039 
1040  // If a double array is concatted into a fast elements array, the fast
1041  // elements array needs to be initialized to contain proper holes, since
1042  // boxing doubles may cause incremental marking.
1044  has_double && IsFastObjectElementsKind(elements_kind)
1046  Handle<JSArray> result_array =
1047  isolate->factory()->NewJSArray(elements_kind,
1048  result_len,
1049  result_len,
1050  mode);
1051  if (result_len == 0) return *result_array;
1052 
1053  int j = 0;
1054  Handle<FixedArrayBase> storage(result_array->elements());
1055  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
1056  for (int i = 0; i < n_arguments; i++) {
1057  Handle<JSArray> array = args.at<JSArray>(i);
1058  int len = Smi::cast(array->length())->value();
1059  ElementsKind from_kind = array->GetElementsKind();
1060  if (len > 0) {
1061  accessor->CopyElements(array, 0, from_kind, storage, j, len);
1062  j += len;
1063  }
1064  }
1065 
1066  ASSERT(j == result_len);
1067 
1068  return *result_array;
1069 }
1070 
1071 
1072 // -----------------------------------------------------------------------------
1073 // Strict mode poison pills
1074 
1075 
1076 BUILTIN(StrictModePoisonPill) {
1077  HandleScope scope(isolate);
1078  return isolate->Throw(*isolate->factory()->NewTypeError(
1079  "strict_poison_pill", HandleVector<Object>(NULL, 0)));
1080 }
1081 
1082 
1083 // -----------------------------------------------------------------------------
1084 //
1085 
1086 
1087 // Searches the hidden prototype chain of the given object for the first
1088 // object that is an instance of the given type. If no such object can
1089 // be found then Heap::null_value() is returned.
1090 static inline Object* FindHidden(Heap* heap,
1091  Object* object,
1092  FunctionTemplateInfo* type) {
1093  if (type->IsTemplateFor(object)) return object;
1094  Object* proto = object->GetPrototype(heap->isolate());
1095  if (proto->IsJSObject() &&
1096  JSObject::cast(proto)->map()->is_hidden_prototype()) {
1097  return FindHidden(heap, proto, type);
1098  }
1099  return heap->null_value();
1100 }
1101 
1102 
1103 // Returns the holder JSObject if the function can legally be called
1104 // with this receiver. Returns Heap::null_value() if the call is
1105 // illegal. Any arguments that don't fit the expected type is
1106 // overwritten with undefined. Note that holder and the arguments are
1107 // implicitly rewritten with the first object in the hidden prototype
1108 // chain that actually has the expected type.
static inline Object* TypeCheck(Heap* heap,
                                int argc,
                                Object** argv,
                                FunctionTemplateInfo* info) {
  Object* recv = argv[0];
  // API calls are only supported with JSObject receivers.
  if (!recv->IsJSObject()) return heap->null_value();
  Object* sig_obj = info->signature();
  // No signature installed: any JSObject receiver is acceptable.
  if (sig_obj->IsUndefined()) return recv;
  SignatureInfo* sig = SignatureInfo::cast(sig_obj);
  // If necessary, check the receiver
  Object* recv_type = sig->receiver();
  Object* holder = recv;
  if (!recv_type->IsUndefined()) {
    // Rewrite the holder to the first object in the hidden prototype chain
    // matching the expected receiver template; fail if none matches.
    holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
    if (holder == heap->null_value()) return heap->null_value();
  }
  Object* args_obj = sig->args();
  // If there is no argument signature we're done
  if (args_obj->IsUndefined()) return holder;
  FixedArray* args = FixedArray::cast(args_obj);
  int length = args->length();
  // Never check more arguments than were actually passed (argv[0] is the
  // receiver, so only argc - 1 real arguments are available).
  if (argc <= length) length = argc - 1;
  for (int i = 0; i < length; i++) {
    Object* argtype = args->get(i);
    // An undefined entry means this argument position is unconstrained.
    if (argtype->IsUndefined()) continue;
    // Arguments live at negative offsets from argv (argv[-1 - i]).
    Object** arg = &argv[-1 - i];
    Object* current = *arg;
    current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
    // Arguments that don't match their expected type are overwritten with
    // undefined in place.
    if (current == heap->null_value()) current = heap->undefined_value();
    *arg = current;
  }
  return holder;
}
1143 
1144 
1145 template <bool is_construct>
1146 MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
1147  BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
1148  ASSERT(is_construct == CalledAsConstructor(isolate));
1149  Heap* heap = isolate->heap();
1150 
1151  HandleScope scope(isolate);
1152  Handle<JSFunction> function = args.called_function();
1153  ASSERT(function->shared()->IsApiFunction());
1154 
1155  FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
1156  if (is_construct) {
1157  Handle<FunctionTemplateInfo> desc(fun_data, isolate);
1158  bool pending_exception = false;
1159  isolate->factory()->ConfigureInstance(
1160  desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
1161  ASSERT(isolate->has_pending_exception() == pending_exception);
1162  if (pending_exception) return Failure::Exception();
1163  fun_data = *desc;
1164  }
1165 
1166  SharedFunctionInfo* shared = function->shared();
1167  if (shared->strict_mode() == SLOPPY && !shared->native()) {
1168  Object* recv = args[0];
1169  ASSERT(!recv->IsNull());
1170  if (recv->IsUndefined()) {
1171  args[0] = function->context()->global_object()->global_receiver();
1172  }
1173  }
1174 
1175  Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
1176 
1177  if (raw_holder->IsNull()) {
1178  // This function cannot be called with the given receiver. Abort!
1179  Handle<Object> obj =
1180  isolate->factory()->NewTypeError(
1181  "illegal_invocation", HandleVector(&function, 1));
1182  return isolate->Throw(*obj);
1183  }
1184 
1185  Object* raw_call_data = fun_data->call_code();
1186  if (!raw_call_data->IsUndefined()) {
1187  CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
1188  Object* callback_obj = call_data->callback();
1189  v8::FunctionCallback callback =
1190  v8::ToCData<v8::FunctionCallback>(callback_obj);
1191  Object* data_obj = call_data->data();
1192  Object* result;
1193 
1194  LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
1195  ASSERT(raw_holder->IsJSObject());
1196 
1197  FunctionCallbackArguments custom(isolate,
1198  data_obj,
1199  *function,
1200  raw_holder,
1201  &args[0] - 1,
1202  args.length() - 1,
1203  is_construct);
1204 
1205  v8::Handle<v8::Value> value = custom.Call(callback);
1206  if (value.IsEmpty()) {
1207  result = heap->undefined_value();
1208  } else {
1209  result = *reinterpret_cast<Object**>(*value);
1210  result->VerifyApiCallResultType();
1211  }
1212 
1214  if (!is_construct || result->IsJSObject()) return result;
1215  }
1216 
1217  return *args.receiver();
1218 }
1219 
1220 
// Builtin entry point for normal (non-construct) calls to API functions.
BUILTIN(HandleApiCall) {
  return HandleApiCallHelper<false>(args, isolate);
}
1224 
1225 
// Builtin entry point for construct ('new') calls to API functions.
BUILTIN(HandleApiCallConstruct) {
  return HandleApiCallHelper<true>(args, isolate);
}
1229 
1230 
1231 // Helper function to handle calls to non-function objects created through the
1232 // API. The object can be called as either a constructor (using new) or just as
1233 // a function (without new).
1234 MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
1235  Isolate* isolate,
1236  bool is_construct_call,
1237  BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
1238  // Non-functions are never called as constructors. Even if this is an object
1239  // called as a constructor the delegate call is not a construct call.
1240  ASSERT(!CalledAsConstructor(isolate));
1241  Heap* heap = isolate->heap();
1242 
1243  Handle<Object> receiver = args.receiver();
1244 
1245  // Get the object called.
1246  JSObject* obj = JSObject::cast(*receiver);
1247 
1248  // Get the invocation callback from the function descriptor that was
1249  // used to create the called object.
1250  ASSERT(obj->map()->has_instance_call_handler());
1251  JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
1252  ASSERT(constructor->shared()->IsApiFunction());
1253  Object* handler =
1254  constructor->shared()->get_api_func_data()->instance_call_handler();
1255  ASSERT(!handler->IsUndefined());
1256  CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
1257  Object* callback_obj = call_data->callback();
1258  v8::FunctionCallback callback =
1259  v8::ToCData<v8::FunctionCallback>(callback_obj);
1260 
1261  // Get the data for the call and perform the callback.
1262  Object* result;
1263  {
1264  HandleScope scope(isolate);
1265  LOG(isolate, ApiObjectAccess("call non-function", obj));
1266 
1267  FunctionCallbackArguments custom(isolate,
1268  call_data->data(),
1269  constructor,
1270  obj,
1271  &args[0] - 1,
1272  args.length() - 1,
1273  is_construct_call);
1274  v8::Handle<v8::Value> value = custom.Call(callback);
1275  if (value.IsEmpty()) {
1276  result = heap->undefined_value();
1277  } else {
1278  result = *reinterpret_cast<Object**>(*value);
1279  result->VerifyApiCallResultType();
1280  }
1281  }
1282  // Check for exceptions and return result.
1284  return result;
1285 }
1286 
1287 
1288 // Handle calls to non-function objects created through the API. This delegate
1289 // function is used when the call is a normal function call.
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
  return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
}
1293 
1294 
1295 // Handle calls to non-function objects created through the API. This delegate
1296 // function is used when the call is a construct call.
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
  return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
}
1300 
1301 
// Code generator for the LoadIC miss stub.
static void Generate_LoadIC_Miss(MacroAssembler* masm) {
  LoadIC::GenerateMiss(masm);
}
1305 
1306 
// Code generator for the LoadIC stub handling normal (dictionary) objects.
static void Generate_LoadIC_Normal(MacroAssembler* masm) {
  LoadIC::GenerateNormal(masm);
}
1310 
1311 
1312 static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
1314 }
1315 
1316 
1317 static void Generate_LoadIC_Slow(MacroAssembler* masm) {
1319 }
1320 
1321 
1322 static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
1324 }
1325 
1326 
1327 static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
1329 }
1330 
1331 
1332 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
1334 }
1335 
1336 
1337 static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
1339 }
1340 
1341 
1342 static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
1344 }
1345 
1346 
1347 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
1349 }
1350 
1351 
1352 static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
1354 }
1355 
1356 
1357 static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
1359 }
1360 
1361 
// Code generator for the StoreIC slow path.
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
  StoreIC::GenerateSlow(masm);
}
1365 
1366 
// Code generator for the StoreIC miss stub.
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
  StoreIC::GenerateMiss(masm);
}
1370 
1371 
1372 static void Generate_StoreIC_Normal(MacroAssembler* masm) {
1374 }
1375 
1376 
1377 static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
1379 }
1380 
1381 
1382 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
1384 }
1385 
1386 
1387 static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
1389 }
1390 
1391 
1392 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
1394 }
1395 
1396 
1397 static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
1399 }
1400 
1401 
1402 static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
1404 }
1405 
1406 
1407 static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
1409 }
1410 
1411 
1412 static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
1414 }
1415 
1416 
1417 static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
1419 }
1420 
1421 
1422 static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
1424 }
1425 
1426 
1427 #ifdef ENABLE_DEBUGGER_SUPPORT
// Debugger support: debug-break stub for LoadIC sites.
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateLoadICDebugBreak(masm);
}
1431 
1432 
// Debugger support: debug-break stub for StoreIC sites.
static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateStoreICDebugBreak(masm);
}
1436 
1437 
// Debugger support: debug-break stub for KeyedLoadIC sites.
static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateKeyedLoadICDebugBreak(masm);
}
1441 
1442 
// Debugger support: debug-break stub for KeyedStoreIC sites.
static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateKeyedStoreICDebugBreak(masm);
}
1446 
1447 
// Debugger support: debug-break stub for CompareNilIC sites.
static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateCompareNilICDebugBreak(masm);
}
1451 
1452 
// Debugger support: debug-break stub at function returns.
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateReturnDebugBreak(masm);
}
1456 
1457 
// Debugger support: debug-break stub for CallFunctionStub call sites.
static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateCallFunctionStubDebugBreak(masm);
}
1461 
1462 
// Debugger support: debug-break stub for the recording CallFunctionStub.
static void Generate_CallFunctionStub_Recording_DebugBreak(
    MacroAssembler* masm) {
  Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
}
1467 
1468 
// Debugger support: debug-break stub for CallConstructStub call sites.
static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateCallConstructStubDebugBreak(masm);
}
1472 
1473 
// Debugger support: debug-break stub for the recording CallConstructStub.
static void Generate_CallConstructStub_Recording_DebugBreak(
    MacroAssembler* masm) {
  Debug::GenerateCallConstructStubRecordDebugBreak(masm);
}
1478 
1479 
// Debugger support: debug-break stub for slot accesses.
static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateSlotDebugBreak(masm);
}
1483 
1484 
// LiveEdit support: plain-return stub used while patching functions.
static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
  Debug::GeneratePlainReturnLiveEdit(masm);
}
1488 
1489 
// LiveEdit support: frame-dropper stub used while patching functions.
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
  Debug::GenerateFrameDropperLiveEdit(masm);
}
1493 #endif
1494 
1495 
1496 Builtins::Builtins() : initialized_(false) {
1497  memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
1498  memset(names_, 0, sizeof(names_[0]) * builtin_count);
1499 }
1500 
1501 
// Intentionally empty; see TearDown() for de-initialization.
Builtins::~Builtins() {
}
1504 
1505 
1506 #define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
1507 Address const Builtins::c_functions_[cfunction_count] = {
1509 };
1510 #undef DEF_ENUM_C
1511 
1512 #define DEF_JS_NAME(name, ignore) #name,
1513 #define DEF_JS_ARGC(ignore, argc) argc,
1514 const char* const Builtins::javascript_names_[id_count] = {
1516 };
1517 
1518 int const Builtins::javascript_argc_[id_count] = {
1520 };
1521 #undef DEF_JS_NAME
1522 #undef DEF_JS_ARGC
1523 
1524 struct BuiltinDesc {
1527  const char* s_name; // name is only used for generating log information.
1528  int name;
1531 };
1532 
1533 #define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
1534 
1536  public:
1538  CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
1539  return functions_;
1540  }
1541 
1543  BuiltinDesc functions_[Builtins::builtin_count + 1];
1544 
1545  friend class Builtins;
1546 };
1547 
1548 static BuiltinFunctionTable builtin_function_table =
1550 
1551 // Define array of pointers to generators and C builtin functions.
1552 // We do this in a sort of roundabout way so that we can do the initialization
1553 // within the lexical scope of Builtins:: and within a context where
1554 // Code::Flags names a non-abstract type.
1555 void Builtins::InitBuiltinFunctionTable() {
1556  BuiltinDesc* functions = builtin_function_table.functions_;
1557  functions[builtin_count].generator = NULL;
1558  functions[builtin_count].c_code = NULL;
1559  functions[builtin_count].s_name = NULL;
1560  functions[builtin_count].name = builtin_count;
1561  functions[builtin_count].flags = static_cast<Code::Flags>(0);
1562  functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
1563 
1564 #define DEF_FUNCTION_PTR_C(aname, aextra_args) \
1565  functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
1566  functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
1567  functions->s_name = #aname; \
1568  functions->name = c_##aname; \
1569  functions->flags = Code::ComputeFlags(Code::BUILTIN); \
1570  functions->extra_args = aextra_args; \
1571  ++functions;
1572 
1573 #define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
1574  functions->generator = FUNCTION_ADDR(Generate_##aname); \
1575  functions->c_code = NULL; \
1576  functions->s_name = #aname; \
1577  functions->name = k##aname; \
1578  functions->flags = Code::ComputeFlags(Code::kind, \
1579  state, \
1580  extra); \
1581  functions->extra_args = NO_EXTRA_ARGUMENTS; \
1582  ++functions;
1583 
1584 #define DEF_FUNCTION_PTR_H(aname, kind) \
1585  functions->generator = FUNCTION_ADDR(Generate_##aname); \
1586  functions->c_code = NULL; \
1587  functions->s_name = #aname; \
1588  functions->name = k##aname; \
1589  functions->flags = Code::ComputeHandlerFlags(Code::kind); \
1590  functions->extra_args = NO_EXTRA_ARGUMENTS; \
1591  ++functions;
1592 
1597 
1598 #undef DEF_FUNCTION_PTR_C
1599 #undef DEF_FUNCTION_PTR_A
1600 }
1601 
1602 
1603 void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
1604  ASSERT(!initialized_);
1605  Heap* heap = isolate->heap();
1606 
1607  // Create a scope for the handles in the builtins.
1608  HandleScope scope(isolate);
1609 
1610  const BuiltinDesc* functions = builtin_function_table.functions();
1611 
1612  // For now we generate builtin adaptor code into a stack-allocated
1613  // buffer, before copying it into individual code objects. Be careful
1614  // with alignment, some platforms don't like unaligned code.
1615  // TODO(jbramley): I had to increase the size of this buffer from 8KB because
1616  // we can generate a lot of debug code on ARM64.
1617  union { int force_alignment; byte buffer[16*KB]; } u;
1618 
1619  // Traverse the list of builtins and generate an adaptor in a
1620  // separate code object for each one.
1621  for (int i = 0; i < builtin_count; i++) {
1622  if (create_heap_objects) {
1623  MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
1624  // Generate the code/adaptor.
1625  typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
1626  Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
1627  // We pass all arguments to the generator, but it may not use all of
1628  // them. This works because the first arguments are on top of the
1629  // stack.
1630  ASSERT(!masm.has_frame());
1631  g(&masm, functions[i].name, functions[i].extra_args);
1632  // Move the code into the object heap.
1633  CodeDesc desc;
1634  masm.GetCode(&desc);
1635  Code::Flags flags = functions[i].flags;
1636  Object* code = NULL;
1637  {
1638  // During startup it's OK to always allocate and defer GC to later.
1639  // This simplifies things because we don't need to retry.
1640  AlwaysAllocateScope __scope__(isolate);
1641  { MaybeObject* maybe_code =
1642  heap->CreateCode(desc, flags, masm.CodeObject());
1643  if (!maybe_code->ToObject(&code)) {
1645  }
1646  }
1647  }
1648  // Log the event and add the code to the builtins array.
1649  PROFILE(isolate,
1650  CodeCreateEvent(Logger::BUILTIN_TAG,
1651  Code::cast(code),
1652  functions[i].s_name));
1654  functions[i].s_name,
1655  Code::cast(code)));
1656  builtins_[i] = code;
1657 #ifdef ENABLE_DISASSEMBLER
1658  if (FLAG_print_builtin_code) {
1659  CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
1660  PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
1661  Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file());
1662  PrintF(trace_scope.file(), "\n");
1663  }
1664 #endif
1665  } else {
1666  // Deserializing. The values will be filled in during IterateBuiltins.
1667  builtins_[i] = NULL;
1668  }
1669  names_[i] = functions[i].s_name;
1670  }
1671 
1672  // Mark as initialized.
1673  initialized_ = true;
1674 }
1675 
1676 
// Marks the table as uninitialized; the code objects themselves are heap
// objects and are not explicitly released here.
void Builtins::TearDown() {
  initialized_ = false;
}
1680 
1681 
// Visits every builtin code-object slot, e.g. so roots can be updated.
void Builtins::IterateBuiltins(ObjectVisitor* v) {
  v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
}
1685 
1686 
1687 const char* Builtins::Lookup(byte* pc) {
1688  // may be called during initialization (disassembler!)
1689  if (initialized_) {
1690  for (int i = 0; i < builtin_count; i++) {
1691  Code* entry = Code::cast(builtins_[i]);
1692  if (entry->contains(pc)) {
1693  return names_[i];
1694  }
1695  }
1696  }
1697  return NULL;
1698 }
1699 
1700 
// Tail-calls into the runtime to service a pending interrupt.
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
}
1704 
1705 
// Tail-calls into the runtime to perform a stack guard check.
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
}
1709 
1710 
1711 #define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
1712 Handle<Code> Builtins::name() { \
1713  Code** code_address = \
1714  reinterpret_cast<Code**>(builtin_address(k##name)); \
1715  return Handle<Code>(code_address); \
1716 }
1717 #define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
1718 Handle<Code> Builtins::name() { \
1719  Code** code_address = \
1720  reinterpret_cast<Code**>(builtin_address(k##name)); \
1721  return Handle<Code>(code_address); \
1722 }
1723 #define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
1724 Handle<Code> Builtins::name() { \
1725  Code** code_address = \
1726  reinterpret_cast<Code**>(builtin_address(k##name)); \
1727  return Handle<Code>(code_address); \
1728 }
1733 #undef DEFINE_BUILTIN_ACCESSOR_C
1734 #undef DEFINE_BUILTIN_ACCESSOR_A
1735 
1736 
1737 } } // namespace v8::internal
byte * Address
Definition: globals.h:186
static void GenerateSloppyArguments(MacroAssembler *masm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
BuiltinDesc functions_[Builtins::builtin_count+1]
Definition: builtins.cc:1543
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static const int kMaxLength
Definition: objects.h:3085
static void EnsureCanContainElements(Handle< JSObject > object, Object **elements, uint32_t count, EnsureElementsMode mode)
Definition: objects-inl.h:1603
static Object *& Object_at(Address addr)
Definition: v8memory.h:83
static CallHandlerInfo * cast(Object *obj)
bool IsHoleyElementsKind(ElementsKind kind)
#define BUILTINS_LIST_JS(V)
Definition: builtins.h:234
#define PROFILE(IsolateGetter, Call)
Definition: cpu-profiler.h:194
ElementsKind GetPackedElementsKind(ElementsKind holey_kind)
#define BUILTIN_LIST_H(V)
Definition: builtins.h:192
#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)
Definition: isolate.h:120
bool is_hidden_prototype()
Definition: objects.h:5889
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
CodeTracer * GetCodeTracer()
Definition: isolate.cc:2229
void(* FunctionCallback)(const FunctionCallbackInfo< Value > &info)
Definition: v8.h:2603
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
bool IsFastObjectElementsKind(ElementsKind kind)
#define LOG(isolate, Call)
Definition: log.h:86
const int KB
Definition: globals.h:245
static void GenerateMiss(MacroAssembler *masm)
virtual MUST_USE_RESULT Handle< Object > Get(Handle< Object > receiver, Handle< JSObject > holder, uint32_t key, Handle< FixedArrayBase > backing_store=Handle< FixedArrayBase >::null())=0
static Handle< T > cast(Handle< S > that)
Definition: handles.h:75
void CallOnce(OnceType *once, NoArgFunction init_func)
Definition: once.h:105
T Max(T a, T b)
Definition: utils.h:227
kSerializedDataOffset Object
Definition: objects-inl.h:5016
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including on console Map counters to a file Enable debugger compile events enable GDBJIT enable GDBJIT interface for all code objects dump only objects containing this substring stress the GC compactor to flush out pretty print source code print source AST function name where to insert a breakpoint print scopes for builtins trace contexts operations print stuff during garbage collection report code statistics after GC report handles after GC trace cache state transitions print interface inference details prints when objects are turned into dictionaries report heap spill statistics along with trace isolate state changes trace regexp bytecode execution Minimal Log all events to the log file Log API events to the log file Log heap samples on garbage collection for the hp2ps tool log positions Log suspect operations Used with turns on browser compatible mode for profiling v8 Specify the name of the log file Enable low level linux profiler Enable perf linux profiler(experimental annotate support).") DEFINE_string(gc_fake_mmap
static Failure * Exception()
Definition: objects-inl.h:1244
const int kMaxInt
Definition: globals.h:248
uint32_t Flags
Definition: objects.h:5184
#define ASSERT(condition)
Definition: checks.h:329
AtomicWord OnceType
Definition: once.h:83
virtual MUST_USE_RESULT Handle< Object > SetLength(Handle< JSArray > holder, Handle< Object > new_length)=0
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)
Definition: builtins.cc:1711
Handle< Object > GetProperty(Handle< JSReceiver > obj, const char *name)
Definition: handles.cc:196
#define RETURN_IF_EMPTY_HANDLE(isolate, call)
Definition: isolate.h:151
MUST_USE_RESULT MaybeObject * EnsureWritableFastElements()
Definition: objects-inl.h:6124
ArrayStorageAllocationMode
Definition: heap.h:554
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
static Object ** RawField(HeapObject *obj, int offset)
Definition: objects-inl.h:1199
static Smi * cast(Object *object)
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind)
Definition: builtins.cc:1723
static void GenerateGeneric(MacroAssembler *masm, StrictMode strict_mode)
bool contains(byte *pc)
Definition: objects-inl.h:5892
int isnan(double x)
static void TransitionElementsKind(Handle< JSObject > object, ElementsKind to_kind)
Definition: objects.cc:12779
uint8_t byte
Definition: globals.h:185
HANDLE HANDLE LPSTACKFRAME64 StackFrame
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static void GenerateInitialize(MacroAssembler *masm)
Definition: ic.h:419
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static Address c_entry_fp(ThreadLocalTop *thread)
Definition: isolate.h:648
#define MUST_USE_RESULT
Definition: globals.h:381
BuiltinExtraArguments
Definition: builtins.h:35
const int kDoubleSize
Definition: globals.h:266
static const int kCallerFPOffset
Definition: frames-arm.h:121
Local< Value > GetPrototype()
Definition: api.cc:3192
#define BUILTIN_FUNCTION_TABLE_INIT
Definition: builtins.cc:1533
void GetCode(CodeDesc *desc)
static void GeneratePreMonomorphic(MacroAssembler *masm)
Definition: ic.h:613
const int kPointerSize
Definition: globals.h:268
static void GenerateGeneric(MacroAssembler *masm)
static Address & Address_at(Address addr)
Definition: v8memory.h:79
static void GenerateMiss(MacroAssembler *masm)
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, ElementsKind to_kind)
static void MemMove(void *dest, const void *src, size_t size)
Definition: platform.h:402
const Register pc
static FunctionTemplateInfo * cast(Object *obj)
static void GenerateLoadViaGetterForDeopt(MacroAssembler *masm)
Definition: stub-cache.h:580
#define DEF_FUNCTION_PTR_C(aname, aextra_args)
static const int kMarkerOffset
Definition: frames.h:184
static void GenerateSlow(MacroAssembler *masm)
LargeObjectSpace * lo_space()
Definition: heap.h:646
const int kBitsPerInt
Definition: globals.h:290
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static void GenerateSloppyArguments(MacroAssembler *masm)
bool IsFastSmiOrObjectElementsKind(ElementsKind kind)
#define GDBJIT(action)
Definition: gdb-jit.h:137
static ElementsAccessor * ForKind(ElementsKind elements_kind)
Definition: elements.h:178
static const int kHeaderSize
Definition: objects.h:3016
#define DEF_ENUM_C(name, ignore)
Definition: builtins.cc:1506
MUST_USE_RESULT MaybeObject * CreateCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false, bool crankshafted=false, int prologue_offset=Code::kPrologueOffsetNotSet)
Definition: heap.cc:4119
static void GenerateSlow(MacroAssembler *masm)
bool Contains(HeapObject *obj)
Definition: spaces.cc:3083
static const int kMapOffset
Definition: objects.h:1890
static const int kCopyToEndAndInitializeToHole
Definition: elements.h:147
#define BUILTIN_LIST_DEBUG_A(V)
Definition: builtins.h:230
static HeapNumber * cast(Object *obj)
#define DEF_FUNCTION_PTR_H(aname, kind)
static const int kLengthOffset
Definition: objects.h:3015
static Handle< Object > Call(Isolate *isolate, Handle< Object > callable, Handle< Object > receiver, int argc, Handle< Object > argv[], bool *pending_exception, bool convert_receiver=false)
Definition: execution.cc:153
bool is_null() const
Definition: handles.h:81
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function info
Definition: flags.cc:317
static const int kArgumentsLengthIndex
Definition: heap.h:1104
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
ElementsKind GetInitialFastElementsKind()
static void GenerateString(MacroAssembler *masm)
void MoveElements(FixedArray *array, int dst_index, int src_index, int len)
Definition: heap.cc:883
#define DEF_JS_ARGC(ignore, argc)
Definition: builtins.cc:1513
static Handle< T > null()
Definition: handles.h:80
void MemsetPointer(T **dest, U *value, int counter)
Definition: v8utils.h:198
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1369
void USE(T)
Definition: globals.h:341
static FixedArray * cast(Object *obj)
static void GenerateNormal(MacroAssembler *masm)
virtual void CopyElements(Handle< JSObject > source_holder, uint32_t source_start, ElementsKind source_kind, Handle< FixedArrayBase > destination, uint32_t destination_start, int copy_size, Handle< FixedArrayBase > source=Handle< FixedArrayBase >::null())=0
static void GenerateIndexedInterceptor(MacroAssembler *masm)
bool IsFastHoleyElementsKind(ElementsKind kind)
#define BUILTIN(name)
Definition: builtins.cc:143
static Handle< Object > GetElement(Isolate *isolate, Handle< Object > object, uint32_t index)
Definition: objects-inl.h:1060
static const int kMaxLength
Definition: objects.h:3174
#define DEF_FUNCTION_PTR_A(aname, kind, state, extra)
HeapObject * obj
const Register fp
Vector< Handle< Object > > HandleVector(v8::internal::Handle< T > *elms, int length)
Definition: v8utils.h:118
#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra)
Definition: builtins.cc:1717
static void GenerateMiss(MacroAssembler *masm)
bool CanMoveObjectStart(HeapObject *object)
Definition: heap.cc:4019
#define DEF_ARG_TYPE(name, spec)
Definition: builtins.cc:107
T Min(T a, T b)
Definition: utils.h:234
static SignatureInfo * cast(Object *obj)
static FixedArrayBase * cast(Object *object)
Definition: objects-inl.h:2121
static void GenerateStoreViaSetterForDeopt(MacroAssembler *masm)
Definition: stub-cache.h:733
static const int kMaxValue
Definition: objects.h:1681
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
BuiltinExtraArguments extra_args
Definition: builtins.cc:1530
#define BUILTIN_LIST_C(V)
Definition: builtins.h:67
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
static void GenerateInitialize(MacroAssembler *masm)
Definition: ic.h:612
#define DEF_JS_NAME(name, ignore)
Definition: builtins.cc:1512
static JSObject * cast(Object *obj)
static void GeneratePreMonomorphic(MacroAssembler *masm)
Definition: ic.h:420
#define BUILTIN_LIST_A(V)
Definition: builtins.h:88
bool IsFastDoubleElementsKind(ElementsKind kind)
static void GenerateNormal(MacroAssembler *masm)
static void GenerateMiss(MacroAssembler *masm)
static JSFunction * cast(Object *obj)