v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
ic-x64.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "codegen.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

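// A note on notation: ACCESS_MASM expands `__` to `masm->`, so a line
// such as
//
//   __ movp(rax, FieldOperand(rdx, HeapObject::kMapOffset));
//
// is an ordinary C++ call that runs at stub-generation time and appends
// the corresponding x64 instruction to the code buffer. The functions in
// this file build machine code; nothing here runs during a JavaScript
// property access except the emitted instructions themselves.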
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
  __ j(equal, global_object);
  __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
  __ j(equal, global_object);
  __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
  __ j(equal, global_object);
}
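
// For illustration: the three instance types above cover the JS global
// object, the builtins object, and the global proxy that scripts actually
// see. Receivers of these types are diverted to global_object (typically
// a miss) because, roughly speaking, their named properties are backed by
// property cells with their own handlers, so the generic dictionary code
// in this file must not touch them.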


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register r0,
                                                Register r1,
                                                Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   r0: used to hold the receiver instance type.
  //       Holds the property dictionary on fall through.
  //   r1: used to hold the receiver's map.

  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
  __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
  __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
  __ j(below, miss);

  // If this assert fails, we have to check upper bound too.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, r0, miss);

  // Check for non-global object that requires access check.
  __ testb(FieldOperand(r1, Map::kBitFieldOffset),
           Immediate((1 << Map::kIsAccessCheckNeeded) |
                     (1 << Map::kHasNamedInterceptor)));
  __ j(not_zero, miss);

  __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
  __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(not_equal, miss);
}


// Helper function used to load a property from a dictionary backing
// storage. This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if name is not an internalized string,
// and will jump to the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss_label,
                                   Register elements,
                                   Register name,
                                   Register r0,
                                   Register r1,
                                   Register result) {
  // Register use:
  //
  //   elements - holds the property dictionary on entry and is unchanged.
  //
  //   name - holds the name of the property on entry and is unchanged.
  //
  //   r0 - used to hold the capacity of the property dictionary.
  //
  //   r1 - used to hold the index into the property dictionary.
  //
  //   result - holds the result on exit if the load succeeded.

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss_label,
                                                   &done,
                                                   elements,
                                                   name,
                                                   r0,
                                                   r1);

  // If probing finds an entry in the dictionary, r1 contains the
  // index into the dictionary. Check that the value is a normal
  // property.
  __ bind(&done);
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Test(Operand(elements, r1, times_pointer_size,
                  kDetailsOffset - kHeapObjectTag),
          Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ j(not_zero, miss_label);

  // Get the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ movp(result,
          Operand(elements, r1, times_pointer_size,
                  kValueOffset - kHeapObjectTag));
}
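
// A sketch of the layout the offsets above rely on: a NameDictionary is a
// hash table whose entries are (key, value, details) triplets stored after
// the header fields. With kElementsStartOffset pointing at the first key,
// an entry whose scaled index is already in r1 is, in effect:
//
//   key     at kElementsStartOffset + 0 * kPointerSize
//   value   at kElementsStartOffset + 1 * kPointerSize   // kValueOffset
//   details at kElementsStartOffset + 2 * kPointerSize   // kDetailsOffset
//
// The details word is a smi-encoded PropertyDetails; testing it against
// TypeField::kMask verifies that this is a normal data property.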


// Helper function used to store a property to a dictionary backing
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
// call if name is not an internalized string, and will jump to the
// miss_label in that case. The generated code assumes that the receiver
// has slow properties, is not a global object and does not have
// interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss_label,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch0,
                                    Register scratch1) {
  // Register use:
  //
  //   elements - holds the property dictionary on entry and is clobbered.
  //
  //   name - holds the name of the property on entry and is unchanged.
  //
  //   value - holds the value to store and is unchanged.
  //
  //   scratch0 - used during the positive dictionary lookup and is clobbered.
  //
  //   scratch1 - used for index into the property dictionary and is clobbered.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss_label,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch0,
                                                   scratch1);

  // If probing finds an entry in the dictionary, scratch1 contains the
  // index into the dictionary. Check that the value is a normal
  // property that is not read only.
  __ bind(&done);
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ Test(Operand(elements,
                  scratch1,
                  times_pointer_size,
                  kDetailsOffset - kHeapObjectTag),
          Smi::FromInt(kTypeAndReadOnlyMask));
  __ j(not_zero, miss_label);

  // Store the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ leap(scratch1, Operand(elements,
                            scratch1,
                            times_pointer_size,
                            kValueOffset - kHeapObjectTag));
  __ movp(Operand(scratch1, 0), value);

  // Update write barrier. Make sure not to clobber the value.
  __ movp(scratch0, value);
  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
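
// A note on the read-only test above: PropertyDetails is stored as a smi,
// so the mask is shifted by kSmiTagSize to line up with the tagged value.
// The test passes only when the type field is NORMAL (which, assuming the
// usual encoding, is zero) and the READ_ONLY attribute bit is clear; e.g.
// a store to a property made non-writable via Object.defineProperty takes
// miss_label and is completed by the runtime instead.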


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           int interceptor_bit,
                                           Label* slow) {
  // Register use:
  //   receiver - holds the receiver and is unchanged.
  // Scratch registers:
  //   map - used to hold the map of the receiver.

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing
  // into string objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
  __ j(below, slow);

  // Check bit field.
  __ testb(FieldOperand(map, Map::kBitFieldOffset),
           Immediate((1 << Map::kIsAccessCheckNeeded) |
                     (1 << interceptor_bit)));
  __ j(not_zero, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  //   receiver - holds the receiver on entry.
  //              Unchanged unless 'result' is the same register.
  //
  //   key - holds the smi key on entry.
  //         Unchanged unless 'result' is the same register.
  //
  //   elements - holds the elements of the receiver on exit.
  //
  //   result - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  //   scratch - used to hold elements of the receiver and the loaded value.

  __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                   Heap::kFixedArrayMapRootIndex);
    __ j(not_equal, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
  // Unsigned comparison rejects negative indices.
  __ j(above_equal, out_of_range);
  // Fast case: Do the load.
  SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
  __ movp(scratch, FieldOperand(elements,
                                index.reg,
                                index.scale,
                                FixedArray::kHeaderSize));
  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ j(equal, out_of_range);
  if (!result.is(scratch)) {
    __ movp(result, scratch);
  }
}
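
// A concrete case for the hole check above, sketched in JavaScript:
//
//   var a = [1, , 3];          // a[1] is the_hole in the elements store
//   Array.prototype[1] = 42;
//   a[1];                      // must yield 42, not undefined
//
// Returning the_hole directly would skip the prototype chain, so the stub
// bails out via out_of_range (leading to the slow path) instead.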


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // Register use:
  //   key - holds the key and is unchanged. Assumed to be non-smi.
  // Scratch registers:
  //   map - used to hold the map of the key.
  //   hash - used to hold the hash of the key.
  Label unique;
  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
  __ j(above, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ j(equal, &unique);

  // Is the string an array index, with cached numeric value?
  __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
  __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
  __ j(zero, index_string);  // The value in hash is used at jump target.

  // Is the string internalized? We already know it's a string so a single
  // bit test is enough.
  STATIC_ASSERT(kNotInternalizedTag != 0);
  __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
           Immediate(kIsNotInternalizedMask));
  __ j(not_zero, not_unique);

  __ bind(&unique);
}
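
// For illustration: a key such as "7" in o["7"] carries a cached numeric
// value in its hash field, so it takes the index_string exit and is
// handled as an element access; internalized strings and symbols fall
// through as unique names and go down the named-property path instead.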


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  // Check that the key is a smi.
  __ JumpIfNotSmi(rax, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(rcx, &check_number_dictionary);

  GenerateFastArrayLoad(masm,
                        rdx,
                        rax,
                        rcx,
                        rbx,
                        rax,
                        NULL,
                        &slow);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
  __ ret(0);

  __ bind(&check_number_dictionary);
  __ SmiToInteger32(rbx, rax);
  __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));

  // Check whether the elements is a number dictionary.
  // rdx: receiver
  // rax: key
  // rbx: key as untagged int32
  // rcx: elements
  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(not_equal, &slow);
  __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
  __ ret(0);

  __ bind(&slow);
  // Slow case: Jump to runtime.
  // rdx: receiver
  // rax: key
  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary leaving result in rcx.
  __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(equal, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the string hash.
  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ movl(rcx, rbx);
  __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
  __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
  __ shr(rdi, Immediate(String::kHashShift));
  __ xorp(rcx, rdi);
  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
  __ andp(rcx, Immediate(mask));

  // Load the key (consisting of map and internalized string) from the cache
  // and check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys
      = ExternalReference::keyed_lookup_cache_keys(masm->isolate());

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ movp(rdi, rcx);
    __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
    __ LoadAddress(kScratchRegister, cache_keys);
    int off = kPointerSize * i * 2;
    __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
    __ j(not_equal, &try_next_entry);
    __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
    __ j(equal, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
  __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
  __ j(not_equal, &slow);
  __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
  __ j(not_equal, &slow);

  // Get field offset, which is a 32-bit integer.
  ExternalReference cache_field_offsets
      = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    if (i != 0) {
      __ addl(rcx, Immediate(i));
    }
    __ LoadAddress(kScratchRegister, cache_field_offsets);
    __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
    __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
    __ subp(rdi, rcx);
    __ j(above_equal, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
  __ addp(rcx, rdi);
  __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);

  // Load property array property.
  __ bind(&property_array_property);
  __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
  __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
                            FixedArray::kHeaderSize));
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // rdx: receiver
  // rax: key
  // rbx: elements

  __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);

  GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
  __ ret(0);

  __ bind(&index_name);
  __ IndexFromHash(rbx, rax);
  __ jmp(&index_smi);
}
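
// A sketch of the KeyedLookupCache probed above: it is a fixed-size,
// bucketed, per-isolate cache. The keys table stores (map, internalized
// name) pairs, two pointers per entry (hence the shift by
// kPointerSizeLog2 + 1), with a parallel table of 32-bit field indices.
// The hash mixes the low bits of the map pointer with the name's hash.
// On a hit, the cached index has the number of in-object properties
// subtracted from it: if the subtraction underflows, the property is
// stored in-object; otherwise the remainder indexes the out-of-line
// properties array.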


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label miss;

  Register receiver = rdx;
  Register index = rax;
  Register scratch = rcx;
  Register result = rax;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ ret(0);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(rdx, &slow);

  // Check that the key is an array index, that is Uint32.
  STATIC_ASSERT(kSmiValueSize <= 32);
  __ JumpUnlessNonNegativeSmi(rax, &slow);

  // Get the map of the receiver.
  __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
  __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
  __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
  __ j(not_zero, &slow);

  // Everything is fine, call runtime.
  __ PopReturnAddressTo(rcx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // key
  __ PushReturnAddressFrom(rcx);

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ bind(&slow);
  GenerateMiss(masm);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  // rax: value
  // rbx: receiver's elements array (a FixedArray)
  // rcx: index
  // rdx: receiver (a JSArray)
  // r9: map of receiver
  if (check_map == kCheckMap) {
    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ movp(kScratchRegister, FieldOperand(rbx,
                                         rcx,
                                         times_pointer_size,
                                         FixedArray::kHeaderSize));
  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(rax, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(rcx, 1));
    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
          rax);
  __ ret(0);

  __ bind(&non_smi_value);
  // Writing a non-smi, check whether array allows non-smi elements.
  // r9: receiver's map
  __ CheckFastObjectElements(r9, &transition_smi_elements);

  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(rcx, 1));
    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
  }
  __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
          rax);
  __ movp(rdx, rax);  // Preserve the value which is returned.
  __ RecordWriteArray(
      rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ ret(0);

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    // rdi: elements array's map
    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
    __ j(not_equal, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmpl(FieldOperand(rbx, rcx, times_8, offset), Immediate(kHoleNanUpper32));
  __ j(not_equal, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ leal(rdi, Operand(rcx, 1));
    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
  }
  __ ret(0);

  __ bind(&transition_smi_elements);
  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));

  // Transition the array appropriately depending on the value type.
  __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         rbx,
                                         rdi,
                                         slow);
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         rbx,
                                         rdi,
                                         slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         rbx,
                                         rdi,
                                         slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
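
// The transitions above walk V8's elements-kind lattice one step at a
// time; roughly:
//
//   FAST_SMI_ELEMENTS    --(store heap number)------> FAST_DOUBLE_ELEMENTS
//   FAST_SMI_ELEMENTS    --(store other object)-----> FAST_ELEMENTS
//   FAST_DOUBLE_ELEMENTS --(store non-number)-------> FAST_ELEMENTS
//
// e.g. a = [1, 2, 3]; a[0] = 1.5; takes the first edge, and a[0] = "x";
// on the result takes the third. Each transition swaps the receiver's
// map and, where needed, rewrites the elements backing store.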


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Check that the object isn't a smi.
  __ JumpIfSmi(rdx, &slow_with_tagged_index);
  // Get the map from the receiver.
  __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle
  // observed objects.
  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ j(not_zero, &slow_with_tagged_index);
  // Check that the key is a smi.
  __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
  __ SmiToInteger32(rcx, rcx);

  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JSObject.
  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  // rax: value
  // rdx: JSObject
  // rcx: index
  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
  // Check array bounds.
  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
  // rax: value
  // rbx: FixedArray
  // rcx: index
  __ j(above, &fast_object);

  // Slow case: call runtime.
  __ bind(&slow);
  __ Integer32ToSmi(rcx, rcx);
  __ bind(&slow_with_tagged_index);
  GenerateRuntimeSetProperty(masm, strict_mode);
  // Never returns to here.

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // rax: value
  // rdx: receiver (a JSArray)
  // rbx: receiver's elements array (a FixedArray)
  // rcx: index
  // flags: smicompare (rdx.length(), rbx)
  __ j(not_equal, &slow);  // do not leave holes in the array
  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
  __ j(below_equal, &slow);
  // Increment index to get new length.
  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  // rdi: elements array's map
  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  // rax: value
  // rdx: receiver (a JSArray)
  // rcx: index
  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));

  // Check the key against the length in the array, compute the
  // address to store into and fall through to fast case.
  __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
  __ j(below_equal, &extra);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength);
}
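
// For illustration, the grow path reached via the 'extra' label: writing
// a[a.length] = v on a fast JSArray with spare elements capacity stores
// the value and bumps the length by one entirely in generated code, so
// the common push-like pattern never reaches the runtime. A write past
// a.length, which would leave a hole, falls back to the slow path.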


static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                             Register object,
                                             Register key,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* unmapped_case,
                                             Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
  __ j(below, slow_case);

  // Check that the key is a positive smi.
  Condition check = masm->CheckNonNegativeSmi(key);
  __ j(NegateCondition(check), slow_case);

  // Load the elements into scratch1 and check its map. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments.
  __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
  __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
  __ cmpp(key, scratch2);
  __ j(greater_equal, unmapped_case);

  // Load element index and check whether it is the hole.
  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
  __ SmiToInteger64(scratch3, key);
  __ movp(scratch2, FieldOperand(scratch1,
                                 scratch3,
                                 times_pointer_size,
                                 kHeaderSize));
  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ j(equal, unmapped_case);

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
  __ SmiToInteger64(scratch3, scratch2);
  return FieldOperand(scratch1,
                      scratch3,
                      times_pointer_size,
                      Context::kHeaderSize);
}
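
// A sketch of the sloppy-arguments elements this lookup walks: the
// parameter map is a FixedArray of the shape
//
//   [0]     context
//   [1]     arguments backing store (a FixedArray)
//   [2 + i] context slot index for parameter i, or the_hole
//
// which is why the code above skips two header slots, treats length - 2
// as the mapped range, and, on a non-hole entry, indexes the context with
// the stored slot. A mapped entry keeps arguments[i] aliased to the named
// parameter; a the_hole entry means the alias was broken and the value
// lives in the backing store handled by the unmapped lookup below.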


static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                               Register key,
                                               Register parameter_map,
                                               Register scratch,
                                               Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
  __ cmpp(key, scratch);
  __ j(greater_equal, slow_case);
  __ SmiToInteger64(scratch, key);
  return FieldOperand(backing_store,
                      scratch,
                      times_pointer_size,
                      FixedArray::kHeaderSize);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow, notin;
  Operand mapped_location =
      GenerateMappedArgumentsLookup(
          masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
  __ movp(rax, mapped_location);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in rbx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
  __ j(equal, &slow);
  __ movp(rax, unmapped_location);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label slow, notin;
  Operand mapped_location = GenerateMappedArgumentsLookup(
      masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
  __ movp(mapped_location, rax);
  __ leap(r9, mapped_location);
  __ movp(r8, rax);
  __ RecordWrite(rbx,
                 r9,
                 r8,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 INLINE_SMI_CHECK);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in rbx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
  __ movp(unmapped_location, rax);
  __ leap(r9, unmapped_location);
  __ movp(r8, rax);
  __ RecordWrite(rbx,
                 r9,
                 r8,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 INLINE_SMI_CHECK);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : receiver
  //  -- rcx    : name
  //  -- rsp[0] : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, rax, rcx, rbx, rdx);

  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : receiver
  //  -- rcx    : name
  //  -- rsp[0] : return address
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);

  // rdx: elements
  // Search the dictionary placing the result in rax.
  GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
  __ ret(0);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : receiver
  //  -- rcx    : name
  //  -- rsp[0] : return address
  // -----------------------------------

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->load_miss(), 1);

  __ PopReturnAddressTo(rbx);
  __ Push(rax);  // receiver
  __ Push(rcx);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
}
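
// The Pop/PushReturnAddress pattern above recurs in every miss handler in
// this file: on x64 the caller's return address sits on top of the stack,
// so it is popped into a scratch register, the call arguments are pushed
// in its place, and it is pushed back on top. The tail call then behaves
// as if the caller had invoked the runtime entry directly with those
// arguments (here two of them, returning one value).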


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : receiver
  //  -- rcx    : name
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rax);  // receiver
  __ Push(rcx);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_miss(), 1);

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : name
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, rdx, rcx, rbx, no_reg);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : name
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : name
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);

  GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1);
  __ ret(0);

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : name
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  __ PopReturnAddressTo(rbx);
  __ Push(rdx);
  __ Push(rcx);
  __ Push(rax);
  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
  __ Push(Smi::FromInt(strict_mode));
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // key
  __ Push(rax);  // value
  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
  __ Push(Smi::FromInt(strict_mode));  // Strict mode.
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // key
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // key
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // key
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


#undef __



Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  return *test_instruction_address == Assembler::kTestAlByte;
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  if (*test_instruction_address != Assembler::kTestAlByte) {
    ASSERT(*test_instruction_address == Assembler::kNopByte);
    return;
  }

  Address delta_address = test_instruction_address + 1;
  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, test=%p, delta=%d\n",
           address, test_instruction_address, delta);
  }

  // Patch with a short conditional jump. Enabling means switching from a short
  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
  // reverse operation of that.
  Address jmp_address = test_instruction_address - delta;
  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
         ? (*jmp_address == Assembler::kJncShortOpcode ||
            *jmp_address == Assembler::kJcShortOpcode)
         : (*jmp_address == Assembler::kJnzShortOpcode ||
            *jmp_address == Assembler::kJzShortOpcode));
  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
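
// A sketch of the patch site this code expects, under the layout implied
// by the offsets above:
//
//   jc/jnc <target>      ; short, patchable jump guarding the smi check
//   ...                  ; delta bytes back from the test instruction
//   call <ic stub>
//   test al, <delta>     ; marker encoding the distance to the jump
//
// The test instruction is harmless to execute; its immediate operand is
// read here as data. Rewriting the short jump between the carry and zero
// condition codes toggles the inlined smi check without moving any code.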


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64