v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
ic-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_IA32
31 
32 #include "codegen.h"
33 #include "ic-inl.h"
34 #include "runtime.h"
35 #include "stub-cache.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 // ----------------------------------------------------------------------------
41 // Static IC stub generators.
42 //
43 
44 #define __ ACCESS_MASM(masm)
45 
46 
47 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
48  Register type,
49  Label* global_object) {
50  // Register usage:
51  // type: holds the receiver instance type on entry.
52  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
53  __ j(equal, global_object);
54  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
55  __ j(equal, global_object);
56  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
57  __ j(equal, global_object);
58 }
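
Editor's note: a minimal standalone sketch (not part of ic-ia32.cc) of the predicate the three cmp/j(equal) pairs above implement. The enum values here are hypothetical stand-ins for the instance-type constants in objects.h.

#include <cstdio>

enum InstanceType {  // hypothetical stand-ins for the objects.h constants
  JS_GLOBAL_OBJECT_TYPE,
  JS_BUILTINS_OBJECT_TYPE,
  JS_GLOBAL_PROXY_TYPE,
  JS_OBJECT_TYPE
};

// Mirrors GenerateGlobalInstanceTypeCheck: the jump is taken iff any compare hits.
static bool IsGlobalObjectType(InstanceType type) {
  return type == JS_GLOBAL_OBJECT_TYPE ||
         type == JS_BUILTINS_OBJECT_TYPE ||
         type == JS_GLOBAL_PROXY_TYPE;
}

int main() {
  std::printf("%d\n", IsGlobalObjectType(JS_OBJECT_TYPE));  // prints 0
  return 0;
}
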
59 
60 
61 // Generated code falls through if the receiver is a regular non-global
62 // JS object with slow properties and no interceptors.
63 static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
64  Register receiver,
65  Register r0,
66  Register r1,
67  Label* miss) {
68  // Register usage:
69  // receiver: holds the receiver on entry and is unchanged.
70  // r0: used to hold receiver instance type.
71  // Holds the property dictionary on fall through.
72  // r1: used to hold the receiver's map.
73 
74  // Check that the receiver isn't a smi.
75  __ JumpIfSmi(receiver, miss);
76 
77  // Check that the receiver is a valid JS object.
78  __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
79  __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
80  __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
81  __ j(below, miss);
82 
83  // If this assert fails, we have to check upper bound too.
84  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
85 
86  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
87 
88  // Check for non-global object that requires access check.
89  __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
90  (1 << Map::kIsAccessCheckNeeded) |
91  (1 << Map::kHasNamedInterceptor));
92  __ j(not_zero, miss);
93 
94  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
95  __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
96  DONT_DO_SMI_CHECK);
97 }
98 
99 
100 // Helper function used to load a property from a dictionary backing
101 // storage. This function may fail to load a property even though it is
102 // in the dictionary, so code at miss_label must always call a backup
103 // property load that is complete. This function is safe to call if
104 // name is not internalized, and will jump to the miss_label in that
105 // case. The generated code assumes that the receiver has slow
106 // properties, is not a global object and does not have interceptors.
107 static void GenerateDictionaryLoad(MacroAssembler* masm,
108  Label* miss_label,
109  Register elements,
110  Register name,
111  Register r0,
112  Register r1,
113  Register result) {
114  // Register use:
115  //
116  // elements - holds the property dictionary on entry and is unchanged.
117  //
118  // name - holds the name of the property on entry and is unchanged.
119  //
120  // Scratch registers:
121  //
122  // r0 - used for the index into the property dictionary
123  //
124  // r1 - used to hold the capacity of the property dictionary.
125  //
126  // result - holds the result on exit.
127 
128  Label done;
129 
130  // Probe the dictionary.
131  NameDictionary::GeneratePositiveLookup(masm,
132  miss_label,
133  &done,
134  elements,
135  name,
136  r0,
137  r1);
138 
139  // If probing finds an entry in the dictionary, r0 contains the
140  // index into the dictionary. Check that the value is a normal
141  // property.
142  __ bind(&done);
143  const int kElementsStartOffset =
144  NameDictionary::kHeaderSize +
145  NameDictionary::kElementsStartIndex * kPointerSize;
146  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
147  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
148  Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
149  __ j(not_zero, miss_label);
150 
151  // Get the value at the masked, scaled index.
152  const int kValueOffset = kElementsStartOffset + kPointerSize;
153  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
154 }
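
Editor's note: a sketch (not part of ic-ia32.cc) of the offset arithmetic behind kElementsStartOffset, kValueOffset and kDetailsOffset above. Each NameDictionary entry occupies three consecutive words -- key, value, details -- after the dictionary header, so the value and details of the entry whose scaled index the probe leaves in r0 sit one and two pointers past its key. The header constants below are illustrative assumptions, not the real objects.h values.

#include <cstdio>

int main() {
  const int kPointerSize = 4;         // ia32
  const int kHeaderSize = 8;          // assumed FixedArray header (map + length)
  const int kElementsStartIndex = 5;  // assumed slot where entries begin
  const int kElementsStartOffset =
      kHeaderSize + kElementsStartIndex * kPointerSize;            // key word
  const int kValueOffset = kElementsStartOffset + kPointerSize;    // value word
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;  // details
  std::printf("key@%d value@%d details@%d\n",
              kElementsStartOffset, kValueOffset, kDetailsOffset);
  return 0;
}
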
155 
156 
157 // Helper function used to store a property to a dictionary backing
158 // storage. This function may fail to store a property even though it
159 // is in the dictionary, so code at miss_label must always call a
160 // backup property store that is complete. This function is safe to
161 // call if name is not internalized, and will jump to the miss_label in
162 // that case. The generated code assumes that the receiver has slow
163 // properties, is not a global object and does not have interceptors.
164 static void GenerateDictionaryStore(MacroAssembler* masm,
165  Label* miss_label,
166  Register elements,
167  Register name,
168  Register value,
169  Register r0,
170  Register r1) {
171  // Register use:
172  //
173  // elements - holds the property dictionary on entry and is clobbered.
174  //
175  // name - holds the name of the property on entry and is unchanged.
176  //
177  // value - holds the value to store and is unchanged.
178  //
179  // r0 - used for index into the property dictionary and is clobbered.
180  //
181  // r1 - used to hold the capacity of the property dictionary and is clobbered.
182  Label done;
183 
184 
185  // Probe the dictionary.
186  NameDictionary::GeneratePositiveLookup(masm,
187  miss_label,
188  &done,
189  elements,
190  name,
191  r0,
192  r1);
193 
194  // If probing finds an entry in the dictionary, r0 contains the
195  // index into the dictionary. Check that the value is a normal
196  // property that is not read only.
197  __ bind(&done);
198  const int kElementsStartOffset =
199  NameDictionary::kHeaderSize +
200  NameDictionary::kElementsStartIndex * kPointerSize;
201  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
202  const int kTypeAndReadOnlyMask =
203  (PropertyDetails::TypeField::kMask |
204  PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
205  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
206  Immediate(kTypeAndReadOnlyMask));
207  __ j(not_zero, miss_label);
208 
209  // Store the value at the masked, scaled index.
210  const int kValueOffset = kElementsStartOffset + kPointerSize;
211  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
212  __ mov(Operand(r0, 0), value);
213 
214  // Update write barrier. Make sure not to clobber the value.
215  __ mov(r1, value);
216  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
217 }
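
Editor's note: a sketch (not part of ic-ia32.cc) of why the masks above are shifted left by kSmiTagSize. The details word is stored as a smi (value << 1 on ia32), so a mask over the untagged field bits must be shifted the same amount before being tested against the in-memory word. The field layout below is an assumption for illustration.

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  const uint32_t kTypeFieldMask = 0x7;  // assumed: low 3 bits hold the type
  uint32_t details = 0x4;               // untagged details word
  uint32_t tagged = details << kSmiTagSize;  // as stored in the dictionary
  // Testing the shifted mask on the tagged word is equivalent to testing
  // the unshifted mask on the untagged value.
  assert(((tagged & (kTypeFieldMask << kSmiTagSize)) != 0) ==
         ((details & kTypeFieldMask) != 0));
  return 0;
}
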
218 
219 
220 // Checks the receiver for special cases (value type, slow case bits).
221 // Falls through for regular JS object.
222 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
223  Register receiver,
224  Register map,
225  int interceptor_bit,
226  Label* slow) {
227  // Register use:
228  // receiver - holds the receiver and is unchanged.
229  // Scratch registers:
230  // map - used to hold the map of the receiver.
231 
232  // Check that the object isn't a smi.
233  __ JumpIfSmi(receiver, slow);
234 
235  // Get the map of the receiver.
236  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
237 
238  // Check bit field.
239  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
240  (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
241  __ j(not_zero, slow);
242  // Check that the object is some kind of JS object EXCEPT JS Value type.
243  // In the case that the object is a value-wrapper object,
244  // we enter the runtime system to make sure that indexing
245  // into string objects works as intended.
246  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
247 
248  __ CmpInstanceType(map, JS_OBJECT_TYPE);
249  __ j(below, slow);
250 }
251 
252 
253 // Loads an indexed element from a fast case array.
254 // If not_fast_array is NULL, doesn't perform the elements map check.
255 static void GenerateFastArrayLoad(MacroAssembler* masm,
256  Register receiver,
257  Register key,
258  Register scratch,
259  Register result,
260  Label* not_fast_array,
261  Label* out_of_range) {
262  // Register use:
263  // receiver - holds the receiver and is unchanged.
264  // key - holds the key and is unchanged (must be a smi).
265  // Scratch registers:
266  // scratch - used to hold elements of the receiver and the loaded value.
267  // result - holds the result on exit if the load succeeds and
268  // we fall through.
269 
270  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
271  if (not_fast_array != NULL) {
272  // Check that the object is in fast mode and writable.
273  __ CheckMap(scratch,
274  masm->isolate()->factory()->fixed_array_map(),
275  not_fast_array,
276  DONT_DO_SMI_CHECK);
277  } else {
278  __ AssertFastElements(scratch);
279  }
280  // Check that the key (index) is within bounds.
281  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
282  __ j(above_equal, out_of_range);
283  // Fast case: Do the load.
284  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
285  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
286  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
287  // In case the loaded value is the_hole we have to consult GetProperty
288  // to ensure the prototype chain is searched.
289  __ j(equal, out_of_range);
290  if (!result.is(scratch)) {
291  __ mov(result, scratch);
292  }
293 }
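
Editor's note: a sketch (not part of ic-ia32.cc) of the smi-scaling trick behind the times_2 load above. On ia32 a smi stores its value shifted left by one (kSmiTagSize == 1, tag bit 0), so using a tagged smi key with scale factor 2 yields exactly the byte offset of a 4-byte element: (value << 1) * 2 == value * 4.

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;   // matches the STATIC_ASSERT above
  const int kPointerSize = 4;  // ia32
  int32_t key = 7;                        // untagged element index
  int32_t smi = key << kSmiTagSize;       // tagged smi, low bit clear
  assert(smi * 2 == key * kPointerSize);  // times_2 on a smi == byte offset
  return 0;
}
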
294 
295 
296 // Checks whether a key is an array index string or a unique name.
297 // Falls through if the key is a unique name.
298 static void GenerateKeyNameCheck(MacroAssembler* masm,
299  Register key,
300  Register map,
301  Register hash,
302  Label* index_string,
303  Label* not_unique) {
304  // Register use:
305  // key - holds the key and is unchanged. Assumed to be non-smi.
306  // Scratch registers:
307  // map - used to hold the map of the key.
308  // hash - used to hold the hash of the key.
309  Label unique;
310  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
311  __ j(above, not_unique);
312  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
313  __ j(equal, &unique);
314 
315  // Is the string an array index, with cached numeric value?
316  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
317  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
318  __ j(zero, index_string);
319 
320  // Is the string internalized? We already know it's a string so a single
321  // bit test is enough.
322  STATIC_ASSERT(kNotInternalizedTag != 0);
323  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
324  kIsNotInternalizedMask);
325  __ j(not_zero, not_unique);
326 
327  __ bind(&unique);
328 }
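
Editor's note: a sketch (not part of ic-ia32.cc) of the two mask tests above on plain integers: a clear kContainsCachedArrayIndexMask bit in the hash field means an array index is cached, and a clear kIsNotInternalizedMask bit in the instance type means the string is internalized. The mask values below are illustrative assumptions, not the objects.h definitions.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kContainsCachedArrayIndexMask = 1u << 1;  // assumed
  const uint32_t kIsNotInternalizedMask = 1u << 6;         // assumed
  uint32_t hash_field = 0;     // pretend: an array-index hash is cached
  uint32_t instance_type = 0;  // pretend: an internalized string type
  bool has_cached_index = (hash_field & kContainsCachedArrayIndexMask) == 0;
  bool is_internalized = (instance_type & kIsNotInternalizedMask) == 0;
  std::printf("cached index: %d, internalized: %d\n",
              has_cached_index, is_internalized);
  return 0;
}
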
329 
330 
331 static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
332  Register object,
333  Register key,
334  Register scratch1,
335  Register scratch2,
336  Label* unmapped_case,
337  Label* slow_case) {
338  Heap* heap = masm->isolate()->heap();
339  Factory* factory = masm->isolate()->factory();
340 
341  // Check that the receiver is a JSObject. Because of the elements
342  // map check later, we do not need to check for interceptors or
343  // whether it requires access checks.
344  __ JumpIfSmi(object, slow_case);
345  // Check that the object is some kind of JSObject.
346  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
347  __ j(below, slow_case);
348 
349  // Check that the key is a positive smi.
350  __ test(key, Immediate(0x80000001));
351  __ j(not_zero, slow_case);
352 
353  // Load the elements into scratch1 and check its map.
354  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
355  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
356  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
357 
358  // Check if element is in the range of mapped arguments. If not, jump
359  // to the unmapped lookup with the parameter map in scratch1.
360  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
361  __ sub(scratch2, Immediate(Smi::FromInt(2)));
362  __ cmp(key, scratch2);
363  __ j(above_equal, unmapped_case);
364 
365  // Load element index and check whether it is the hole.
366  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
367  __ mov(scratch2, FieldOperand(scratch1,
368  key,
369  times_half_pointer_size,
370  kHeaderSize));
371  __ cmp(scratch2, factory->the_hole_value());
372  __ j(equal, unmapped_case);
373 
374  // Load value from context and return it. We can reuse scratch1 because
375  // we do not jump to the unmapped lookup (which requires the parameter
376  // map in scratch1).
377  const int kContextOffset = FixedArray::kHeaderSize;
378  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
379  return FieldOperand(scratch1,
380  scratch2,
381  times_half_pointer_size,
382  Context::kHeaderSize);
383 }
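
Editor's note: a plain-C++ model (not part of ic-ia32.cc) of the sloppy-arguments lookup generated above and in GenerateUnmappedArgumentsLookup below. Conceptually, the parameter map stores the context and the arguments backing store in its first two slots, and slot 2 + i either caches a context slot index for argument i or holds the hole, in which case the value lives in the backing store. All types here are simplified stand-ins.

#include <cstdio>
#include <vector>

static const int kHole = -1;  // stand-in for the_hole_value

int main() {
  std::vector<int> context = {0, 0, 0, 42};       // context slot 3 holds 42
  std::vector<int> backing_store = {10, 20, 30};  // unmapped argument values
  std::vector<int> mapped = {3, kHole, kHole};    // only argument 0 is mapped
  for (size_t key = 0; key < backing_store.size(); key++) {
    int value = (key < mapped.size() && mapped[key] != kHole)
                    ? context[mapped[key]]   // mapped: load from the context
                    : backing_store[key];    // unmapped: load backing store
    std::printf("arguments[%zu] = %d\n", key, value);
  }
  return 0;
}
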
384 
385 
386 static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
387  Register key,
388  Register parameter_map,
389  Register scratch,
390  Label* slow_case) {
391  // Element is in arguments backing store, which is referenced by the
392  // second element of the parameter_map.
393  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
394  Register backing_store = parameter_map;
395  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
396  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
397  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
398  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
399  __ cmp(key, scratch);
400  __ j(greater_equal, slow_case);
401  return FieldOperand(backing_store,
402  key,
403  times_half_pointer_size,
404  FixedArray::kHeaderSize);
405 }
406 
407 
408 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
409  // ----------- S t a t e -------------
410  // -- ecx : key
411  // -- edx : receiver
412  // -- esp[0] : return address
413  // -----------------------------------
414  Label slow, check_name, index_smi, index_name, property_array_property;
415  Label probe_dictionary, check_number_dictionary;
416 
417  // Check that the key is a smi.
418  __ JumpIfNotSmi(ecx, &check_name);
419  __ bind(&index_smi);
420  // Now the key is known to be a smi. This place is also jumped to from
421  // where a numeric string is converted to a smi.
422 
423  GenerateKeyedLoadReceiverCheck(
424  masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
425 
426  // Check the receiver's map to see if it has fast elements.
427  __ CheckFastElements(eax, &check_number_dictionary);
428 
429  GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
430  Isolate* isolate = masm->isolate();
431  Counters* counters = isolate->counters();
432  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
433  __ ret(0);
434 
435  __ bind(&check_number_dictionary);
436  __ mov(ebx, ecx);
437  __ SmiUntag(ebx);
438  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
439 
440  // Check whether the elements is a number dictionary.
441  // edx: receiver
442  // ebx: untagged index
443  // ecx: key
444  // eax: elements
445  __ CheckMap(eax,
446  isolate->factory()->hash_table_map(),
447  &slow,
448  DONT_DO_SMI_CHECK);
449  Label slow_pop_receiver;
450  // Push receiver on the stack to free up a register for the dictionary
451  // probing.
452  __ push(edx);
453  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
454  // Pop receiver before returning.
455  __ pop(edx);
456  __ ret(0);
457 
458  __ bind(&slow_pop_receiver);
459  // Pop the receiver from the stack and jump to runtime.
460  __ pop(edx);
461 
462  __ bind(&slow);
463  // Slow case: jump to runtime.
464  // edx: receiver
465  // ecx: key
466  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
467  GenerateRuntimeGetProperty(masm);
468 
469  __ bind(&check_name);
470  GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
471 
472  GenerateKeyedLoadReceiverCheck(
473  masm, edx, eax, Map::kHasNamedInterceptor, &slow);
474 
475  // If the receiver is a fast-case object, check the keyed lookup
476  // cache. Otherwise probe the dictionary.
477  __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
478  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
479  Immediate(isolate->factory()->hash_table_map()));
480  __ j(equal, &probe_dictionary);
481 
482  // The receiver's map is still in eax, compute the keyed lookup cache hash
483  // based on 32 bits of the map pointer and the string hash.
484  if (FLAG_debug_code) {
485  __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
486  __ Check(equal, kMapIsNoLongerInEax);
487  }
488  __ mov(ebx, eax); // Keep the map around for later.
489  __ shr(eax, KeyedLookupCache::kMapHashShift);
490  __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
491  __ shr(edi, String::kHashShift);
492  __ xor_(eax, edi);
493  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
494 
495  // Load the key (consisting of map and internalized string) from the cache and
496  // check for match.
497  Label load_in_object_property;
498  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
499  Label hit_on_nth_entry[kEntriesPerBucket];
500  ExternalReference cache_keys =
501  ExternalReference::keyed_lookup_cache_keys(masm->isolate());
502 
503  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
504  Label try_next_entry;
505  __ mov(edi, eax);
506  __ shl(edi, kPointerSizeLog2 + 1);
507  if (i != 0) {
508  __ add(edi, Immediate(kPointerSize * i * 2));
509  }
510  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
511  __ j(not_equal, &try_next_entry);
512  __ add(edi, Immediate(kPointerSize));
513  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
514  __ j(equal, &hit_on_nth_entry[i]);
515  __ bind(&try_next_entry);
516  }
517 
518  __ lea(edi, Operand(eax, 1));
519  __ shl(edi, kPointerSizeLog2 + 1);
520  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
521  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
522  __ j(not_equal, &slow);
523  __ add(edi, Immediate(kPointerSize));
524  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
525  __ j(not_equal, &slow);
526 
527  // Get field offset.
528  // edx : receiver
529  // ebx : receiver's map
530  // ecx : key
531  // eax : lookup cache index
532  ExternalReference cache_field_offsets =
533  ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
534 
535  // Hit on nth entry.
536  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
537  __ bind(&hit_on_nth_entry[i]);
538  if (i != 0) {
539  __ add(eax, Immediate(i));
540  }
541  __ mov(edi,
542  Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
543  __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
544  __ sub(edi, eax);
545  __ j(above_equal, &property_array_property);
546  if (i != 0) {
547  __ jmp(&load_in_object_property);
548  }
549  }
550 
551  // Load in-object property.
552  __ bind(&load_in_object_property);
553  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
554  __ add(eax, edi);
555  __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
556  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
557  __ ret(0);
558 
559  // Load property array property.
560  __ bind(&property_array_property);
561  __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
562  __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
563  FixedArray::kHeaderSize));
564  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
565  __ ret(0);
566 
567  // Do a quick inline probe of the receiver's dictionary, if it
568  // exists.
569  __ bind(&probe_dictionary);
570 
571  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
572  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
573  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
574 
575  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
576  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
577  __ ret(0);
578 
579  __ bind(&index_name);
580  __ IndexFromHash(ebx, ecx);
581  // Now jump to the place where smi keys are handled.
582  __ jmp(&index_smi);
583 }
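
Editor's note: a sketch (not part of ic-ia32.cc) of the KeyedLookupCache probe emitted above. The bucket index mixes 32 bits of the map pointer with the name's hash field; cache_keys holds (map, name) pairs, and on a hit the parallel cache_field_offsets entry gives the property index. The shift and mask constants below are illustrative assumptions; the real ones live in heap.h.

#include <cstdint>
#include <cstdio>

int main() {
  const int kMapHashShift = 5;         // assumed KeyedLookupCache::kMapHashShift
  const int kHashShift = 2;            // assumed String::kHashShift
  const uint32_t kCapacityMask = 255;  // assumed: 256-entry cache
  const uint32_t kHashMask = ~3u;      // assumed: align to a 4-entry bucket
  uint32_t map_bits = 0x12345678;      // 32 bits of the receiver's map pointer
  uint32_t hash_field = 0x9abcdef0;    // the key's hash field
  uint32_t index = ((map_bits >> kMapHashShift) ^ (hash_field >> kHashShift)) &
                   (kCapacityMask & kHashMask);
  // cache_keys[2 * index] would hold the map and cache_keys[2 * index + 1]
  // the name; the unrolled loop above checks each entry of the bucket in turn.
  std::printf("bucket start index: %u\n", index);
  return 0;
}
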
584 
585 
586 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
587  // ----------- S t a t e -------------
588  // -- ecx : key (index)
589  // -- edx : receiver
590  // -- esp[0] : return address
591  // -----------------------------------
592  Label miss;
593 
594  Register receiver = edx;
595  Register index = ecx;
596  Register scratch = ebx;
597  Register result = eax;
598 
599  StringCharAtGenerator char_at_generator(receiver,
600  index,
601  scratch,
602  result,
603  &miss, // When not a string.
604  &miss, // When not a number.
605  &miss, // When index out of range.
606  STRING_INDEX_IS_ARRAY_INDEX);
607  char_at_generator.GenerateFast(masm);
608  __ ret(0);
609 
610  StubRuntimeCallHelper call_helper;
611  char_at_generator.GenerateSlow(masm, call_helper);
612 
613  __ bind(&miss);
614  GenerateMiss(masm);
615 }
616 
617 
618 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
619  // ----------- S t a t e -------------
620  // -- ecx : key
621  // -- edx : receiver
622  // -- esp[0] : return address
623  // -----------------------------------
624  Label slow;
625 
626  // Check that the receiver isn't a smi.
627  __ JumpIfSmi(edx, &slow);
628 
629  // Check that the key is an array index, that is Uint32.
630  __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
631  __ j(not_zero, &slow);
632 
633  // Get the map of the receiver.
634  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
635 
636  // Check that it has indexed interceptor and access checks
637  // are not enabled for this object.
638  __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
639  __ and_(eax, Immediate(kSlowCaseBitFieldMask));
640  __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
641  __ j(not_zero, &slow);
642 
643  // Everything is fine, call runtime.
644  __ pop(eax);
645  __ push(edx); // receiver
646  __ push(ecx); // key
647  __ push(eax); // return address
648 
649  // Perform tail call to the entry.
650  ExternalReference ref =
651  ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
652  masm->isolate());
653  __ TailCallExternalReference(ref, 2, 1);
654 
655  __ bind(&slow);
656  GenerateMiss(masm);
657 }
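
Editor's note: a sketch (not part of ic-ia32.cc) of what the single test against kSmiTagMask | kSmiSignMask (0x80000001) above verifies. With a one-bit smi tag in bit 0 and the sign in bit 31, the key is a non-negative smi exactly when both masked bits are zero.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSmiTagMask = 1u;           // bit 0: heap-object tag
  const uint32_t kSmiSignMask = 0x80000000;  // bit 31: sign of the smi value
  uint32_t positive_smi = 7u << 1;           // smi 7
  uint32_t negative_smi = (uint32_t)-14;     // smi -7 (two's complement)
  assert((positive_smi & (kSmiTagMask | kSmiSignMask)) == 0);
  assert((negative_smi & (kSmiTagMask | kSmiSignMask)) != 0);
  return 0;
}
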
658 
659 
660 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
661  // ----------- S t a t e -------------
662  // -- ecx : key
663  // -- edx : receiver
664  // -- esp[0] : return address
665  // -----------------------------------
666  Label slow, notin;
667  Factory* factory = masm->isolate()->factory();
668  Operand mapped_location =
669  GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
670  __ mov(eax, mapped_location);
671  __ Ret();
672  __ bind(&notin);
673  // The unmapped lookup expects that the parameter map is in ebx.
674  Operand unmapped_location =
675  GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
676  __ cmp(unmapped_location, factory->the_hole_value());
677  __ j(equal, &slow);
678  __ mov(eax, unmapped_location);
679  __ Ret();
680  __ bind(&slow);
681  GenerateMiss(masm);
682 }
683 
684 
685 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
686  // ----------- S t a t e -------------
687  // -- eax : value
688  // -- ecx : key
689  // -- edx : receiver
690  // -- esp[0] : return address
691  // -----------------------------------
692  Label slow, notin;
693  Operand mapped_location =
694  GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
695  __ mov(mapped_location, eax);
696  __ lea(ecx, mapped_location);
697  __ mov(edx, eax);
698  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
699  __ Ret();
700  __ bind(&notin);
701  // The unmapped lookup expects that the parameter map is in ebx.
702  Operand unmapped_location =
703  GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
704  __ mov(unmapped_location, eax);
705  __ lea(edi, unmapped_location);
706  __ mov(edx, eax);
707  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
708  __ Ret();
709  __ bind(&slow);
710  GenerateMiss(masm);
711 }
712 
713 
714 static void KeyedStoreGenerateGenericHelper(
715  MacroAssembler* masm,
716  Label* fast_object,
717  Label* fast_double,
718  Label* slow,
719  KeyedStoreCheckMap check_map,
720  KeyedStoreIncrementLength increment_length) {
721  Label transition_smi_elements;
722  Label finish_object_store, non_double_value, transition_double_elements;
723  Label fast_double_without_map_check;
724  // eax: value
725  // ecx: key (a smi)
726  // edx: receiver
727  // ebx: FixedArray receiver->elements
728  // edi: receiver map
729  // Fast case: Do the store, which could be either an Object or a double.
730  __ bind(fast_object);
731  if (check_map == kCheckMap) {
732  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
733  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
734  __ j(not_equal, fast_double);
735  }
736 
737  // HOLECHECK: guards "A[i] = V"
738  // We have to go to the runtime if the current value is the hole because
739  // there may be a callback on the element.
740  Label holecheck_passed1;
741  __ cmp(FixedArrayElementOperand(ebx, ecx),
742  masm->isolate()->factory()->the_hole_value());
743  __ j(not_equal, &holecheck_passed1);
744  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
745  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
746 
747  __ bind(&holecheck_passed1);
748 
749  // Smi stores don't require further checks.
750  Label non_smi_value;
751  __ JumpIfNotSmi(eax, &non_smi_value);
752  if (increment_length == kIncrementLength) {
753  // Add 1 to receiver->length.
754  __ add(FieldOperand(edx, JSArray::kLengthOffset),
755  Immediate(Smi::FromInt(1)));
756  }
757  // It's irrelevant whether array is smi-only or not when writing a smi.
758  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
759  __ ret(0);
760 
761  __ bind(&non_smi_value);
762  // Escape to elements kind transition case.
763  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
764  __ CheckFastObjectElements(edi, &transition_smi_elements);
765 
766  // Fast elements array, store the value to the elements backing store.
767  __ bind(&finish_object_store);
768  if (increment_length == kIncrementLength) {
769  // Add 1 to receiver->length.
770  __ add(FieldOperand(edx, JSArray::kLengthOffset),
771  Immediate(Smi::FromInt(1)));
772  }
773  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
774  // Update write barrier for the elements array address.
775  __ mov(edx, eax); // Preserve the value which is returned.
776  __ RecordWriteArray(
777  ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
778  __ ret(0);
779 
780  __ bind(fast_double);
781  if (check_map == kCheckMap) {
782  // Check for fast double array case. If this fails, call through to the
783  // runtime.
784  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
785  __ j(not_equal, slow);
786  // If the value is a number, store it as a double in the FastDoubleElements
787  // array.
788  }
789 
790  // HOLECHECK: guards "A[i] double hole?"
791  // We have to see if the double version of the hole is present. If so
792  // go to the runtime.
793  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
794  __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
795  __ j(not_equal, &fast_double_without_map_check);
796  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
797  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
798 
799  __ bind(&fast_double_without_map_check);
800  __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
801  &transition_double_elements, false);
802  if (increment_length == kIncrementLength) {
803  // Add 1 to receiver->length.
804  __ add(FieldOperand(edx, JSArray::kLengthOffset),
805  Immediate(Smi::FromInt(1)));
806  }
807  __ ret(0);
808 
809  __ bind(&transition_smi_elements);
810  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
811 
812  // Transition the array appropriately depending on the value type.
813  __ CheckMap(eax,
814  masm->isolate()->factory()->heap_number_map(),
815  &non_double_value,
816  DONT_DO_SMI_CHECK);
817 
818  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
819  // and complete the store.
820  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
821  FAST_DOUBLE_ELEMENTS,
822  ebx,
823  edi,
824  slow);
825  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
826  FAST_DOUBLE_ELEMENTS);
827  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
828  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
829  __ jmp(&fast_double_without_map_check);
830 
831  __ bind(&non_double_value);
832  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
833  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
834  FAST_ELEMENTS,
835  ebx,
836  edi,
837  slow);
838  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
839  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
840  slow);
841  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
842  __ jmp(&finish_object_store);
843 
844  __ bind(&transition_double_elements);
845  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
846  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
847  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
848  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
849  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
850  FAST_ELEMENTS,
851  ebx,
852  edi,
853  slow);
854  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
855  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
856  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
857  __ jmp(&finish_object_store);
858 }
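
Editor's note: a sketch (not part of ic-ia32.cc) of the double-hole test in the helper above. The hole is a NaN with a distinguished bit pattern whose upper 32 bits alone identify it, which is why the cmp reads at offset kHeaderSize + sizeof(kHoleNanLower32): it skips the low word of the 64-bit element (little-endian, as on ia32). The bit patterns below are assumptions for illustration.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint32_t kHoleNanUpper32 = 0x7fffffff;  // assumed
  const uint32_t kHoleNanLower32 = 0xffffffff;  // assumed
  uint64_t element = ((uint64_t)kHoleNanUpper32 << 32) | kHoleNanLower32;
  uint32_t upper;  // read only the high word, as the generated code does
  std::memcpy(&upper, (char*)&element + sizeof(kHoleNanLower32), sizeof(upper));
  std::printf("is hole: %d\n", upper == kHoleNanUpper32);  // prints 1
  return 0;
}
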
859 
860 
861 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
862  StrictMode strict_mode) {
863  // ----------- S t a t e -------------
864  // -- eax : value
865  // -- ecx : key
866  // -- edx : receiver
867  // -- esp[0] : return address
868  // -----------------------------------
869  Label slow, fast_object, fast_object_grow;
870  Label fast_double, fast_double_grow;
871  Label array, extra, check_if_double_array;
872 
873  // Check that the object isn't a smi.
874  __ JumpIfSmi(edx, &slow);
875  // Get the map from the receiver.
876  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
877  // Check that the receiver does not require access checks and is not observed.
878  // The generic stub does not perform map checks or handle observed objects.
879  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
880  1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
881  __ j(not_zero, &slow);
882  // Check that the key is a smi.
883  __ JumpIfNotSmi(ecx, &slow);
884  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
885  __ j(equal, &array);
886  // Check that the object is some kind of JSObject.
887  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
888  __ j(below, &slow);
889 
890  // Object case: Check key against length in the elements array.
891  // eax: value
892  // edx: JSObject
893  // ecx: key (a smi)
894  // edi: receiver map
895  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
896  // Check array bounds. Both the key and the length of FixedArray are smis.
897  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
898  __ j(below, &fast_object);
899 
900  // Slow case: call runtime.
901  __ bind(&slow);
902  GenerateRuntimeSetProperty(masm, strict_mode);
903 
904  // Extra capacity case: Check if there is extra capacity to
905  // perform the store and update the length. Used for adding one
906  // element to the array by writing to array[array.length].
907  __ bind(&extra);
908  // eax: value
909  // edx: receiver, a JSArray
910  // ecx: key, a smi.
911  // ebx: receiver->elements, a FixedArray
912  // edi: receiver map
913  // flags: compare (ecx, edx.length())
914  // do not leave holes in the array:
915  __ j(not_equal, &slow);
916  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
917  __ j(above_equal, &slow);
918  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
919  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
920  __ j(not_equal, &check_if_double_array);
921  __ jmp(&fast_object_grow);
922 
923  __ bind(&check_if_double_array);
924  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
925  __ j(not_equal, &slow);
926  __ jmp(&fast_double_grow);
927 
928  // Array case: Get the length and the elements array from the JS
929  // array. Check that the array is in fast mode (and writable); if it
930  // is, the length is always a smi.
931  __ bind(&array);
932  // eax: value
933  // edx: receiver, a JSArray
934  // ecx: key, a smi.
935  // edi: receiver map
936  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
937 
938  // Check the key against the length in the array and fall through to the
939  // common store code.
940  __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
941  __ j(above_equal, &extra);
942 
943  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
944  &slow, kCheckMap, kDontIncrementLength);
945  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
946  &slow, kDontCheckMap, kIncrementLength);
947 }
948 
949 
950 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
951  // ----------- S t a t e -------------
952  // -- ecx : name
953  // -- edx : receiver
954  // -- esp[0] : return address
955  // -----------------------------------
956 
957  // Probe the stub cache.
958  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
959  masm->isolate()->stub_cache()->GenerateProbe(
960  masm, flags, edx, ecx, ebx, eax);
961 
962  // Cache miss: Jump to runtime.
963  GenerateMiss(masm);
964 }
965 
966 
967 void LoadIC::GenerateNormal(MacroAssembler* masm) {
968  // ----------- S t a t e -------------
969  // -- ecx : name
970  // -- edx : receiver
971  // -- esp[0] : return address
972  // -----------------------------------
973  Label miss;
974 
975  GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
976 
977  // eax: elements
978  // Search the dictionary placing the result in eax.
979  GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
980  __ ret(0);
981 
982  // Cache miss: Jump to runtime.
983  __ bind(&miss);
984  GenerateMiss(masm);
985 }
986 
987 
988 void LoadIC::GenerateMiss(MacroAssembler* masm) {
989  // ----------- S t a t e -------------
990  // -- ecx : name
991  // -- edx : receiver
992  // -- esp[0] : return address
993  // -----------------------------------
994 
995  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
996 
997  __ pop(ebx);
998  __ push(edx); // receiver
999  __ push(ecx); // name
1000  __ push(ebx); // return address
1001 
1002  // Perform tail call to the entry.
1003  ExternalReference ref =
1004  ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
1005  __ TailCallExternalReference(ref, 2, 1);
1006 }
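
Editor's note: a sketch (not part of ic-ia32.cc) of the pop/push shuffle above, modeled with a vector as the stack (top = back). The return address is popped, the two arguments are pushed, and the return address goes back on top, so the tail-called runtime entry finds receiver and name beneath it. The same pattern recurs in the miss and slow stubs below.

#include <cstdio>
#include <vector>

int main() {
  std::vector<const char*> stack = {"ret_addr"};  // esp[0] on entry
  const char* ret = stack.back();                 // __ pop(ebx)
  stack.pop_back();
  stack.push_back("receiver");                    // __ push(edx)
  stack.push_back("name");                        // __ push(ecx)
  stack.push_back(ret);                           // __ push(ebx)
  for (const char* slot : stack) std::printf("%s\n", slot);
  return 0;
}
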
1007 
1008 
1009 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
1010  // ----------- S t a t e -------------
1011  // -- ecx : key
1012  // -- edx : receiver
1013  // -- esp[0] : return address
1014  // -----------------------------------
1015 
1016  __ pop(ebx);
1017  __ push(edx); // receiver
1018  __ push(ecx); // name
1019  __ push(ebx); // return address
1020 
1021  // Perform tail call to the entry.
1022  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
1023 }
1024 
1025 
1026 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
1027  // ----------- S t a t e -------------
1028  // -- ecx : key
1029  // -- edx : receiver
1030  // -- esp[0] : return address
1031  // -----------------------------------
1032 
1033  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
1034 
1035  __ pop(ebx);
1036  __ push(edx); // receiver
1037  __ push(ecx); // name
1038  __ push(ebx); // return address
1039 
1040  // Perform tail call to the entry.
1041  ExternalReference ref =
1042  ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
1043  __ TailCallExternalReference(ref, 2, 1);
1044 }
1045 
1046 
1047 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
1048  // ----------- S t a t e -------------
1049  // -- ecx : key
1050  // -- edx : receiver
1051  // -- esp[0] : return address
1052  // -----------------------------------
1053 
1054  __ pop(ebx);
1055  __ push(edx); // receiver
1056  __ push(ecx); // name
1057  __ push(ebx); // return address
1058 
1059  // Perform tail call to the entry.
1060  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
1061 }
1062 
1063 
1064 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
1065  // ----------- S t a t e -------------
1066  // -- eax : value
1067  // -- ecx : name
1068  // -- edx : receiver
1069  // -- esp[0] : return address
1070  // -----------------------------------
1071  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
1072  masm->isolate()->stub_cache()->GenerateProbe(
1073  masm, flags, edx, ecx, ebx, no_reg);
1074 
1075  // Cache miss: Jump to runtime.
1076  GenerateMiss(masm);
1077 }
1078 
1079 
1080 void StoreIC::GenerateMiss(MacroAssembler* masm) {
1081  // ----------- S t a t e -------------
1082  // -- eax : value
1083  // -- ecx : name
1084  // -- edx : receiver
1085  // -- esp[0] : return address
1086  // -----------------------------------
1087 
1088  __ pop(ebx);
1089  __ push(edx);
1090  __ push(ecx);
1091  __ push(eax);
1092  __ push(ebx);
1093 
1094  // Perform tail call to the entry.
1095  ExternalReference ref =
1096  ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
1097  __ TailCallExternalReference(ref, 3, 1);
1098 }
1099 
1100 
1101 void StoreIC::GenerateNormal(MacroAssembler* masm) {
1102  // ----------- S t a t e -------------
1103  // -- eax : value
1104  // -- ecx : name
1105  // -- edx : receiver
1106  // -- esp[0] : return address
1107  // -----------------------------------
1108 
1109  Label miss, restore_miss;
1110 
1111  GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
1112 
1113  // A lot of registers are needed for storing to slow case
1114  // objects. Push and restore receiver but rely on
1115  // GenerateDictionaryStore preserving the value and name.
1116  __ push(edx);
1117  GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
1118  __ Drop(1);
1119  Counters* counters = masm->isolate()->counters();
1120  __ IncrementCounter(counters->store_normal_hit(), 1);
1121  __ ret(0);
1122 
1123  __ bind(&restore_miss);
1124  __ pop(edx);
1125 
1126  __ bind(&miss);
1127  __ IncrementCounter(counters->store_normal_miss(), 1);
1128  GenerateMiss(masm);
1129 }
1130 
1131 
1132 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1133  StrictMode strict_mode) {
1134  // ----------- S t a t e -------------
1135  // -- eax : value
1136  // -- ecx : name
1137  // -- edx : receiver
1138  // -- esp[0] : return address
1139  // -----------------------------------
1140  __ pop(ebx);
1141  __ push(edx);
1142  __ push(ecx);
1143  __ push(eax);
1144  __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
1145  __ push(Immediate(Smi::FromInt(strict_mode)));
1146  __ push(ebx); // return address
1147 
1148  // Do tail-call to runtime routine.
1149  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
1150 }
1151 
1152 
1153 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1154  StrictMode strict_mode) {
1155  // ----------- S t a t e -------------
1156  // -- eax : value
1157  // -- ecx : key
1158  // -- edx : receiver
1159  // -- esp[0] : return address
1160  // -----------------------------------
1161 
1162  __ pop(ebx);
1163  __ push(edx);
1164  __ push(ecx);
1165  __ push(eax);
1166  __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
1167  __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
1168  __ push(ebx); // return address
1169 
1170  // Do tail-call to runtime routine.
1171  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
1172 }
1173 
1174 
1175 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
1176  // ----------- S t a t e -------------
1177  // -- eax : value
1178  // -- ecx : key
1179  // -- edx : receiver
1180  // -- esp[0] : return address
1181  // -----------------------------------
1182 
1183  __ pop(ebx);
1184  __ push(edx);
1185  __ push(ecx);
1186  __ push(eax);
1187  __ push(ebx);
1188 
1189  // Do tail-call to runtime routine.
1190  ExternalReference ref =
1191  ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
1192  __ TailCallExternalReference(ref, 3, 1);
1193 }
1194 
1195 
1196 void StoreIC::GenerateSlow(MacroAssembler* masm) {
1197  // ----------- S t a t e -------------
1198  // -- eax : value
1199  // -- ecx : key
1200  // -- edx : receiver
1201  // -- esp[0] : return address
1202  // -----------------------------------
1203 
1204  __ pop(ebx);
1205  __ push(edx);
1206  __ push(ecx);
1207  __ push(eax);
1208  __ push(ebx); // return address
1209 
1210  // Do tail-call to runtime routine.
1211  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
1212  __ TailCallExternalReference(ref, 3, 1);
1213 }
1214 
1215 
1216 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
1217  // ----------- S t a t e -------------
1218  // -- eax : value
1219  // -- ecx : key
1220  // -- edx : receiver
1221  // -- esp[0] : return address
1222  // -----------------------------------
1223 
1224  __ pop(ebx);
1225  __ push(edx);
1226  __ push(ecx);
1227  __ push(eax);
1228  __ push(ebx); // return address
1229 
1230  // Do tail-call to runtime routine.
1231  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
1232  __ TailCallExternalReference(ref, 3, 1);
1233 }
1234 
1235 
1236 #undef __
1237 
1238 
1239 Condition CompareIC::ComputeCondition(Token::Value op) {
1240  switch (op) {
1241  case Token::EQ_STRICT:
1242  case Token::EQ:
1243  return equal;
1244  case Token::LT:
1245  return less;
1246  case Token::GT:
1247  return greater;
1248  case Token::LTE:
1249  return less_equal;
1250  case Token::GTE:
1251  return greater_equal;
1252  default:
1253  UNREACHABLE();
1254  return no_condition;
1255  }
1256 }
1257 
1258 
1259 bool CompareIC::HasInlinedSmiCode(Address address) {
1260  // The address of the instruction following the call.
1261  Address test_instruction_address =
1262  address + Assembler::kCallTargetAddressOffset;
1263 
1264  // If the instruction following the call is not a test al, nothing
1265  // was inlined.
1266  return *test_instruction_address == Assembler::kTestAlByte;
1267 }
1268 
1269 
1270 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1271  // The address of the instruction following the call.
1272  Address test_instruction_address =
1273  address + Assembler::kCallTargetAddressOffset;
1274 
1275  // If the instruction following the call is not a test al, nothing
1276  // was inlined.
1277  if (*test_instruction_address != Assembler::kTestAlByte) {
1278  ASSERT(*test_instruction_address == Assembler::kNopByte);
1279  return;
1280  }
1281 
1282  Address delta_address = test_instruction_address + 1;
1283  // The delta to the start of the map check instruction and the
1284  // condition code uses at the patched jump.
1285  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
1286  if (FLAG_trace_ic) {
1287  PrintF("[ patching ic at %p, test=%p, delta=%d\n",
1288  address, test_instruction_address, delta);
1289  }
1290 
1291  // Patch with a short conditional jump. Enabling means switching from a short
1292  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
1293  // reverse operation of that.
1294  Address jmp_address = test_instruction_address - delta;
1295  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
1296  ? (*jmp_address == Assembler::kJncShortOpcode ||
1297  *jmp_address == Assembler::kJcShortOpcode)
1298  : (*jmp_address == Assembler::kJnzShortOpcode ||
1299  *jmp_address == Assembler::kJzShortOpcode));
1300  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
1301  ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
1302  : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
1303  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
1304 }
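
Editor's note: a sketch (not part of ic-ia32.cc) of the opcode arithmetic behind the patch above. Short Jcc instructions on ia32 encode as 0x70 | condition, so rewriting that single byte flips, e.g., jnc (0x73) to jnz (0x75) or jc (0x72) to jz (0x74) without touching the jump offset that follows. The condition values below are the standard x86 ones.

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t kJccShortPrefix = 0x70;
  const uint8_t carry = 2, not_carry = 3, zero = 4, not_zero = 5;
  uint8_t jnc = kJccShortPrefix | not_carry;  // 0x73: emitted when disabled
  uint8_t jnz = kJccShortPrefix | not_zero;   // 0x75: patched in when enabled
  std::printf("jnc=0x%02x -> jnz=0x%02x\n", jnc, jnz);
  (void)carry; (void)zero;
  return 0;
}
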
1305 
1306 
1307 } } // namespace v8::internal
1308 
1309 #endif // V8_TARGET_ARCH_IA32