v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
ic-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM
31 
32 #include "assembler-arm.h"
33 #include "code-stubs.h"
34 #include "codegen.h"
35 #include "disasm.h"
36 #include "ic-inl.h"
37 #include "runtime.h"
38 #include "stub-cache.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 
44 // ----------------------------------------------------------------------------
45 // Static IC stub generators.
46 //
47 
48 #define __ ACCESS_MASM(masm)
49 
50 
51 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
52  Register type,
53  Label* global_object) {
54  // Register usage:
55  // type: holds the receiver instance type on entry.
56  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
57  __ b(eq, global_object);
58  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
59  __ b(eq, global_object);
60  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
61  __ b(eq, global_object);
62 }
63 
64 
65 // Generated code falls through if the receiver is a regular non-global
66 // JS object with slow properties and no interceptors.
67 static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
68  Register receiver,
69  Register elements,
70  Register t0,
71  Register t1,
72  Label* miss) {
73  // Register usage:
74  // receiver: holds the receiver on entry and is unchanged.
75  // elements: holds the property dictionary on fall through.
76  // Scratch registers:
77  // t0: used to hold the receiver map.
78  // t1: used to hold the receiver instance type, receiver bit mask and
79  // elements map.
80 
81  // Check that the receiver isn't a smi.
82  __ JumpIfSmi(receiver, miss);
83 
84  // Check that the receiver is a valid JS object.
85  __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
86  __ b(lt, miss);
87 
88  // If this assert fails, we have to check upper bound too.
89  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
90 
91  GenerateGlobalInstanceTypeCheck(masm, t1, miss);
92 
93  // Check that the global object does not require access checks.
94  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
95  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
96  (1 << Map::kHasNamedInterceptor)));
97  __ b(ne, miss);
98 
99  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
100  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
101  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
102  __ cmp(t1, ip);
103  __ b(ne, miss);
104 }
105 
106 
107 // Helper function used from LoadIC GenerateNormal.
108 //
109 // elements: Property dictionary. It is not clobbered if a jump to the miss
110 // label is done.
111 // name: Property name. It is not clobbered if a jump to the miss label is
112 // done
113 // result: Register for the result. It is only updated if a jump to the miss
114 // label is not done. Can be the same as elements or name clobbering
115 // one of these in the case of not jumping to the miss label.
116 // The two scratch registers need to be different from elements, name and
117 // result.
118 // The generated code assumes that the receiver has slow properties,
119 // is not a global object and does not have interceptors.
120 static void GenerateDictionaryLoad(MacroAssembler* masm,
121  Label* miss,
122  Register elements,
123  Register name,
124  Register result,
125  Register scratch1,
126  Register scratch2) {
127  // Main use of the scratch registers.
128  // scratch1: Used as temporary and to hold the capacity of the property
129  // dictionary.
130  // scratch2: Used as temporary.
131  Label done;
132 
133  // Probe the dictionary.
134  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
135  miss,
136  &done,
137  elements,
138  name,
139  scratch1,
140  scratch2);
141 
142  // If probing finds an entry check that the value is a normal
143  // property.
144  __ bind(&done); // scratch2 == elements + 4 * index
145  const int kElementsStartOffset = NameDictionary::kHeaderSize +
146  NameDictionary::kElementsStartIndex * kPointerSize;
147  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
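  // Each NameDictionary entry occupies three pointers (key, value, details),
  // so the value lives one word and the details two words past the entry's key.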
148  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
149  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
150  __ b(ne, miss);
151 
152  // Get the value at the masked, scaled index and return.
153  __ ldr(result,
154  FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
155 }
156 
157 
158 // Helper function used from StoreIC::GenerateNormal.
159 //
160 // elements: Property dictionary. It is not clobbered if a jump to the miss
161 // label is done.
162 // name: Property name. It is not clobbered if a jump to the miss label is
163 // done
164 // value: The value to store.
165 // The two scratch registers need to be different from elements, name and
166 // result.
167 // The generated code assumes that the receiver has slow properties,
168 // is not a global object and does not have interceptors.
169 static void GenerateDictionaryStore(MacroAssembler* masm,
170  Label* miss,
171  Register elements,
172  Register name,
173  Register value,
174  Register scratch1,
175  Register scratch2) {
176  // Main use of the scratch registers.
177  // scratch1: Used as temporary and to hold the capacity of the property
178  // dictionary.
179  // scratch2: Used as temporary.
180  Label done;
181 
182  // Probe the dictionary.
183  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
184  miss,
185  &done,
186  elements,
187  name,
188  scratch1,
189  scratch2);
190 
191  // If probing finds an entry in the dictionary check that the value
192  // is a normal property that is not read only.
193  __ bind(&done); // scratch2 == elements + 4 * index
194  const int kElementsStartOffset = NameDictionary::kHeaderSize +
195  NameDictionary::kElementsStartIndex * kPointerSize;
196  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
197  const int kTypeAndReadOnlyMask =
198  (PropertyDetails::TypeField::kMask |
199  PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
200  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
201  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
202  __ b(ne, miss);
203 
204  // Store the value at the masked, scaled index and return.
205  const int kValueOffset = kElementsStartOffset + kPointerSize;
206  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
207  __ str(value, MemOperand(scratch2));
208 
209  // Update the write barrier. Make sure not to clobber the value.
210  __ mov(scratch1, value);
211  __ RecordWrite(
212  elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
213 }
214 
215 
216 // Checks the receiver for special cases (value type, slow case bits).
217 // Falls through for regular JS object.
218 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
219  Register receiver,
220  Register map,
221  Register scratch,
222  int interceptor_bit,
223  Label* slow) {
224  // Check that the object isn't a smi.
225  __ JumpIfSmi(receiver, slow);
226  // Get the map of the receiver.
227  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
228  // Check bit field.
229  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
230  __ tst(scratch,
231  Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
232  __ b(ne, slow);
233  // Check that the object is some kind of JS object EXCEPT JS Value type.
234  // In the case that the object is a value-wrapper object,
235  // we enter the runtime system to make sure that indexing into string
236  // objects works as intended.
237  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
238  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
239  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
240  __ b(lt, slow);
241 }
242 
243 
244 // Loads an indexed element from a fast case array.
245 // If not_fast_array is NULL, doesn't perform the elements map check.
246 static void GenerateFastArrayLoad(MacroAssembler* masm,
247  Register receiver,
248  Register key,
249  Register elements,
250  Register scratch1,
251  Register scratch2,
252  Register result,
253  Label* not_fast_array,
254  Label* out_of_range) {
255  // Register use:
256  //
257  // receiver - holds the receiver on entry.
258  // Unchanged unless 'result' is the same register.
259  //
260  // key - holds the smi key on entry.
261  // Unchanged unless 'result' is the same register.
262  //
263  // elements - holds the elements of the receiver on exit.
264  //
265  // result - holds the result on exit if the load succeeded.
266  // Allowed to be the same as 'receiver' or 'key'.
267  // Unchanged on bailout so 'receiver' and 'key' can be safely
268  // used by further computation.
269  //
270  // Scratch registers:
271  //
272  // scratch1 - used to hold elements map and elements length.
273  // Holds the elements map if not_fast_array branch is taken.
274  //
275  // scratch2 - used to hold the loaded value.
276 
277  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
278  if (not_fast_array != NULL) {
279  // Check that the object is in fast mode and writable.
280  __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
281  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
282  __ cmp(scratch1, ip);
283  __ b(ne, not_fast_array);
284  } else {
285  __ AssertFastElements(elements);
286  }
287  // Check that the key (index) is within bounds.
288  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
289  __ cmp(key, Operand(scratch1));
290  __ b(hs, out_of_range);
291  // Fast case: Do the load.
292  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
293  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
294  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
295  __ cmp(scratch2, ip);
296  // In case the loaded value is the_hole we have to consult GetProperty
297  // to ensure the prototype chain is searched.
298  __ b(eq, out_of_range);
299  __ mov(result, scratch2);
300 }
301 
302 
303 // Checks whether a key is an array index string or a unique name.
304 // Falls through if a key is a unique name.
305 static void GenerateKeyNameCheck(MacroAssembler* masm,
306  Register key,
307  Register map,
308  Register hash,
309  Label* index_string,
310  Label* not_unique) {
311  // The key is not a smi.
312  Label unique;
313  // Is it a name?
314  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
315  __ b(hi, not_unique);
316  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
317  __ b(eq, &unique);
318 
319  // Is the string an array index, with cached numeric value?
320  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
321  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
322  __ b(eq, index_string);
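  // When kContainsCachedArrayIndexMask is clear, the hash field also caches the
  // string's numeric array index; the caller recovers it with IndexFromHash.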
323 
324  // Is the string internalized? We know it's a string, so a single
325  // bit test is enough.
326  // map: key map
327  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
328  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
329  __ tst(hash, Operand(kIsNotInternalizedMask));
330  __ b(ne, not_unique);
331 
332  __ bind(&unique);
333 }
334 
335 
336 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
337  // ----------- S t a t e -------------
338  // -- r2 : name
339  // -- lr : return address
340  // -- r0 : receiver
341  // -----------------------------------
342 
343  // Probe the stub cache.
344  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
345  masm->isolate()->stub_cache()->GenerateProbe(
346  masm, flags, r0, r2, r3, r4, r5, r6);
347 
348  // Cache miss: Jump to runtime.
349  GenerateMiss(masm);
350 }
351 
352 
353 void LoadIC::GenerateNormal(MacroAssembler* masm) {
354  // ----------- S t a t e -------------
355  // -- r2 : name
356  // -- lr : return address
357  // -- r0 : receiver
358  // -----------------------------------
359  Label miss;
360 
361  GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
362 
363  // r1: elements
364  GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
365  __ Ret();
366 
367  // Cache miss: Jump to runtime.
368  __ bind(&miss);
369  GenerateMiss(masm);
370 }
371 
372 
373 void LoadIC::GenerateMiss(MacroAssembler* masm) {
374  // ----------- S t a t e -------------
375  // -- r2 : name
376  // -- lr : return address
377  // -- r0 : receiver
378  // -----------------------------------
379  Isolate* isolate = masm->isolate();
380 
381  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
382 
383  __ mov(r3, r0);
384  __ Push(r3, r2);
385 
386  // Perform tail call to the entry.
387  ExternalReference ref =
388  ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
389  __ TailCallExternalReference(ref, 2, 1);
390 }
391 
392 
393 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
394  // ---------- S t a t e --------------
395  // -- r2 : name
396  // -- lr : return address
397  // -- r0 : receiver
398  // -----------------------------------
399 
400  __ mov(r3, r0);
401  __ Push(r3, r2);
402 
403  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
404 }
405 
406 
407 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
408  Register object,
409  Register key,
410  Register scratch1,
411  Register scratch2,
412  Register scratch3,
413  Label* unmapped_case,
414  Label* slow_case) {
415  Heap* heap = masm->isolate()->heap();
416 
417  // Check that the receiver is a JSObject. Because of the map check
418  // later, we do not need to check for interceptors or whether it
419  // requires access checks.
420  __ JumpIfSmi(object, slow_case);
421  // Check that the object is some kind of JSObject.
422  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
423  __ b(lt, slow_case);
424 
425  // Check that the key is a positive smi.
426  __ tst(key, Operand(0x80000001));
427  __ b(ne, slow_case);
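  // The mask 0x80000001 tests the smi tag (bit 0) and the sign bit at once, so
  // the branch above is taken unless the key is a non-negative smi.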
428 
429  // Load the elements into scratch1 and check its map.
430  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
431  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
432  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
433 
434  // Check if element is in the range of mapped arguments. If not, jump
435  // to the unmapped lookup with the parameter map in scratch1.
436  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
437  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
438  __ cmp(key, Operand(scratch2));
439  __ b(cs, unmapped_case);
440 
441  // Load element index and check whether it is the hole.
442  const int kOffset =
443  FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
444 
445  __ mov(scratch3, Operand(kPointerSize >> 1));
446  __ mul(scratch3, key, scratch3);
447  __ add(scratch3, scratch3, Operand(kOffset));
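  // The key is a smi (value << 1), so multiplying by kPointerSize >> 1 scales it
  // to a byte offset; kOffset then skips the parameter map's header and its
  // first two slots (the context and the arguments backing store).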
448 
449  __ ldr(scratch2, MemOperand(scratch1, scratch3));
450  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
451  __ cmp(scratch2, scratch3);
452  __ b(eq, unmapped_case);
453 
454  // Load value from context and return it. We can reuse scratch1 because
455  // we do not jump to the unmapped lookup (which requires the parameter
456  // map in scratch1).
457  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
458  __ mov(scratch3, Operand(kPointerSize >> 1));
459  __ mul(scratch3, scratch2, scratch3);
460  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
461  return MemOperand(scratch1, scratch3);
462 }
463 
464 
465 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
466  Register key,
467  Register parameter_map,
468  Register scratch,
469  Label* slow_case) {
470  // Element is in arguments backing store, which is referenced by the
471  // second element of the parameter_map. The parameter_map register
472  // must be loaded with the parameter map of the arguments object and is
473  // overwritten.
474  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
475  Register backing_store = parameter_map;
476  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
477  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
478  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
479  DONT_DO_SMI_CHECK);
480  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
481  __ cmp(key, Operand(scratch));
482  __ b(cs, slow_case);
483  __ mov(scratch, Operand(kPointerSize >> 1));
484  __ mul(scratch, key, scratch);
485  __ add(scratch,
486  scratch,
487  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
488  return MemOperand(backing_store, scratch);
489 }
490 
491 
492 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
493  // ---------- S t a t e --------------
494  // -- lr : return address
495  // -- r0 : key
496  // -- r1 : receiver
497  // -----------------------------------
498  Label slow, notin;
499  MemOperand mapped_location =
500  GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
501  __ ldr(r0, mapped_location);
502  __ Ret();
503  __ bind(&notin);
504  // The unmapped lookup expects that the parameter map is in r2.
505  MemOperand unmapped_location =
506  GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
507  __ ldr(r2, unmapped_location);
508  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
509  __ cmp(r2, r3);
510  __ b(eq, &slow);
511  __ mov(r0, r2);
512  __ Ret();
513  __ bind(&slow);
514  GenerateMiss(masm);
515 }
516 
517 
518 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
519  // ---------- S t a t e --------------
520  // -- r0 : value
521  // -- r1 : key
522  // -- r2 : receiver
523  // -- lr : return address
524  // -----------------------------------
525  Label slow, notin;
526  MemOperand mapped_location =
527  GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
528  __ str(r0, mapped_location);
529  __ add(r6, r3, r5);
530  __ mov(r9, r0);
531  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
532  __ Ret();
533  __ bind(&notin);
534  // The unmapped lookup expects that the parameter map is in r3.
535  MemOperand unmapped_location =
536  GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
537  __ str(r0, unmapped_location);
538  __ add(r6, r3, r4);
539  __ mov(r9, r0);
540  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
541  __ Ret();
542  __ bind(&slow);
543  GenerateMiss(masm);
544 }
545 
546 
547 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
548  // ---------- S t a t e --------------
549  // -- lr : return address
550  // -- r0 : key
551  // -- r1 : receiver
552  // -----------------------------------
553  Isolate* isolate = masm->isolate();
554 
555  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
556 
557  __ Push(r1, r0);
558 
559  // Perform tail call to the entry.
560  ExternalReference ref =
561  ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
562 
563  __ TailCallExternalReference(ref, 2, 1);
564 }
565 
566 
567 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
568  // ---------- S t a t e --------------
569  // -- lr : return address
570  // -- r0 : key
571  // -- r1 : receiver
572  // -----------------------------------
573 
574  __ Push(r1, r0);
575 
576  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
577 }
578 
579 
580 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
581  // ---------- S t a t e --------------
582  // -- lr : return address
583  // -- r0 : key
584  // -- r1 : receiver
585  // -----------------------------------
586  Label slow, check_name, index_smi, index_name, property_array_property;
587  Label probe_dictionary, check_number_dictionary;
588 
589  Register key = r0;
590  Register receiver = r1;
591 
592  Isolate* isolate = masm->isolate();
593 
594  // Check that the key is a smi.
595  __ JumpIfNotSmi(key, &check_name);
596  __ bind(&index_smi);
597  // Now the key is known to be a smi. This place is also jumped to from below
598  // where a numeric string is converted to a smi.
599 
600  GenerateKeyedLoadReceiverCheck(
601  masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
602 
603  // Check the receiver's map to see if it has fast elements.
604  __ CheckFastElements(r2, r3, &check_number_dictionary);
605 
606  GenerateFastArrayLoad(
607  masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
608  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
609  __ Ret();
610 
611  __ bind(&check_number_dictionary);
612  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
613  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
614 
615  // Check whether the elements is a number dictionary.
616  // r0: key
617  // r3: elements map
618  // r4: elements
619  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
620  __ cmp(r3, ip);
621  __ b(ne, &slow);
622  __ SmiUntag(r2, r0);
623  __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
624  __ Ret();
625 
626  // Slow case, key and receiver still in r0 and r1.
627  __ bind(&slow);
628  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
629  1, r2, r3);
630  GenerateRuntimeGetProperty(masm);
631 
632  __ bind(&check_name);
633  GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
634 
635  GenerateKeyedLoadReceiverCheck(
636  masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
637 
638  // If the receiver is a fast-case object, check the keyed lookup
639  // cache. Otherwise probe the dictionary.
640  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
641  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
642  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
643  __ cmp(r4, ip);
644  __ b(eq, &probe_dictionary);
645 
646  // Load the map of the receiver, compute the keyed lookup cache hash
647  // based on 32 bits of the map pointer and the name hash.
648  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
649  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
650  __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
651  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
652  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
653  __ And(r3, r3, Operand(mask));
654 
655  // Load the key (consisting of map and unique name) from the cache and
656  // check for match.
657  Label load_in_object_property;
658  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
659  Label hit_on_nth_entry[kEntriesPerBucket];
660  ExternalReference cache_keys =
661  ExternalReference::keyed_lookup_cache_keys(isolate);
662 
663  __ mov(r4, Operand(cache_keys));
664  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
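  // Each cache_keys entry is a (map, name) pair, i.e. two pointers, so the
  // bucket index is shifted left by kPointerSizeLog2 + 1 to form a byte offset.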
665 
666  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
667  Label try_next_entry;
668  // Load map and move r4 to next entry.
669  __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
670  __ cmp(r2, r5);
671  __ b(ne, &try_next_entry);
672  __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
673  __ cmp(r0, r5);
674  __ b(eq, &hit_on_nth_entry[i]);
675  __ bind(&try_next_entry);
676  }
677 
678  // Last entry: Load map and move r4 to name.
679  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
680  __ cmp(r2, r5);
681  __ b(ne, &slow);
682  __ ldr(r5, MemOperand(r4));
683  __ cmp(r0, r5);
684  __ b(ne, &slow);
685 
686  // Get field offset.
687  // r0 : key
688  // r1 : receiver
689  // r2 : receiver's map
690  // r3 : lookup cache index
691  ExternalReference cache_field_offsets =
692  ExternalReference::keyed_lookup_cache_field_offsets(isolate);
693 
694  // Hit on nth entry.
695  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
696  __ bind(&hit_on_nth_entry[i]);
697  __ mov(r4, Operand(cache_field_offsets));
698  if (i != 0) {
699  __ add(r3, r3, Operand(i));
700  }
701  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
702  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
703  __ sub(r5, r5, r6, SetCC);
704  __ b(ge, &property_array_property);
705  if (i != 0) {
706  __ jmp(&load_in_object_property);
707  }
708  }
709 
710  // Load in-object property.
711  __ bind(&load_in_object_property);
712  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
713  __ add(r6, r6, r5); // Index from start of object.
714  __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
715  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
716  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
717  1, r2, r3);
718  __ Ret();
719 
720  // Load property array property.
721  __ bind(&property_array_property);
722  __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
723  __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
724  __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
725  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
726  1, r2, r3);
727  __ Ret();
728 
729  // Do a quick inline probe of the receiver's dictionary, if it
730  // exists.
731  __ bind(&probe_dictionary);
732  // r1: receiver
733  // r0: key
734  // r3: elements
735  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
736  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
737  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
738  // Load the property to r0.
739  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
740  __ IncrementCounter(
741  isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
742  __ Ret();
743 
744  __ bind(&index_name);
745  __ IndexFromHash(r3, key);
746  // Now jump to the place where smi keys are handled.
747  __ jmp(&index_smi);
748 }
749 
750 
751 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
752  // ---------- S t a t e --------------
753  // -- lr : return address
754  // -- r0 : key (index)
755  // -- r1 : receiver
756  // -----------------------------------
757  Label miss;
758 
759  Register receiver = r1;
760  Register index = r0;
761  Register scratch = r3;
762  Register result = r0;
763 
764  StringCharAtGenerator char_at_generator(receiver,
765  index,
766  scratch,
767  result,
768  &miss, // When not a string.
769  &miss, // When not a number.
770  &miss, // When index out of range.
771  STRING_INDEX_IS_ARRAY_INDEX);
772  char_at_generator.GenerateFast(masm);
773  __ Ret();
774 
775  StubRuntimeCallHelper call_helper;
776  char_at_generator.GenerateSlow(masm, call_helper);
777 
778  __ bind(&miss);
779  GenerateMiss(masm);
780 }
781 
782 
783 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
784  // ---------- S t a t e --------------
785  // -- lr : return address
786  // -- r0 : key
787  // -- r1 : receiver
788  // -----------------------------------
789  Label slow;
790 
791  // Check that the receiver isn't a smi.
792  __ JumpIfSmi(r1, &slow);
793 
794  // Check that the key is an array index, that is Uint32.
795  __ NonNegativeSmiTst(r0);
796  __ b(ne, &slow);
797 
798  // Get the map of the receiver.
799  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
800 
801  // Check that it has indexed interceptor and access checks
802  // are not enabled for this object.
803  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
804  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
805  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
806  __ b(ne, &slow);
807 
808  // Everything is fine, call runtime.
809  __ Push(r1, r0); // Receiver, key.
810 
811  // Perform tail call to the entry.
812  __ TailCallExternalReference(
813  ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
814  masm->isolate()),
815  2,
816  1);
817 
818  __ bind(&slow);
819  GenerateMiss(masm);
820 }
821 
822 
823 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
824  // ---------- S t a t e --------------
825  // -- r0 : value
826  // -- r1 : key
827  // -- r2 : receiver
828  // -- lr : return address
829  // -----------------------------------
830 
831  // Push receiver, key and value for runtime call.
832  __ Push(r2, r1, r0);
833 
834  ExternalReference ref =
835  ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
836  __ TailCallExternalReference(ref, 3, 1);
837 }
838 
839 
840 void StoreIC::GenerateSlow(MacroAssembler* masm) {
841  // ---------- S t a t e --------------
842  // -- r0 : value
843  // -- r2 : key
844  // -- r1 : receiver
845  // -- lr : return address
846  // -----------------------------------
847 
848  // Push receiver, key and value for runtime call.
849  __ Push(r1, r2, r0);
850 
851  // The slow case calls into the runtime to complete the store without causing
852  // an IC miss that would otherwise cause a transition to the generic stub.
853  ExternalReference ref =
854  ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
855  __ TailCallExternalReference(ref, 3, 1);
856 }
857 
858 
859 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
860  // ---------- S t a t e --------------
861  // -- r0 : value
862  // -- r1 : key
863  // -- r2 : receiver
864  // -- lr : return address
865  // -----------------------------------
866 
867  // Push receiver, key and value for runtime call.
868  __ Push(r2, r1, r0);
869 
870  // The slow case calls into the runtime to complete the store without causing
871  // an IC miss that would otherwise cause a transition to the generic stub.
872  ExternalReference ref =
873  ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
874  __ TailCallExternalReference(ref, 3, 1);
875 }
876 
877 
878 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
879  StrictMode strict_mode) {
880  // ---------- S t a t e --------------
881  // -- r0 : value
882  // -- r1 : key
883  // -- r2 : receiver
884  // -- lr : return address
885  // -----------------------------------
886 
887  // Push receiver, key and value for runtime call.
888  __ Push(r2, r1, r0);
889 
890  __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
891  __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
892  __ Push(r1, r0);
893 
894  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
895 }
896 
897 
898 static void KeyedStoreGenerateGenericHelper(
899  MacroAssembler* masm,
900  Label* fast_object,
901  Label* fast_double,
902  Label* slow,
903  KeyedStoreCheckMap check_map,
904  KeyedStoreIncrementLength increment_length,
905  Register value,
906  Register key,
907  Register receiver,
908  Register receiver_map,
909  Register elements_map,
910  Register elements) {
911  Label transition_smi_elements;
912  Label finish_object_store, non_double_value, transition_double_elements;
913  Label fast_double_without_map_check;
914 
915  // Fast case: Do the store, could be either Object or double.
916  __ bind(fast_object);
917  Register scratch_value = r4;
918  Register address = r5;
919  if (check_map == kCheckMap) {
920  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
921  __ cmp(elements_map,
922  Operand(masm->isolate()->factory()->fixed_array_map()));
923  __ b(ne, fast_double);
924  }
925 
926  // HOLECHECK: guards "A[i] = V"
927  // We have to go to the runtime if the current value is the hole because
928  // there may be a callback on the element
929  Label holecheck_passed1;
930  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
931  __ ldr(scratch_value,
932  MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
933  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
934  __ b(ne, &holecheck_passed1);
935  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
936  slow);
937 
938  __ bind(&holecheck_passed1);
939 
940  // Smi stores don't require further checks.
941  Label non_smi_value;
942  __ JumpIfNotSmi(value, &non_smi_value);
943 
944  if (increment_length == kIncrementLength) {
945  // Add 1 to receiver->length.
946  __ add(scratch_value, key, Operand(Smi::FromInt(1)));
947  __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
948  }
949  // It's irrelevant whether array is smi-only or not when writing a smi.
950  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
951  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
952  __ Ret();
953 
954  __ bind(&non_smi_value);
955  // Escape to elements kind transition case.
956  __ CheckFastObjectElements(receiver_map, scratch_value,
957  &transition_smi_elements);
958 
959  // Fast elements array, store the value to the elements backing store.
960  __ bind(&finish_object_store);
961  if (increment_length == kIncrementLength) {
962  // Add 1 to receiver->length.
963  __ add(scratch_value, key, Operand(Smi::FromInt(1)));
964  __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
965  }
966  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
967  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
968  __ str(value, MemOperand(address));
969  // Update write barrier for the elements array address.
970  __ mov(scratch_value, value); // Preserve the value which is returned.
971  __ RecordWrite(elements,
972  address,
973  scratch_value,
974  kLRHasNotBeenSaved,
975  kDontSaveFPRegs,
976  EMIT_REMEMBERED_SET,
977  OMIT_SMI_CHECK);
978  __ Ret();
979 
980  __ bind(fast_double);
981  if (check_map == kCheckMap) {
982  // Check for fast double array case. If this fails, call through to the
983  // runtime.
984  __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
985  __ b(ne, slow);
986  }
987 
988  // HOLECHECK: guards "A[i] double hole?"
989  // We have to see if the double version of the hole is present. If so
990  // go to the runtime.
991  __ add(address, elements,
992  Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
993  - kHeapObjectTag));
994  __ ldr(scratch_value,
995  MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
996  __ cmp(scratch_value, Operand(kHoleNanUpper32));
997  __ b(ne, &fast_double_without_map_check);
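  // The double-array hole is a NaN whose upper 32 bits equal kHoleNanUpper32,
  // so comparing just the upper word of the element is enough to detect it.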
998  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
999  slow);
1000 
1001  __ bind(&fast_double_without_map_check);
1002  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
1003  &transition_double_elements);
1004  if (increment_length == kIncrementLength) {
1005  // Add 1 to receiver->length.
1006  __ add(scratch_value, key, Operand(Smi::FromInt(1)));
1007  __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
1008  }
1009  __ Ret();
1010 
1011  __ bind(&transition_smi_elements);
1012  // Transition the array appropriately depending on the value type.
1013  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
1014  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
1015  __ b(ne, &non_double_value);
1016 
1017  // Value is a double. Transition FAST_SMI_ELEMENTS ->
1018  // FAST_DOUBLE_ELEMENTS and complete the store.
1019  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1020  FAST_DOUBLE_ELEMENTS,
1021  receiver_map,
1022  r4,
1023  slow);
1024  ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
1025  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
1026  FAST_DOUBLE_ELEMENTS);
1027  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
1028  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1029  __ jmp(&fast_double_without_map_check);
1030 
1031  __ bind(&non_double_value);
1032  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
1033  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1034  FAST_ELEMENTS,
1035  receiver_map,
1036  r4,
1037  slow);
1038  ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
1039  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
1040  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
1041  slow);
1042  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1043  __ jmp(&finish_object_store);
1044 
1045  __ bind(&transition_double_elements);
1046  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
1047  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
1048  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
1049  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
1050  FAST_ELEMENTS,
1051  receiver_map,
1052  r4,
1053  slow);
1054  ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
1055  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
1056  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
1057  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1058  __ jmp(&finish_object_store);
1059 }
1060 
1061 
1062 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
1063  StrictMode strict_mode) {
1064  // ---------- S t a t e --------------
1065  // -- r0 : value
1066  // -- r1 : key
1067  // -- r2 : receiver
1068  // -- lr : return address
1069  // -----------------------------------
1070  Label slow, fast_object, fast_object_grow;
1071  Label fast_double, fast_double_grow;
1072  Label array, extra, check_if_double_array;
1073 
1074  // Register usage.
1075  Register value = r0;
1076  Register key = r1;
1077  Register receiver = r2;
1078  Register receiver_map = r3;
1079  Register elements_map = r6;
1080  Register elements = r9; // Elements array of the receiver.
1081  // r4 and r5 are used as general scratch registers.
1082 
1083  // Check that the key is a smi.
1084  __ JumpIfNotSmi(key, &slow);
1085  // Check that the object isn't a smi.
1086  __ JumpIfSmi(receiver, &slow);
1087  // Get the map of the object.
1088  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
1089  // Check that the receiver does not require access checks and is not observed.
1090  // The generic stub does not perform map checks or handle observed objects.
1091  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
1092  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
1093  __ b(ne, &slow);
1094  // Check if the object is a JS array or not.
1095  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
1096  __ cmp(r4, Operand(JS_ARRAY_TYPE));
1097  __ b(eq, &array);
1098  // Check that the object is some kind of JSObject.
1099  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
1100  __ b(lt, &slow);
1101 
1102  // Object case: Check key against length in the elements array.
1103  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1104  // Check array bounds. Both the key and the length of FixedArray are smis.
1105  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1106  __ cmp(key, Operand(ip));
1107  __ b(lo, &fast_object);
1108 
1109  // Slow case, handle jump to runtime.
1110  __ bind(&slow);
1111  // Entry registers are intact.
1112  // r0: value.
1113  // r1: key.
1114  // r2: receiver.
1115  GenerateRuntimeSetProperty(masm, strict_mode);
1116 
1117  // Extra capacity case: Check if there is extra capacity to
1118  // perform the store and update the length. Used for adding one
1119  // element to the array by writing to array[array.length].
1120  __ bind(&extra);
1121  // Condition code from comparing key and array length is still available.
1122  __ b(ne, &slow); // Only support writing to array[array.length].
1123  // Check for room in the elements backing store.
1124  // Both the key and the length of FixedArray are smis.
1125  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1126  __ cmp(key, Operand(ip));
1127  __ b(hs, &slow);
1128  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
1129  __ cmp(elements_map,
1130  Operand(masm->isolate()->factory()->fixed_array_map()));
1131  __ b(ne, &check_if_double_array);
1132  __ jmp(&fast_object_grow);
1133 
1134  __ bind(&check_if_double_array);
1135  __ cmp(elements_map,
1136  Operand(masm->isolate()->factory()->fixed_double_array_map()));
1137  __ b(ne, &slow);
1138  __ jmp(&fast_double_grow);
1139 
1140  // Array case: Get the length and the elements array from the JS
1141  // array. Check that the array is in fast mode (and writable); if it
1142  // is the length is always a smi.
1143  __ bind(&array);
1144  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1145 
1146  // Check the key against the length in the array.
1147  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
1148  __ cmp(key, Operand(ip));
1149  __ b(hs, &extra);
1150 
1151  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
1152  &slow, kCheckMap, kDontIncrementLength,
1153  value, key, receiver, receiver_map,
1154  elements_map, elements);
1155  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1156  &slow, kDontCheckMap, kIncrementLength,
1157  value, key, receiver, receiver_map,
1158  elements_map, elements);
1159 }
1160 
1161 
1162 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
1163  // ----------- S t a t e -------------
1164  // -- r0 : value
1165  // -- r1 : receiver
1166  // -- r2 : name
1167  // -- lr : return address
1168  // -----------------------------------
1169 
1170  // Get the receiver from the stack and probe the stub cache.
1171  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
1172 
1173  masm->isolate()->stub_cache()->GenerateProbe(
1174  masm, flags, r1, r2, r3, r4, r5, r6);
1175 
1176  // Cache miss: Jump to runtime.
1177  GenerateMiss(masm);
1178 }
1179 
1180 
1181 void StoreIC::GenerateMiss(MacroAssembler* masm) {
1182  // ----------- S t a t e -------------
1183  // -- r0 : value
1184  // -- r1 : receiver
1185  // -- r2 : name
1186  // -- lr : return address
1187  // -----------------------------------
1188 
1189  __ Push(r1, r2, r0);
1190 
1191  // Perform tail call to the entry.
1192  ExternalReference ref =
1193  ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
1194  __ TailCallExternalReference(ref, 3, 1);
1195 }
1196 
1197 
1198 void StoreIC::GenerateNormal(MacroAssembler* masm) {
1199  // ----------- S t a t e -------------
1200  // -- r0 : value
1201  // -- r1 : receiver
1202  // -- r2 : name
1203  // -- lr : return address
1204  // -----------------------------------
1205  Label miss;
1206 
1207  GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
1208 
1209  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
1210  Counters* counters = masm->isolate()->counters();
1211  __ IncrementCounter(counters->store_normal_hit(),
1212  1, r4, r5);
1213  __ Ret();
1214 
1215  __ bind(&miss);
1216  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
1217  GenerateMiss(masm);
1218 }
1219 
1220 
1221 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1222  StrictMode strict_mode) {
1223  // ----------- S t a t e -------------
1224  // -- r0 : value
1225  // -- r1 : receiver
1226  // -- r2 : name
1227  // -- lr : return address
1228  // -----------------------------------
1229 
1230  __ Push(r1, r2, r0);
1231 
1232  __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
1233  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
1234  __ Push(r1, r0);
1235 
1236  // Do tail-call to runtime routine.
1237  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
1238 }
1239 
1240 
1241 #undef __
1242 
1243 
1244 Condition CompareIC::ComputeCondition(Token::Value op) {
1245  switch (op) {
1246  case Token::EQ_STRICT:
1247  case Token::EQ:
1248  return eq;
1249  case Token::LT:
1250  return lt;
1251  case Token::GT:
1252  return gt;
1253  case Token::LTE:
1254  return le;
1255  case Token::GTE:
1256  return ge;
1257  default:
1258  UNREACHABLE();
1259  return kNoCondition;
1260  }
1261 }
1262 
1263 
1264 bool CompareIC::HasInlinedSmiCode(Address address) {
1265  // The address of the instruction following the call.
1266  Address cmp_instruction_address =
1267  address + Assembler::kCallTargetAddressOffset;
1268 
1269  // If the instruction following the call is not a cmp rx, #yyy, nothing
1270  // was inlined.
1271  Instr instr = Assembler::instr_at(cmp_instruction_address);
1272  return Assembler::IsCmpImmediate(instr);
1273 }
1274 
1275 
1276 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1277  Address cmp_instruction_address =
1278  address + Assembler::kCallTargetAddressOffset;
1279 
1280  // If the instruction following the call is not a cmp rx, #yyy, nothing
1281  // was inlined.
1282  Instr instr = Assembler::instr_at(cmp_instruction_address);
1283  if (!Assembler::IsCmpImmediate(instr)) {
1284  return;
1285  }
1286 
1287  // The delta to the start of the map check instruction and the
1288  // condition code used at the patched jump.
1289  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
1290  delta +=
1291  Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
1292  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
1293  // nothing was inlined.
1294  if (delta == 0) {
1295  return;
1296  }
1297 
1298  if (FLAG_trace_ic) {
1299  PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
1300  address, cmp_instruction_address, delta);
1301  }
1302 
1303  Address patch_address =
1304  cmp_instruction_address - delta * Instruction::kInstrSize;
1305  Instr instr_at_patch = Assembler::instr_at(patch_address);
1306  Instr branch_instr =
1307  Assembler::instr_at(patch_address + Instruction::kInstrSize);
1308  // This is patching a conditional "jump if not smi/jump if smi" site.
1309  // Enabling by changing from
1310  // cmp rx, rx
1311  // b eq/ne, <target>
1312  // to
1313  // tst rx, #kSmiTagMask
1314  // b ne/eq, <target>
1315  // and vice-versa to be disabled again.
1316  CodePatcher patcher(patch_address, 2);
1317  Register reg = Assembler::GetRn(instr_at_patch);
1318  if (check == ENABLE_INLINED_SMI_CHECK) {
1319  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
1320  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
1321  Assembler::GetRm(instr_at_patch).code());
1322  patcher.masm()->tst(reg, Operand(kSmiTagMask));
1323  } else {
1324  ASSERT(check == DISABLE_INLINED_SMI_CHECK);
1325  ASSERT(Assembler::IsTstImmediate(instr_at_patch));
1326  patcher.masm()->cmp(reg, reg);
1327  }
1328  ASSERT(Assembler::IsBranch(branch_instr));
1329  if (Assembler::GetCondition(branch_instr) == eq) {
1330  patcher.EmitCondition(ne);
1331  } else {
1332  ASSERT(Assembler::GetCondition(branch_instr) == ne);
1333  patcher.EmitCondition(eq);
1334  }
1335 }
1336 
1337 
1338 } } // namespace v8::internal
1339 
1340 #endif // V8_TARGET_ARCH_ARM
byte * Address
Definition: globals.h:186
static void GenerateSloppyArguments(MacroAssembler *masm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
static bool IsBranch(Instr instr)
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
const intptr_t kSmiTagMask
Definition: v8.h:5480
static bool IsCmpRegister(Instr instr)
const Register r3
static const int kMapHashShift
Definition: heap.h:2759
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
const LowDwVfpRegister d0
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
static Flags ComputeHandlerFlags(Kind handler_kind, StubType type=NORMAL, InlineCacheHolderFlag holder=OWN_MAP)
Definition: objects-inl.h:4624
static void GenerateMiss(MacroAssembler *masm)
const Register r6
static int GetCmpImmediateRawImmediate(Instr instr)
static const unsigned int kContainsCachedArrayIndexMask
Definition: objects.h:8673
static void GenerateRuntimeSetProperty(MacroAssembler *masm, StrictMode strict_mode)
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
KeyedStoreCheckMap
Definition: ic.h:572
static const int kHasNamedInterceptor
Definition: objects.h:6470
static const int kIsAccessCheckNeeded
Definition: objects.h:6474
uint32_t Flags
Definition: objects.h:5184
static Register GetRm(Instr instr)
static void GenerateMegamorphic(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:329
static bool IsCmpImmediate(Instr instr)
const int kPointerSizeLog2
Definition: globals.h:281
static const int kInstanceSizeOffset
Definition: objects.h:6448
Isolate * isolate() const
Definition: ic.h:157
static void GenerateMegamorphic(MacroAssembler *masm)
const Register r2
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check)
static void GenerateGeneric(MacroAssembler *masm, StrictMode strict_mode)
static Condition GetCondition(Instr instr)
static const int kHasIndexedInterceptor
Definition: objects.h:6471
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static Condition ComputeCondition(Token::Value op)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
Print usage including flags
Definition: flags.cc:665
const uint32_t kHoleNanUpper32
Definition: v8globals.h:454
const Register ip
const Register r9
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
static void GenerateGeneric(MacroAssembler *masm)
const int kHeapObjectTag
Definition: v8.h:5473
static void GenerateMiss(MacroAssembler *masm)
const uint32_t kHoleNanLower32
Definition: v8globals.h:455
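The kHoleNanUpper32/kHoleNanLower32 pair referenced above encodes the NaN bit pattern that marks a "hole" (a missing element) in a FixedDoubleArray. As a rough C++ sketch of the check the element-transition code performs (the helper name is illustrative, not part of the V8 sources): since the doubles the VM stores are canonicalized, comparing only the upper word is normally enough to identify the hole.

static bool IsHoleNan(uint64_t element_bits) {
  // The hole sentinel is recognizable from its upper 32 bits alone.
  return static_cast<uint32_t>(element_bits >> 32) == kHoleNanUpper32;
}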
#define __
static void GenerateRuntimeSetProperty(MacroAssembler *masm, StrictMode strict_mode)
static const int kPropertiesOffset
Definition: objects.h:2755
static const int kInObjectPropertiesOffset
Definition: objects.h:6450
static void GenerateSlow(MacroAssembler *masm)
const Register r0
static const int kElementsOffset
Definition: objects.h:2756
static Register GetRn(Instr instr)
const uint32_t kInternalizedTag
Definition: objects.h:605
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static bool IsTstImmediate(Instr instr)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateSloppyArguments(MacroAssembler *masm)
static const int kHeaderSize
Definition: objects.h:3016
static void GenerateSlow(MacroAssembler *masm)
static const int kMapOffset
Definition: objects.h:1890
static const int kIsObserved
Definition: objects.h:6473
const Register r1
static const int kLengthOffset
Definition: objects.h:3015
static const int kSlowCaseBitFieldMask
Definition: ic.h:432
KeyedStoreIncrementLength
Definition: ic.h:578
InlinedSmiCheck
Definition: ic.h:920
MemOperand FieldMemOperand(Register object, int offset)
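FieldMemOperand is the helper used throughout the IC stubs to address a field inside a tagged heap object: heap pointers carry kHeapObjectTag in their low bits, so the field's byte offset is adjusted by that tag before the memory operand is formed. A minimal sketch of the idea (essentially what the ARM assembler headers define):

inline MemOperand FieldMemOperand(Register object, int offset) {
  // Subtract the heap-object tag so the load/store sees the untagged address
  // of the field located 'offset' bytes into the object.
  return MemOperand(object, offset - kHeapObjectTag);
}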
static void GenerateString(MacroAssembler *masm)
static Register GetCmpImmediateRegister(Instr instr)
static void GenerateMapChangeElementsTransition(MacroAssembler *masm, AllocationSiteMode mode, Label *allocation_memento_found)
const int kSmiTagSize
Definition: v8.h:5479
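kSmiTagSize describes the small-integer ("smi") encoding that checks such as JumpIfSmi rely on: on 32-bit targets a smi is the integer value shifted left by one tag bit, with the tag bit itself zero. A rough sketch, ignoring overflow handling (helper names are illustrative, not V8's Smi API):

inline int32_t IntToSmi(int32_t value) { return value << kSmiTagSize; }
inline int32_t SmiToInt(int32_t smi)   { return smi >> kSmiTagSize; }
inline bool LooksLikeSmi(int32_t word) {
  // A word is a smi candidate when its low tag bits are clear.
  return (word & ((1 << kSmiTagSize) - 1)) == 0;
}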
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1477
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
static void GenerateSmiToDouble(MacroAssembler *masm, AllocationSiteMode mode, Label *fail)
static Address return_address_from_call_start(Address pc)
static void GenerateNormal(MacroAssembler *masm)
static void GenerateIndexedInterceptor(MacroAssembler *masm)
static const int kHashShift
Definition: objects.h:8642
static const int kCapacityMask
Definition: heap.h:2758
static void GenerateMiss(MacroAssembler *masm)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
name
Definition: flags.cc:505
static void GenerateDoubleToObject(MacroAssembler *masm, AllocationSiteMode mode, Label *fail)
static const int kHashMask
Definition: heap.h:2760
const Register r5
static const int kInstanceTypeOffset
Definition: objects.h:6459
static const int kEntriesPerBucket
Definition: heap.h:2761
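kCapacityMask, kHashMask and kEntriesPerBucket above belong to the KeyedLookupCache that KeyedLoadIC::GenerateGeneric probes before falling back to a dictionary lookup: a hash of the receiver's map and the key name is masked down to a bucket-aligned index, and the kEntriesPerBucket entries in that bucket are compared in turn. A C++ sketch of roughly the index computation the hand-written assembly mirrors (the parameter and function names here are assumptions, not the exact V8 declarations):

static uint32_t BucketIndex(uint32_t map_name_hash) {
  // kHashMask zeroes the low bits so the index lands on the first of the
  // kEntriesPerBucket consecutive (map, name) entries forming a bucket.
  return map_name_hash &
         (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
}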
static void GenerateNormal(MacroAssembler *masm)
static void GenerateMiss(MacroAssembler *masm)
const Register r4