v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
ic-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 
29 
30 #include "v8.h"
31 
32 #if V8_TARGET_ARCH_MIPS
33 
34 #include "codegen.h"
35 #include "code-stubs.h"
36 #include "ic-inl.h"
37 #include "runtime.h"
38 #include "stub-cache.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 
44 // ----------------------------------------------------------------------------
45 // Static IC stub generators.
46 //
47 
48 #define __ ACCESS_MASM(masm)
49 
50 
51 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
52  Register type,
53  Label* global_object) {
54  // Register usage:
55  // type: holds the receiver instance type on entry.
56  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
57  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
58  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
59 }
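// Note on the helper above: global objects, builtins objects and global
// proxies are routed to the global_object label because their named
// properties are stored in property cells, so they cannot be handled by the
// plain dictionary fast paths that follow.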
60 
61 
62 // Generated code falls through if the receiver is a regular non-global
63 // JS object with slow properties and no interceptors.
64 static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
65  Register receiver,
66  Register elements,
67  Register scratch0,
68  Register scratch1,
69  Label* miss) {
70  // Register usage:
71  // receiver: holds the receiver on entry and is unchanged.
72  // elements: holds the property dictionary on fall through.
73  // Scratch registers:
74  // scratch0: used to hold the receiver map.
75  // scratch1: used to hold the receiver instance type, receiver bit mask
76  // and elements map.
77 
78  // Check that the receiver isn't a smi.
79  __ JumpIfSmi(receiver, miss);
80 
81  // Check that the receiver is a valid JS object.
82  __ GetObjectType(receiver, scratch0, scratch1);
83  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
84 
85  // If this assert fails, we have to check upper bound too.
86  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
87 
88  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
89 
90  // Check that the global object does not require access checks.
91  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
92  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
93  (1 << Map::kHasNamedInterceptor)));
94  __ Branch(miss, ne, scratch1, Operand(zero_reg));
95 
96  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
97  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
98  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
99  __ Branch(miss, ne, scratch1, Operand(scratch0));
100 }
101 
102 
103 // Helper function used from LoadIC GenerateNormal.
104 //
105 // elements: Property dictionary. It is not clobbered if a jump to the miss
106 // label is done.
107 // name: Property name. It is not clobbered if a jump to the miss label is
108 // done
109 // result: Register for the result. It is only updated if a jump to the miss
110 // label is not done. Can be the same as elements or name clobbering
111 // one of these in the case of not jumping to the miss label.
112 // The two scratch registers need to be different from elements, name and
113 // result.
114 // The generated code assumes that the receiver has slow properties,
115 // is not a global object and does not have interceptors.
116 // The address returned from GenerateStringDictionaryProbes() in scratch2
117 // is used.
118 static void GenerateDictionaryLoad(MacroAssembler* masm,
119  Label* miss,
120  Register elements,
121  Register name,
122  Register result,
123  Register scratch1,
124  Register scratch2) {
125  // Main use of the scratch registers.
126  // scratch1: Used as temporary and to hold the capacity of the property
127  // dictionary.
128  // scratch2: Used as temporary.
129  Label done;
130 
131  // Probe the dictionary.
132  NameDictionary::GeneratePositiveLookup(masm,
133  miss,
134  &done,
135  elements,
136  name,
137  scratch1,
138  scratch2);
139 
140  // If probing finds an entry check that the value is a normal
141  // property.
142  __ bind(&done); // scratch2 == elements + 4 * index.
143  const int kElementsStartOffset = NameDictionary::kHeaderSize +
144  NameDictionary::kElementsStartIndex * kPointerSize;
145  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
146  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
147  __ And(at,
148  scratch1,
149  Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
150  __ Branch(miss, ne, at, Operand(zero_reg));
151 
152  // Get the value at the masked, scaled index and return.
153  __ lw(result,
154  FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
155 }
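// Note on the offsets used above: a NameDictionary entry is a
// (key, value, details) triple, so once the probe leaves the address of the
// matching key in scratch2, the value sits one pointer after it and the
// details smi two pointers after it; kElementsStartOffset merely skips the
// hash table header to the first entry.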
156 
157 
158 // Helper function used from StoreIC::GenerateNormal.
159 //
160 // elements: Property dictionary. It is not clobbered if a jump to the miss
161 // label is done.
162 // name: Property name. It is not clobbered if a jump to the miss label is
163 // done
164 // value: The value to store.
165 // The two scratch registers need to be different from elements, name and
166 // result.
167 // The generated code assumes that the receiver has slow properties,
168 // is not a global object and does not have interceptors.
169 // The address returned from GenerateStringDictionaryProbes() in scratch2
170 // is used.
171 static void GenerateDictionaryStore(MacroAssembler* masm,
172  Label* miss,
173  Register elements,
174  Register name,
175  Register value,
176  Register scratch1,
177  Register scratch2) {
178  // Main use of the scratch registers.
179  // scratch1: Used as temporary and to hold the capacity of the property
180  // dictionary.
181  // scratch2: Used as temporary.
182  Label done;
183 
184  // Probe the dictionary.
185  NameDictionary::GeneratePositiveLookup(masm,
186  miss,
187  &done,
188  elements,
189  name,
190  scratch1,
191  scratch2);
192 
193  // If probing finds an entry in the dictionary check that the value
194  // is a normal property that is not read only.
195  __ bind(&done); // scratch2 == elements + 4 * index.
196  const int kElementsStartOffset = NameDictionary::kHeaderSize +
197  NameDictionary::kElementsStartIndex * kPointerSize;
198  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
199  const int kTypeAndReadOnlyMask =
200  (PropertyDetails::TypeField::kMask |
201  PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
202  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
203  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
204  __ Branch(miss, ne, at, Operand(zero_reg));
205 
206  // Store the value at the masked, scaled index and return.
207  const int kValueOffset = kElementsStartOffset + kPointerSize;
208  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
209  __ sw(value, MemOperand(scratch2));
210 
211  // Update the write barrier. Make sure not to clobber the value.
212  __ mov(scratch1, value);
213  __ RecordWrite(
214  elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
215 }
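// Note: kTypeAndReadOnlyMask is shifted by kSmiTagSize because the details
// word is stored as a smi. A non-zero AND result means the property is
// either not a NORMAL field or is marked READ_ONLY; both cases bail out to
// the miss label so the runtime can handle the store.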
216 
217 
218 // Checks the receiver for special cases (value type, slow case bits).
219 // Falls through for regular JS object.
220 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
221  Register receiver,
222  Register map,
223  Register scratch,
224  int interceptor_bit,
225  Label* slow) {
226  // Check that the object isn't a smi.
227  __ JumpIfSmi(receiver, slow);
228  // Get the map of the receiver.
229  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
230  // Check bit field.
231  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
232  __ And(at, scratch,
233  Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
234  __ Branch(slow, ne, at, Operand(zero_reg));
235  // Check that the object is some kind of JS object EXCEPT JS Value type.
236  // In the case that the object is a value-wrapper object,
237  // we enter the runtime system to make sure that indexing into string
238  // objects works as intended.
239  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
240  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
241  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
242 }
243 
244 
245 // Loads an indexed element from a fast case array.
246 // If not_fast_array is NULL, doesn't perform the elements map check.
247 static void GenerateFastArrayLoad(MacroAssembler* masm,
248  Register receiver,
249  Register key,
250  Register elements,
251  Register scratch1,
252  Register scratch2,
253  Register result,
254  Label* not_fast_array,
255  Label* out_of_range) {
256  // Register use:
257  //
258  // receiver - holds the receiver on entry.
259  // Unchanged unless 'result' is the same register.
260  //
261  // key - holds the smi key on entry.
262  // Unchanged unless 'result' is the same register.
263  //
264  // elements - holds the elements of the receiver on exit.
265  //
266  // result - holds the result on exit if the load succeeded.
267  // Allowed to be the same as 'receiver' or 'key'.
268  // Unchanged on bailout so 'receiver' and 'key' can be safely
269  // used by further computation.
270  //
271  // Scratch registers:
272  //
273  // scratch1 - used to hold elements map and elements length.
274  // Holds the elements map if not_fast_array branch is taken.
275  //
276  // scratch2 - used to hold the loaded value.
277 
278  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
279  if (not_fast_array != NULL) {
280  // Check that the object is in fast mode (not dictionary).
281  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
282  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
283  __ Branch(not_fast_array, ne, scratch1, Operand(at));
284  } else {
285  __ AssertFastElements(elements);
286  }
287 
288  // Check that the key (index) is within bounds.
289  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
290  __ Branch(out_of_range, hs, key, Operand(scratch1));
291 
292  // Fast case: Do the load.
293  __ Addu(scratch1, elements,
294  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
295  // The key is a smi.
296  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
297  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
298  __ addu(at, at, scratch1);
299  __ lw(scratch2, MemOperand(at));
300 
301  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
302  // In case the loaded value is the_hole we have to consult GetProperty
303  // to ensure the prototype chain is searched.
304  __ Branch(out_of_range, eq, scratch2, Operand(at));
305  __ mov(result, scratch2);
306 }
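// Note on the key scaling above: the key is a smi, i.e. the index shifted
// left by kSmiTagSize (1 on 32-bit targets), so shifting it left by
// kPointerSizeLog2 - kSmiTagSize converts the tagged key directly into a
// byte offset of index * kPointerSize without untagging it first.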
307 
308 
309 // Checks whether a key is an array index string or a unique name.
310 // Falls through if a key is a unique name.
311 static void GenerateKeyNameCheck(MacroAssembler* masm,
312  Register key,
313  Register map,
314  Register hash,
315  Label* index_string,
316  Label* not_unique) {
317  // The key is not a smi.
318  Label unique;
319  // Is it a name?
320  __ GetObjectType(key, map, hash);
321  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
322  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
323  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
324 
325  // Is the string an array index, with cached numeric value?
326  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
327  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
328  __ Branch(index_string, eq, at, Operand(zero_reg));
329 
330  // Is the string internalized? We know it's a string, so a single
331  // bit test is enough.
332  // map: key map
333  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
334  STATIC_ASSERT(kInternalizedTag == 0);
335  __ And(at, hash, Operand(kIsNotInternalizedMask));
336  __ Branch(not_unique, ne, at, Operand(zero_reg));
337 
338  __ bind(&unique);
339 }
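// Note: when none of the bits in kContainsCachedArrayIndexMask are set, the
// name's hash field caches its numeric array index, so the caller can branch
// to index_string and recover the index via IndexFromHash instead of doing a
// property lookup.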
340 
341 
342 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
343  // ----------- S t a t e -------------
344  // -- a2 : name
345  // -- ra : return address
346  // -- a0 : receiver
347  // -----------------------------------
348 
349  // Probe the stub cache.
350  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
351  masm->isolate()->stub_cache()->GenerateProbe(
352  masm, flags, a0, a2, a3, t0, t1, t2);
353 
354  // Cache miss: Jump to runtime.
355  GenerateMiss(masm);
356 }
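// Note: GenerateProbe hashes the receiver map together with the name into
// the isolate's stub cache and, on a hit, jumps straight to the cached
// handler; only a probe miss falls through to GenerateMiss above.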
357 
358 
359 void LoadIC::GenerateNormal(MacroAssembler* masm) {
360  // ----------- S t a t e -------------
361  // -- a2 : name
362  // -- ra : return address
363  // -- a0 : receiver
364  // -----------------------------------
365  Label miss;
366 
367  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
368 
369  // a1: elements
370  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
371  __ Ret();
372 
373  // Cache miss: Jump to runtime.
374  __ bind(&miss);
375  GenerateMiss(masm);
376 }
377 
378 
379 void LoadIC::GenerateMiss(MacroAssembler* masm) {
380  // ----------- S t a t e -------------
381  // -- a2 : name
382  // -- ra : return address
383  // -- a0 : receiver
384  // -----------------------------------
385  Isolate* isolate = masm->isolate();
386 
387  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
388 
389  __ mov(a3, a0);
390  __ Push(a3, a2);
391 
392  // Perform tail call to the entry.
393  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
394  __ TailCallExternalReference(ref, 2, 1);
395 }
396 
397 
398 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
399  // ---------- S t a t e --------------
400  // -- a2 : name
401  // -- ra : return address
402  // -- a0 : receiver
403  // -----------------------------------
404 
405  __ mov(a3, a0);
406  __ Push(a3, a2);
407 
408  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
409 }
410 
411 
412 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
413  Register object,
414  Register key,
415  Register scratch1,
416  Register scratch2,
417  Register scratch3,
418  Label* unmapped_case,
419  Label* slow_case) {
420  Heap* heap = masm->isolate()->heap();
421 
422  // Check that the receiver is a JSObject. Because of the map check
423  // later, we do not need to check for interceptors or whether it
424  // requires access checks.
425  __ JumpIfSmi(object, slow_case);
426  // Check that the object is some kind of JSObject.
427  __ GetObjectType(object, scratch1, scratch2);
428  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
429 
430  // Check that the key is a positive smi.
431  __ And(scratch1, key, Operand(0x80000001));
432  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
433 
434  // Load the elements into scratch1 and check its map.
435  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
436  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
437  __ CheckMap(scratch1,
438  scratch2,
439  arguments_map,
440  slow_case,
441  DONT_DO_SMI_CHECK);
442  // Check if element is in the range of mapped arguments. If not, jump
443  // to the unmapped lookup with the parameter map in scratch1.
444  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
445  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
446  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
447 
448  // Load element index and check whether it is the hole.
449  const int kOffset =
450  FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
451 
452  __ li(scratch3, Operand(kPointerSize >> 1));
453  __ Mul(scratch3, key, scratch3);
454  __ Addu(scratch3, scratch3, Operand(kOffset));
455 
456  __ Addu(scratch2, scratch1, scratch3);
457  __ lw(scratch2, MemOperand(scratch2));
458  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
459  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
460 
461  // Load value from context and return it. We can reuse scratch1 because
462  // we do not jump to the unmapped lookup (which requires the parameter
463  // map in scratch1).
464  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
465  __ li(scratch3, Operand(kPointerSize >> 1));
466  __ Mul(scratch3, scratch2, scratch3);
467  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
468  __ Addu(scratch2, scratch1, scratch3);
469  return MemOperand(scratch2);
470 }
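// Note on the layout assumed above: sloppy-arguments elements start with the
// context (slot 0) and the backing store FixedArray (slot 1), followed by
// one slot per mapped parameter holding either a context slot index (a smi)
// or the hole. That is why kOffset skips two pointers past the FixedArray
// header and why hitting the hole redirects to the unmapped lookup.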
471 
472 
473 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
474  Register key,
475  Register parameter_map,
476  Register scratch,
477  Label* slow_case) {
478  // Element is in arguments backing store, which is referenced by the
479  // second element of the parameter_map. The parameter_map register
480  // must be loaded with the parameter map of the arguments object and is
481  // overwritten.
482  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
483  Register backing_store = parameter_map;
484  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
485  __ CheckMap(backing_store,
486  scratch,
487  Heap::kFixedArrayMapRootIndex,
488  slow_case,
489  DONT_DO_SMI_CHECK);
490  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
491  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
492  __ li(scratch, Operand(kPointerSize >> 1));
493  __ Mul(scratch, key, scratch);
494  __ Addu(scratch,
495  scratch,
496  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
497  __ Addu(scratch, backing_store, scratch);
498  return MemOperand(scratch);
499 }
500 
501 
502 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
503  // ---------- S t a t e --------------
504  // -- ra : return address
505  // -- a0 : key
506  // -- a1 : receiver
507  // -----------------------------------
508  Label slow, notin;
509  MemOperand mapped_location =
510  GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
511  __ Ret(USE_DELAY_SLOT);
512  __ lw(v0, mapped_location);
513  __ bind(&notin);
514  // The unmapped lookup expects that the parameter map is in a2.
515  MemOperand unmapped_location =
516  GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
517  __ lw(a2, unmapped_location);
518  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
519  __ Branch(&slow, eq, a2, Operand(a3));
520  __ Ret(USE_DELAY_SLOT);
521  __ mov(v0, a2);
522  __ bind(&slow);
523  GenerateMiss(masm);
524 }
525 
526 
527 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
528  // ---------- S t a t e --------------
529  // -- a0 : value
530  // -- a1 : key
531  // -- a2 : receiver
532  // -- ra : return address
533  // -----------------------------------
534  Label slow, notin;
535  // Store address is returned in register (of MemOperand) mapped_location.
536  MemOperand mapped_location =
537  GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
538  __ sw(a0, mapped_location);
539  __ mov(t5, a0);
540  ASSERT_EQ(mapped_location.offset(), 0);
541  __ RecordWrite(a3, mapped_location.rm(), t5,
542  kRAHasNotBeenSaved, kDontSaveFPRegs);
543  __ Ret(USE_DELAY_SLOT);
544  __ mov(v0, a0); // (In delay slot) return the value stored in v0.
545  __ bind(&notin);
546  // The unmapped lookup expects that the parameter map is in a3.
547  // Store address is returned in register (of MemOperand) unmapped_location.
548  MemOperand unmapped_location =
549  GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
550  __ sw(a0, unmapped_location);
551  __ mov(t5, a0);
552  ASSERT_EQ(unmapped_location.offset(), 0);
553  __ RecordWrite(a3, unmapped_location.rm(), t5,
554  kRAHasNotBeenSaved, kDontSaveFPRegs);
555  __ Ret(USE_DELAY_SLOT);
556  __ mov(v0, a0); // (In delay slot) return the value stored in v0.
557  __ bind(&slow);
558  GenerateMiss(masm);
559 }
560 
561 
562 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
563  // ---------- S t a t e --------------
564  // -- ra : return address
565  // -- a0 : key
566  // -- a1 : receiver
567  // -----------------------------------
568  Isolate* isolate = masm->isolate();
569 
570  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
571 
572  __ Push(a1, a0);
573 
574  // Perform tail call to the entry.
575  ExternalReference ref =
576  ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
577 
578  __ TailCallExternalReference(ref, 2, 1);
579 }
580 
581 
582 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
583  // ---------- S t a t e --------------
584  // -- ra : return address
585  // -- a0 : key
586  // -- a1 : receiver
587  // -----------------------------------
588 
589  __ Push(a1, a0);
590 
591  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
592 }
593 
594 
595 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
596  // ---------- S t a t e --------------
597  // -- ra : return address
598  // -- a0 : key
599  // -- a1 : receiver
600  // -----------------------------------
601  Label slow, check_name, index_smi, index_name, property_array_property;
602  Label probe_dictionary, check_number_dictionary;
603 
604  Register key = a0;
605  Register receiver = a1;
606 
607  Isolate* isolate = masm->isolate();
608 
609  // Check that the key is a smi.
610  __ JumpIfNotSmi(key, &check_name);
611  __ bind(&index_smi);
612  // Now the key is known to be a smi. This place is also jumped to from below
613  // where a numeric string is converted to a smi.
614 
615  GenerateKeyedLoadReceiverCheck(
616  masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
617 
618  // Check the receiver's map to see if it has fast elements.
619  __ CheckFastElements(a2, a3, &check_number_dictionary);
620 
621  GenerateFastArrayLoad(
622  masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
623 
624  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
625  __ Ret();
626 
627  __ bind(&check_number_dictionary);
628  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
629  __ lw(a3, FieldMemOperand(t0, HeapObject::kMapOffset));
630 
631  // Check whether the elements is a number dictionary.
632  // a0: key
633  // a3: elements map
634  // t0: elements
635  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
636  __ Branch(&slow, ne, a3, Operand(at));
637  __ sra(a2, a0, kSmiTagSize);
638  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
639  __ Ret();
640 
641  // Slow case, key and receiver still in a0 and a1.
642  __ bind(&slow);
643  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
644  1,
645  a2,
646  a3);
647  GenerateRuntimeGetProperty(masm);
648 
649  __ bind(&check_name);
650  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
651 
652  GenerateKeyedLoadReceiverCheck(
653  masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
654 
655 
656  // If the receiver is a fast-case object, check the keyed lookup
657  // cache. Otherwise probe the dictionary.
658  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
659  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
660  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
661  __ Branch(&probe_dictionary, eq, t0, Operand(at));
662 
663  // Load the map of the receiver, compute the keyed lookup cache hash
664  // based on 32 bits of the map pointer and the name hash.
665  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
666  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
667  __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
668  __ sra(at, t0, Name::kHashShift);
669  __ xor_(a3, a3, at);
670  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
671  __ And(a3, a3, Operand(mask));
672 
673  // Load the key (consisting of map and unique name) from the cache and
674  // check for match.
675  Label load_in_object_property;
676  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
677  Label hit_on_nth_entry[kEntriesPerBucket];
678  ExternalReference cache_keys =
679  ExternalReference::keyed_lookup_cache_keys(isolate);
680  __ li(t0, Operand(cache_keys));
681  __ sll(at, a3, kPointerSizeLog2 + 1);
682  __ addu(t0, t0, at);
683 
684  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
685  Label try_next_entry;
686  __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
687  __ Branch(&try_next_entry, ne, a2, Operand(t1));
688  __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
689  __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
690  __ bind(&try_next_entry);
691  }
692 
693  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
694  __ Branch(&slow, ne, a2, Operand(t1));
695  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
696  __ Branch(&slow, ne, a0, Operand(t1));
697 
698  // Get field offset.
699  // a0 : key
700  // a1 : receiver
701  // a2 : receiver's map
702  // a3 : lookup cache index
703  ExternalReference cache_field_offsets =
704  ExternalReference::keyed_lookup_cache_field_offsets(isolate);
705 
706  // Hit on nth entry.
707  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
708  __ bind(&hit_on_nth_entry[i]);
709  __ li(t0, Operand(cache_field_offsets));
710  __ sll(at, a3, kPointerSizeLog2);
711  __ addu(at, t0, at);
712  __ lw(t1, MemOperand(at, kPointerSize * i));
713  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
714  __ Subu(t1, t1, t2);
715  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
716  if (i != 0) {
717  __ Branch(&load_in_object_property);
718  }
719  }
720 
721  // Load in-object property.
722  __ bind(&load_in_object_property);
723  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
724  __ addu(t2, t2, t1); // Index from start of object.
725  __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
726  __ sll(at, t2, kPointerSizeLog2);
727  __ addu(at, a1, at);
728  __ lw(v0, MemOperand(at));
729  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
730  1,
731  a2,
732  a3);
733  __ Ret();
734 
735  // Load property array property.
736  __ bind(&property_array_property);
737  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
738  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
739  __ sll(t0, t1, kPointerSizeLog2);
740  __ Addu(t0, t0, a1);
741  __ lw(v0, MemOperand(t0));
742  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
743  1,
744  a2,
745  a3);
746  __ Ret();
747 
748 
749  // Do a quick inline probe of the receiver's dictionary, if it
750  // exists.
751  __ bind(&probe_dictionary);
752  // a1: receiver
753  // a0: key
754  // a3: elements
755  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
756  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
757  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
758  // Load the property to v0.
759  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
760  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
761  1,
762  a2,
763  a3);
764  __ Ret();
765 
766  __ bind(&index_name);
767  __ IndexFromHash(a3, key);
768  // Now jump to the place where smi keys are handled.
769  __ Branch(&index_smi);
770 }
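// Note: the keyed lookup cache above stores raw property indices. After the
// map's in-object property count is subtracted, a non-negative result is an
// index into the properties backing store, while a negative result is an
// offset back from the instance size, i.e. an in-object field.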
771 
772 
773 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
774  // ---------- S t a t e --------------
775  // -- ra : return address
776  // -- a0 : key (index)
777  // -- a1 : receiver
778  // -----------------------------------
779  Label miss;
780 
781  Register receiver = a1;
782  Register index = a0;
783  Register scratch = a3;
784  Register result = v0;
785 
786  StringCharAtGenerator char_at_generator(receiver,
787  index,
788  scratch,
789  result,
790  &miss, // When not a string.
791  &miss, // When not a number.
792  &miss, // When index out of range.
793  STRING_INDEX_IS_ARRAY_INDEX);
794  char_at_generator.GenerateFast(masm);
795  __ Ret();
796 
797  StubRuntimeCallHelper call_helper;
798  char_at_generator.GenerateSlow(masm, call_helper);
799 
800  __ bind(&miss);
801  GenerateMiss(masm);
802 }
803 
804 
805 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
806  StrictMode strict_mode) {
807  // ---------- S t a t e --------------
808  // -- a0 : value
809  // -- a1 : key
810  // -- a2 : receiver
811  // -- ra : return address
812  // -----------------------------------
813 
814  // Push receiver, key and value for runtime call.
815  __ Push(a2, a1, a0);
816  __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
817  __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
818  __ Push(a1, a0);
819 
820  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
821 }
822 
823 
824 static void KeyedStoreGenerateGenericHelper(
825  MacroAssembler* masm,
826  Label* fast_object,
827  Label* fast_double,
828  Label* slow,
829  KeyedStoreCheckMap check_map,
830  KeyedStoreIncrementLength increment_length,
831  Register value,
832  Register key,
833  Register receiver,
834  Register receiver_map,
835  Register elements_map,
836  Register elements) {
837  Label transition_smi_elements;
838  Label finish_object_store, non_double_value, transition_double_elements;
839  Label fast_double_without_map_check;
840 
841  // Fast case: Do the store, could be either Object or double.
842  __ bind(fast_object);
843  Register scratch_value = t0;
844  Register address = t1;
845  if (check_map == kCheckMap) {
846  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
847  __ Branch(fast_double, ne, elements_map,
848  Operand(masm->isolate()->factory()->fixed_array_map()));
849  }
850 
851  // HOLECHECK: guards "A[i] = V"
852  // We have to go to the runtime if the current value is the hole because
853  // there may be a callback on the element.
854  Label holecheck_passed1;
855  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
856  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
857  __ addu(address, address, at);
858  __ lw(scratch_value, MemOperand(address));
859  __ Branch(&holecheck_passed1, ne, scratch_value,
860  Operand(masm->isolate()->factory()->the_hole_value()));
861  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
862  slow);
863 
864  __ bind(&holecheck_passed1);
865 
866  // Smi stores don't require further checks.
867  Label non_smi_value;
868  __ JumpIfNotSmi(value, &non_smi_value);
869 
870  if (increment_length == kIncrementLength) {
871  // Add 1 to receiver->length.
872  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
873  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
874  }
875  // It's irrelevant whether array is smi-only or not when writing a smi.
876  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
877  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
878  __ Addu(address, address, scratch_value);
879  __ sw(value, MemOperand(address));
880  __ Ret();
881 
882  __ bind(&non_smi_value);
883  // Escape to elements kind transition case.
884  __ CheckFastObjectElements(receiver_map, scratch_value,
885  &transition_smi_elements);
886 
887  // Fast elements array, store the value to the elements backing store.
888  __ bind(&finish_object_store);
889  if (increment_length == kIncrementLength) {
890  // Add 1 to receiver->length.
891  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
892  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
893  }
894  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
895  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
896  __ Addu(address, address, scratch_value);
897  __ sw(value, MemOperand(address));
898  // Update write barrier for the elements array address.
899  __ mov(scratch_value, value); // Preserve the value which is returned.
900  __ RecordWrite(elements,
901  address,
902  scratch_value,
903  kRAHasNotBeenSaved,
904  kDontSaveFPRegs,
905  EMIT_REMEMBERED_SET,
906  OMIT_SMI_CHECK);
907  __ Ret();
908 
909  __ bind(fast_double);
910  if (check_map == kCheckMap) {
911  // Check for fast double array case. If this fails, call through to the
912  // runtime.
913  __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
914  __ Branch(slow, ne, elements_map, Operand(at));
915  }
916 
917  // HOLECHECK: guards "A[i] double hole?"
918  // We have to see if the double version of the hole is present. If so
919  // go to the runtime.
920  __ Addu(address, elements,
921  Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
922  - kHeapObjectTag));
923  __ sll(at, key, kPointerSizeLog2);
924  __ addu(address, address, at);
925  __ lw(scratch_value, MemOperand(address));
926  __ Branch(&fast_double_without_map_check, ne, scratch_value,
927  Operand(kHoleNanUpper32));
928  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
929  slow);
930 
931  __ bind(&fast_double_without_map_check);
932  __ StoreNumberToDoubleElements(value,
933  key,
934  elements, // Overwritten.
935  a3, // Scratch regs...
936  t0,
937  t1,
938  &transition_double_elements);
939  if (increment_length == kIncrementLength) {
940  // Add 1 to receiver->length.
941  __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
942  __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
943  }
944  __ Ret();
945 
946  __ bind(&transition_smi_elements);
947  // Transition the array appropriately depending on the value type.
948  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
949  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
950  __ Branch(&non_double_value, ne, t0, Operand(at));
951 
952  // Value is a double. Transition FAST_SMI_ELEMENTS ->
953  // FAST_DOUBLE_ELEMENTS and complete the store.
954  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
955  FAST_DOUBLE_ELEMENTS,
956  receiver_map,
957  t0,
958  slow);
959  ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
960  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
961  FAST_DOUBLE_ELEMENTS);
962  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
963  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
964  __ jmp(&fast_double_without_map_check);
965 
966  __ bind(&non_double_value);
967  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
968  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
969  FAST_ELEMENTS,
970  receiver_map,
971  t0,
972  slow);
973  ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
974  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
975  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
976  slow);
977  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
978  __ jmp(&finish_object_store);
979 
980  __ bind(&transition_double_elements);
981  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
982  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
983  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
984  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
985  FAST_ELEMENTS,
986  receiver_map,
987  t0,
988  slow);
989  ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
990  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
991  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
992  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
993  __ jmp(&finish_object_store);
994 }
995 
996 
997 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
998  StrictMode strict_mode) {
999  // ---------- S t a t e --------------
1000  // -- a0 : value
1001  // -- a1 : key
1002  // -- a2 : receiver
1003  // -- ra : return address
1004  // -----------------------------------
1005  Label slow, fast_object, fast_object_grow;
1006  Label fast_double, fast_double_grow;
1007  Label array, extra, check_if_double_array;
1008 
1009  // Register usage.
1010  Register value = a0;
1011  Register key = a1;
1012  Register receiver = a2;
1013  Register receiver_map = a3;
1014  Register elements_map = t2;
1015  Register elements = t3; // Elements array of the receiver.
1016  // t0 and t1 are used as general scratch registers.
1017 
1018  // Check that the key is a smi.
1019  __ JumpIfNotSmi(key, &slow);
1020  // Check that the object isn't a smi.
1021  __ JumpIfSmi(receiver, &slow);
1022  // Get the map of the object.
1023  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
1024  // Check that the receiver does not require access checks and is not observed.
1025  // The generic stub does not perform map checks or handle observed objects.
1026  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
1027  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
1028  1 << Map::kIsObserved));
1029  __ Branch(&slow, ne, t0, Operand(zero_reg));
1030  // Check if the object is a JS array or not.
1031  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
1032  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
1033  // Check that the object is some kind of JSObject.
1034  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
1035 
1036  // Object case: Check key against length in the elements array.
1037  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1038  // Check array bounds. Both the key and the length of FixedArray are smis.
1039  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1040  __ Branch(&fast_object, lo, key, Operand(t0));
1041 
1042  // Slow case, handle jump to runtime.
1043  __ bind(&slow);
1044  // Entry registers are intact.
1045  // a0: value.
1046  // a1: key.
1047  // a2: receiver.
1048  GenerateRuntimeSetProperty(masm, strict_mode);
1049 
1050  // Extra capacity case: Check if there is extra capacity to
1051  // perform the store and update the length. Used for adding one
1052  // element to the array by writing to array[array.length].
1053  __ bind(&extra);
1054  // Condition code from comparing key and array length is still available.
1055  // Only support writing to array[array.length].
1056  __ Branch(&slow, ne, key, Operand(t0));
1057  // Check for room in the elements backing store.
1058  // Both the key and the length of FixedArray are smis.
1059  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1060  __ Branch(&slow, hs, key, Operand(t0));
1061  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
1062  __ Branch(
1063  &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
1064 
1065  __ jmp(&fast_object_grow);
1066 
1067  __ bind(&check_if_double_array);
1068  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
1069  __ jmp(&fast_double_grow);
1070 
1071  // Array case: Get the length and the elements array from the JS
1072  // array. Check that the array is in fast mode (and writable); if it
1073  // is the length is always a smi.
1074  __ bind(&array);
1075  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1076 
1077  // Check the key against the length in the array.
1078  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1079  __ Branch(&extra, hs, key, Operand(t0));
1080 
1081  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
1082  &slow, kCheckMap, kDontIncrementLength,
1083  value, key, receiver, receiver_map,
1084  elements_map, elements);
1085  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1086  &slow, kDontCheckMap, kIncrementLength,
1087  value, key, receiver, receiver_map,
1088  elements_map, elements);
1089 }
1090 
1091 
1092 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1093  // ---------- S t a t e --------------
1094  // -- ra : return address
1095  // -- a0 : key
1096  // -- a1 : receiver
1097  // -----------------------------------
1098  Label slow;
1099 
1100  // Check that the receiver isn't a smi.
1101  __ JumpIfSmi(a1, &slow);
1102 
1103  // Check that the key is an array index, that is Uint32.
1104  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
1105  __ Branch(&slow, ne, t0, Operand(zero_reg));
1106 
1107  // Get the map of the receiver.
1108  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
1109 
1110  // Check that it has indexed interceptor and access checks
1111  // are not enabled for this object.
1112  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
1113  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
1114  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
1115  // Everything is fine, call runtime.
1116  __ Push(a1, a0); // Receiver, key.
1117 
1118  // Perform tail call to the entry.
1119  __ TailCallExternalReference(ExternalReference(
1120  IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
1121 
1122  __ bind(&slow);
1123  GenerateMiss(masm);
1124 }
1125 
1126 
1127 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
1128  // ---------- S t a t e --------------
1129  // -- a0 : value
1130  // -- a1 : key
1131  // -- a2 : receiver
1132  // -- ra : return address
1133  // -----------------------------------
1134 
1135  // Push receiver, key and value for runtime call.
1136  __ Push(a2, a1, a0);
1137 
1138  ExternalReference ref =
1139  ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
1140  __ TailCallExternalReference(ref, 3, 1);
1141 }
1142 
1143 
1144 void StoreIC::GenerateSlow(MacroAssembler* masm) {
1145  // ---------- S t a t e --------------
1146  // -- a0 : value
1147  // -- a2 : key
1148  // -- a1 : receiver
1149  // -- ra : return address
1150  // -----------------------------------
1151 
1152  // Push receiver, key and value for runtime call.
1153  __ Push(a1, a2, a0);
1154 
1155  // The slow case calls into the runtime to complete the store without causing
1156  // an IC miss that would otherwise cause a transition to the generic stub.
1157  ExternalReference ref =
1158  ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
1159  __ TailCallExternalReference(ref, 3, 1);
1160 }
1161 
1162 
1163 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
1164  // ---------- S t a t e --------------
1165  // -- a0 : value
1166  // -- a1 : key
1167  // -- a2 : receiver
1168  // -- ra : return address
1169  // -----------------------------------
1170 
1171  // Push receiver, key and value for runtime call.
1172  // We can't use MultiPush as the order of the registers is important.
1173  __ Push(a2, a1, a0);
1174 
1175  // The slow case calls into the runtime to complete the store without causing
1176  // an IC miss that would otherwise cause a transition to the generic stub.
1177  ExternalReference ref =
1178  ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
1179 
1180  __ TailCallExternalReference(ref, 3, 1);
1181 }
1182 
1183 
1184 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
1185  // ----------- S t a t e -------------
1186  // -- a0 : value
1187  // -- a1 : receiver
1188  // -- a2 : name
1189  // -- ra : return address
1190  // -----------------------------------
1191 
1192  // Get the receiver from the stack and probe the stub cache.
1193  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
1194  masm->isolate()->stub_cache()->GenerateProbe(
1195  masm, flags, a1, a2, a3, t0, t1, t2);
1196 
1197  // Cache miss: Jump to runtime.
1198  GenerateMiss(masm);
1199 }
1200 
1201 
1202 void StoreIC::GenerateMiss(MacroAssembler* masm) {
1203  // ----------- S t a t e -------------
1204  // -- a0 : value
1205  // -- a1 : receiver
1206  // -- a2 : name
1207  // -- ra : return address
1208  // -----------------------------------
1209 
1210  __ Push(a1, a2, a0);
1211  // Perform tail call to the entry.
1212  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
1213  masm->isolate());
1214  __ TailCallExternalReference(ref, 3, 1);
1215 }
1216 
1217 
1218 void StoreIC::GenerateNormal(MacroAssembler* masm) {
1219  // ----------- S t a t e -------------
1220  // -- a0 : value
1221  // -- a1 : receiver
1222  // -- a2 : name
1223  // -- ra : return address
1224  // -----------------------------------
1225  Label miss;
1226 
1227  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
1228 
1229  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
1230  Counters* counters = masm->isolate()->counters();
1231  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
1232  __ Ret();
1233 
1234  __ bind(&miss);
1235  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
1236  GenerateMiss(masm);
1237 }
1238 
1239 
1240 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1241  StrictMode strict_mode) {
1242  // ----------- S t a t e -------------
1243  // -- a0 : value
1244  // -- a1 : receiver
1245  // -- a2 : name
1246  // -- ra : return address
1247  // -----------------------------------
1248 
1249  __ Push(a1, a2, a0);
1250 
1251  __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
1252  __ li(a0, Operand(Smi::FromInt(strict_mode)));
1253  __ Push(a1, a0);
1254 
1255  // Do tail-call to runtime routine.
1256  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
1257 }
1258 
1259 
1260 #undef __
1261 
1262 
1263 Condition CompareIC::ComputeCondition(Token::Value op) {
1264  switch (op) {
1265  case Token::EQ_STRICT:
1266  case Token::EQ:
1267  return eq;
1268  case Token::LT:
1269  return lt;
1270  case Token::GT:
1271  return gt;
1272  case Token::LTE:
1273  return le;
1274  case Token::GTE:
1275  return ge;
1276  default:
1277  UNREACHABLE();
1278  return kNoCondition;
1279  }
1280 }
1281 
1282 
1283 bool CompareIC::HasInlinedSmiCode(Address address) {
1284  // The address of the instruction following the call.
1285  Address andi_instruction_address =
1286  address + Assembler::kCallTargetAddressOffset;
1287 
1288  // If the instruction following the call is not a andi at, rx, #yyy, nothing
1289  // was inlined.
1290  Instr instr = Assembler::instr_at(andi_instruction_address);
1291  return Assembler::IsAndImmediate(instr) &&
1292  Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
1293 }
1294 
1295 
1296 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1297  Address andi_instruction_address =
1298  address + Assembler::kCallTargetAddressOffset;
1299 
1300  // If the instruction following the call is not a andi at, rx, #yyy, nothing
1301  // was inlined.
1302  Instr instr = Assembler::instr_at(andi_instruction_address);
1303  if (!(Assembler::IsAndImmediate(instr) &&
1304  Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
1305  return;
1306  }
1307 
1308  // The delta to the start of the map check instruction and the
1309  // condition code uses at the patched jump.
1310  int delta = Assembler::GetImmediate16(instr);
1311  delta += Assembler::GetRs(instr) * kImm16Mask;
1312  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
1313  // signals that nothing was inlined.
1314  if (delta == 0) {
1315  return;
1316  }
1317 
1318  if (FLAG_trace_ic) {
1319  PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
1320  address, andi_instruction_address, delta);
1321  }
1322 
1323  Address patch_address =
1324  andi_instruction_address - delta * Instruction::kInstrSize;
1325  Instr instr_at_patch = Assembler::instr_at(patch_address);
1326  Instr branch_instr =
1327  Assembler::instr_at(patch_address + Instruction::kInstrSize);
1328  // This is patching a conditional "jump if not smi/jump if smi" site.
1329  // Enabling by changing from
1330  // andi at, rx, 0
1331  // Branch <target>, eq, at, Operand(zero_reg)
1332  // to:
1333  // andi at, rx, #kSmiTagMask
1334  // Branch <target>, ne, at, Operand(zero_reg)
1335  // and vice-versa to be disabled again.
1336  CodePatcher patcher(patch_address, 2);
1337  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
1338  if (check == ENABLE_INLINED_SMI_CHECK) {
1339  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
1340  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
1341  patcher.masm()->andi(at, reg, kSmiTagMask);
1342  } else {
1343  ASSERT(check == DISABLE_INLINED_SMI_CHECK);
1344  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
1345  patcher.masm()->andi(at, reg, 0);
1346  }
1347  ASSERT(Assembler::IsBranch(branch_instr));
1348  if (Assembler::IsBeq(branch_instr)) {
1349  patcher.ChangeBranchCondition(ne);
1350  } else {
1351  ASSERT(Assembler::IsBne(branch_instr));
1352  patcher.ChangeBranchCondition(eq);
1353  }
1354 }
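// Note: the andi placed right after the IC call site acts as a marker; its
// immediate field records the distance back to the inlined smi check, letting
// the patcher find both the andi and the following branch and flip them
// between the enabled form (andi ..., kSmiTagMask + bne) and the disabled
// form (andi ..., 0 + beq).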
1355 
1356 
1357 } } // namespace v8::internal
1358 
1359 #endif // V8_TARGET_ARCH_MIPS
byte * Address
Definition: globals.h:186
static void GenerateSloppyArguments(MacroAssembler *masm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
static bool IsBranch(Instr instr)
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
const intptr_t kSmiTagMask
Definition: v8.h:5480
const intptr_t kSmiSignMask
Definition: v8globals.h:41
static uint32_t GetRt(Instr instr)
static const int kMapHashShift
Definition: heap.h:2759
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
static Flags ComputeHandlerFlags(Kind handler_kind, StubType type=NORMAL, InlineCacheHolderFlag holder=OWN_MAP)
Definition: objects-inl.h:4624
static uint32_t GetImmediate16(Instr instr)
static void GenerateMiss(MacroAssembler *masm)
static const unsigned int kContainsCachedArrayIndexMask
Definition: objects.h:8673
static void GenerateRuntimeSetProperty(MacroAssembler *masm, StrictMode strict_mode)
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
KeyedStoreCheckMap
Definition: ic.h:572
static const int kHasNamedInterceptor
Definition: objects.h:6470
static const int kIsAccessCheckNeeded
Definition: objects.h:6474
uint32_t Flags
Definition: objects.h:5184
static uint32_t GetRs(Instr instr)
static void GenerateMegamorphic(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:329
const int kPointerSizeLog2
Definition: globals.h:281
static const int kInstanceSizeOffset
Definition: objects.h:6448
Isolate * isolate() const
Definition: ic.h:157
static void GenerateMegamorphic(MacroAssembler *masm)
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check)
static void GenerateGeneric(MacroAssembler *masm, StrictMode strict_mode)
static const int kHasIndexedInterceptor
Definition: objects.h:6471
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static Condition ComputeCondition(Token::Value op)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
[V8 runtime flag help text, concatenated into one run-on string by the documentation generator; elided]
Definition: flags.cc:665
const uint32_t kHoleNanUpper32
Definition: v8globals.h:454
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
static void GenerateGeneric(MacroAssembler *masm)
const int kHeapObjectTag
Definition: v8.h:5473
static void GenerateMiss(MacroAssembler *masm)
const uint32_t kHoleNanLower32
Definition: v8globals.h:455
#define __
static void GenerateRuntimeSetProperty(MacroAssembler *masm, StrictMode strict_mode)
static const int kPropertiesOffset
Definition: objects.h:2755
static Register from_code(int code)
static const int kInObjectPropertiesOffset
Definition: objects.h:6450
static void GenerateSlow(MacroAssembler *masm)
static const int kElementsOffset
Definition: objects.h:2756
static const int kCallTargetAddressOffset
const uint32_t kInternalizedTag
Definition: objects.h:605
static void GenerateRuntimeGetProperty(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateSloppyArguments(MacroAssembler *masm)
static const int kHeaderSize
Definition: objects.h:3016
static void GenerateSlow(MacroAssembler *masm)
static const int kMapOffset
Definition: objects.h:1890
static const int kIsObserved
Definition: objects.h:6473
static const int kLengthOffset
Definition: objects.h:3015
static const int kSlowCaseBitFieldMask
Definition: ic.h:432
KeyedStoreIncrementLength
Definition: ic.h:578
InlinedSmiCheck
Definition: ic.h:920
MemOperand FieldMemOperand(Register object, int offset)
static void GenerateString(MacroAssembler *masm)
static void GenerateMapChangeElementsTransition(MacroAssembler *masm, AllocationSiteMode mode, Label *allocation_memento_found)
const int kSmiTagSize
Definition: v8.h:5479
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1477
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
static void GenerateSmiToDouble(MacroAssembler *masm, AllocationSiteMode mode, Label *fail)
const int kSmiTag
Definition: v8.h:5478
static void GenerateNormal(MacroAssembler *masm)
static bool IsBne(Instr instr)
static void GenerateIndexedInterceptor(MacroAssembler *masm)
static bool IsBeq(Instr instr)
static const int kHashShift
Definition: objects.h:8642
static const int kCapacityMask
Definition: heap.h:2758
static void GenerateMiss(MacroAssembler *masm)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
[V8 runtime flag help text, concatenated by the documentation generator; near-duplicate of the entry above, elided]
Definition: flags.cc:505
static void GenerateDoubleToObject(MacroAssembler *masm, AllocationSiteMode mode, Label *fail)
static const int kHashMask
Definition: heap.h:2760
static const int kInstanceTypeOffset
Definition: objects.h:6459
static const int kEntriesPerBucket
Definition: heap.h:2761
static bool IsAndImmediate(Instr instr)
static void GenerateNormal(MacroAssembler *masm)
static void GenerateMiss(MacroAssembler *masm)
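The tagging constants indexed above (kHeapObjectTag, kSmiTag, kSmiTagSize, kMapOffset) drive the pointer arithmetic that helpers such as FieldMemOperand and JumpIfSmi perform in the IC stubs. The standalone C++ sketch below is illustrative only and is not part of the generated listing: it assumes the 32-bit tagging scheme of this V8 version, and FieldAddress/IsSmi are hypothetical stand-ins that model the arithmetic with plain integers rather than emitting MIPS code.

#include <cassert>
#include <cstdint>

namespace {

// Tagging constants as assumed for a 32-bit target in this V8 version
// (compare the kHeapObjectTag, kSmiTag and kSmiTagSize entries above).
const int kHeapObjectTag = 1;                         // low bit set on heap object pointers
const int kSmiTag = 0;                                // smis carry a zero tag...
const int kSmiTagSize = 1;                            // ...in the lowest bit
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

// FieldMemOperand(object, offset) effectively addresses
// object + offset - kHeapObjectTag, because the register still holds the
// tagged pointer.  Modeled here as a hypothetical helper on integers.
intptr_t FieldAddress(intptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

// JumpIfSmi tests the low tag bit; a value is a smi when that bit equals kSmiTag.
bool IsSmi(intptr_t value) {
  return (value & kSmiTagMask) == kSmiTag;
}

}  // namespace

int main() {
  const int kMapOffset = 0;  // the map is the first field of every heap object
  intptr_t heap_object = 0x1000 + kHeapObjectTag;  // a tagged heap pointer
  assert(!IsSmi(heap_object));
  assert(FieldAddress(heap_object, kMapOffset) == 0x1000);

  intptr_t smi_42 = 42 << kSmiTagSize;  // 32-bit smi encoding: value << 1, tag bit 0
  assert(IsSmi(smi_42));
  return 0;
}

This is why the stubs can reject smi receivers with a single bit test before ever touching memory, and why every field load in the generated code goes through FieldMemOperand rather than a raw offset.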