v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
stub-cache-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_MIPS
31 
32 #include "ic-inl.h"
33 #include "codegen.h"
34 #include "stub-cache.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 #define __ ACCESS_MASM(masm)
40 
41 
42 static void ProbeTable(Isolate* isolate,
43  MacroAssembler* masm,
44  Code::Flags flags,
45  StubCache::Table table,
46  Register receiver,
47  Register name,
48  // Number of the cache entry, not scaled.
49  Register offset,
50  Register scratch,
51  Register scratch2,
52  Register offset_scratch) {
53  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
54  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
55  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
56 
57  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
58  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
59  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
60 
61  // Check the relative positions of the address fields.
62  ASSERT(value_off_addr > key_off_addr);
63  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
64  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
65  ASSERT(map_off_addr > key_off_addr);
66  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
67  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
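 // The value/map deltas asserted above are used below as immediate offsets
 // from the entry's base address, so they must stay within a small range
 // (they have to fit in the lw immediate field).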
68 
69  Label miss;
70  Register base_addr = scratch;
71  scratch = no_reg;
72 
73  // Multiply by 3 because there are 3 fields per entry (name, code, map).
74  __ sll(offset_scratch, offset, 1);
75  __ Addu(offset_scratch, offset_scratch, offset);
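 // With kPointerSize == 4 this makes each entry 3 * 4 = 12 bytes, matching
 // the ASSERT(sizeof(Entry) == 12) in GenerateProbe below.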
76 
77  // Calculate the base address of the entry.
78  __ li(base_addr, Operand(key_offset));
79  __ sll(at, offset_scratch, kPointerSizeLog2);
80  __ Addu(base_addr, base_addr, at);
81 
82  // Check that the key in the entry matches the name.
83  __ lw(at, MemOperand(base_addr, 0));
84  __ Branch(&miss, ne, name, Operand(at));
85 
86  // Check the map matches.
87  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
88  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
89  __ Branch(&miss, ne, at, Operand(scratch2));
90 
91  // Get the code entry from the cache.
92  Register code = scratch2;
93  scratch2 = no_reg;
94  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
95 
96  // Check that the flags match what we're looking for.
97  Register flags_reg = base_addr;
98  base_addr = no_reg;
99  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
100  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
101  __ Branch(&miss, ne, flags_reg, Operand(flags));
102 
103 #ifdef DEBUG
104  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
105  __ jmp(&miss);
106  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
107  __ jmp(&miss);
108  }
109 #endif
110 
111  // Jump to the first instruction in the code stub.
112  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
113  __ Jump(at);
114 
115  // Miss: fall through.
116  __ bind(&miss);
117 }
118 
119 
120 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
121  Label* miss_label,
122  Register receiver,
123  Handle<Name> name,
124  Register scratch0,
125  Register scratch1) {
126  ASSERT(name->IsUniqueName());
127  ASSERT(!receiver.is(scratch0));
128  Counters* counters = masm->isolate()->counters();
129  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
130  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
131 
132  Label done;
133 
134  const int kInterceptorOrAccessCheckNeededMask =
135  (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
136 
137  // Bail out if the receiver has a named interceptor or requires access checks.
138  Register map = scratch1;
139  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
140  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
141  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
142  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
143 
144  // Check that receiver is a JSObject.
145  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
146  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
147 
148  // Load properties array.
149  Register properties = scratch0;
150  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
151  // Check that the properties array is a dictionary.
152  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
153  Register tmp = properties;
154  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
155  __ Branch(miss_label, ne, map, Operand(tmp));
156 
157  // Restore the temporarily used register.
158  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
159 
160 
161  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
162  miss_label,
163  &done,
164  receiver,
165  properties,
166  name,
167  scratch1);
168  __ bind(&done);
169  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
170 }
171 
172 
173 void StubCache::GenerateProbe(MacroAssembler* masm,
174  Code::Flags flags,
175  Register receiver,
176  Register name,
177  Register scratch,
178  Register extra,
179  Register extra2,
180  Register extra3) {
181  Isolate* isolate = masm->isolate();
182  Label miss;
183 
184  // Make sure that code is valid. The multiplying code relies on the
185  // entry size being 12.
186  ASSERT(sizeof(Entry) == 12);
187 
188  // Make sure the flags do not name a specific type.
189  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
190 
191  // Make sure that there are no register conflicts.
192  ASSERT(!scratch.is(receiver));
193  ASSERT(!scratch.is(name));
194  ASSERT(!extra.is(receiver));
195  ASSERT(!extra.is(name));
196  ASSERT(!extra.is(scratch));
197  ASSERT(!extra2.is(receiver));
198  ASSERT(!extra2.is(name));
199  ASSERT(!extra2.is(scratch));
200  ASSERT(!extra2.is(extra));
201 
202  // Check register validity.
203  ASSERT(!scratch.is(no_reg));
204  ASSERT(!extra.is(no_reg));
205  ASSERT(!extra2.is(no_reg));
206  ASSERT(!extra3.is(no_reg));
207 
208  Counters* counters = masm->isolate()->counters();
209  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
210  extra2, extra3);
211 
212  // Check that the receiver isn't a smi.
213  __ JumpIfSmi(receiver, &miss);
214 
215  // Get the map of the receiver and compute the hash.
216  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
217  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
218  __ Addu(scratch, scratch, at);
219  uint32_t mask = kPrimaryTableSize - 1;
220  // We shift out the last two bits because they are not part of the hash and
221  // they are always 01 for maps.
222  __ srl(scratch, scratch, kHeapObjectTagSize);
223  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
224  __ And(scratch, scratch, Operand(mask));
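 // In effect, the primary table index is
 //   (((name->hash_field + receiver->map) >> kHeapObjectTagSize)
 //       ^ (flags >> kHeapObjectTagSize)) & (kPrimaryTableSize - 1).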
225 
226  // Probe the primary table.
227  ProbeTable(isolate,
228  masm,
229  flags,
230  kPrimary,
231  receiver,
232  name,
233  scratch,
234  extra,
235  extra2,
236  extra3);
237 
238  // Primary miss: Compute hash for secondary probe.
239  __ srl(at, name, kHeapObjectTagSize);
240  __ Subu(scratch, scratch, at);
241  uint32_t mask2 = kSecondaryTableSize - 1;
242  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
243  __ And(scratch, scratch, Operand(mask2));
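 // The secondary table index is derived from the primary probe:
 //   (primary_index - (name >> kHeapObjectTagSize)
 //       + (flags >> kHeapObjectTagSize)) & (kSecondaryTableSize - 1).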
244 
245  // Probe the secondary table.
246  ProbeTable(isolate,
247  masm,
248  flags,
249  kSecondary,
250  receiver,
251  name,
252  scratch,
253  extra,
254  extra2,
255  extra3);
256 
257  // Cache miss: Fall-through and let caller handle the miss by
258  // entering the runtime system.
259  __ bind(&miss);
260  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
261  extra2, extra3);
262 }
263 
264 
265 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
266  int index,
267  Register prototype) {
268  // Load the global or builtins object from the current context.
269  __ lw(prototype,
270  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
271  // Load the native context from the global or builtins object.
272  __ lw(prototype,
273  FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
274  // Load the function from the native context.
275  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
276  // Load the initial map. The global functions all have initial maps.
277  __ lw(prototype,
278  FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
279  // Load the prototype from the initial map.
280  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
281 }
282 
283 
284 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
285  MacroAssembler* masm,
286  int index,
287  Register prototype,
288  Label* miss) {
289  Isolate* isolate = masm->isolate();
290  // Get the global function with the given index.
291  Handle<JSFunction> function(
292  JSFunction::cast(isolate->native_context()->get(index)));
293 
294  // Check we're still in the same context.
295  Register scratch = prototype;
296  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
297  __ lw(scratch, MemOperand(cp, offset));
298  __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
299  __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
300  __ li(at, function);
301  __ Branch(miss, ne, at, Operand(scratch));
302 
303  // Load its initial map. The global functions all have initial maps.
304  __ li(prototype, Handle<Map>(function->initial_map()));
305  // Load the prototype from the initial map.
306  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
307 }
308 
309 
310 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
311  Register dst,
312  Register src,
313  bool inobject,
314  int index,
315  Representation representation) {
316  ASSERT(!representation.IsDouble());
317  int offset = index * kPointerSize;
318  if (!inobject) {
319  // Calculate the offset into the properties array.
320  offset = offset + FixedArray::kHeaderSize;
321  __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
322  src = dst;
323  }
324  __ lw(dst, FieldMemOperand(src, offset));
325 }
326 
327 
328 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
329  Register receiver,
330  Register scratch,
331  Label* miss_label) {
332  // Check that the receiver isn't a smi.
333  __ JumpIfSmi(receiver, miss_label);
334 
335  // Check that the object is a JS array.
336  __ GetObjectType(receiver, scratch, scratch);
337  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
338 
339  // Load length directly from the JS array.
340  __ Ret(USE_DELAY_SLOT);
341  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
342 }
343 
344 
345 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
346  Register receiver,
347  Register scratch1,
348  Register scratch2,
349  Label* miss_label) {
350  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
351  __ Ret(USE_DELAY_SLOT);
352  __ mov(v0, scratch1);
353 }
354 
355 
356 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
357  Handle<JSGlobalObject> global,
358  Handle<Name> name,
359  Register scratch,
360  Label* miss) {
361  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
362  ASSERT(cell->value()->IsTheHole());
363  __ li(scratch, Operand(cell));
364  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
365  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
366  __ Branch(miss, ne, scratch, Operand(at));
367 }
368 
369 
370 void StoreStubCompiler::GenerateNegativeHolderLookup(
371  MacroAssembler* masm,
372  Handle<JSObject> holder,
373  Register holder_reg,
374  Handle<Name> name,
375  Label* miss) {
376  if (holder->IsJSGlobalObject()) {
377  GenerateCheckPropertyCell(
378  masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
379  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
380  GenerateDictionaryNegativeLookup(
381  masm, miss, holder_reg, name, scratch1(), scratch2());
382  }
383 }
384 
385 
386 // Generate StoreTransition code, value is passed in a0 register.
387 // After executing generated code, the receiver_reg and name_reg
388 // may be clobbered.
389 void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
390  Handle<JSObject> object,
391  LookupResult* lookup,
392  Handle<Map> transition,
393  Handle<Name> name,
394  Register receiver_reg,
395  Register storage_reg,
396  Register value_reg,
397  Register scratch1,
398  Register scratch2,
399  Register scratch3,
400  Label* miss_label,
401  Label* slow) {
402  // a0 : value.
403  Label exit;
404 
405  int descriptor = transition->LastAdded();
406  DescriptorArray* descriptors = transition->instance_descriptors();
407  PropertyDetails details = descriptors->GetDetails(descriptor);
408  Representation representation = details.representation();
409  ASSERT(!representation.IsNone());
410 
411  if (details.type() == CONSTANT) {
412  Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
413  __ li(scratch1, constant);
414  __ Branch(miss_label, ne, value_reg, Operand(scratch1));
415  } else if (representation.IsSmi()) {
416  __ JumpIfNotSmi(value_reg, miss_label);
417  } else if (representation.IsHeapObject()) {
418  __ JumpIfSmi(value_reg, miss_label);
419  } else if (representation.IsDouble()) {
420  Label do_store, heap_number;
421  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
422  __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
423 
424  __ JumpIfNotSmi(value_reg, &heap_number);
425  __ SmiUntag(scratch1, value_reg);
426  __ mtc1(scratch1, f6);
427  __ cvt_d_w(f4, f6);
428  __ jmp(&do_store);
429 
430  __ bind(&heap_number);
431  __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
432  miss_label, DONT_DO_SMI_CHECK);
433  __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
434 
435  __ bind(&do_store);
436  __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
437  }
438 
439  // Stub never generated for non-global objects that require access
440  // checks.
441  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
442 
443  // Perform map transition for the receiver if necessary.
444  if (details.type() == FIELD &&
445  object->map()->unused_property_fields() == 0) {
446  // The properties must be extended before we can store the value.
447  // We jump to a runtime call that extends the properties array.
448  __ push(receiver_reg);
449  __ li(a2, Operand(transition));
450  __ Push(a2, a0);
451  __ TailCallExternalReference(
452  ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
453  masm->isolate()),
454  3, 1);
455  return;
456  }
457 
458  // Update the map of the object.
459  __ li(scratch1, Operand(transition));
460  __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
461 
462  // Update the write barrier for the map field.
463  __ RecordWriteField(receiver_reg,
464  HeapObject::kMapOffset,
465  scratch1,
466  scratch2,
467  kRAHasNotBeenSaved,
468  kDontSaveFPRegs,
469  OMIT_REMEMBERED_SET,
470  OMIT_SMI_CHECK);
471 
472  if (details.type() == CONSTANT) {
473  ASSERT(value_reg.is(a0));
474  __ Ret(USE_DELAY_SLOT);
475  __ mov(v0, a0);
476  return;
477  }
478 
479  int index = transition->instance_descriptors()->GetFieldIndex(
480  transition->LastAdded());
481 
482  // Adjust for the number of properties stored in the object. Even in the
483  // face of a transition we can use the old map here because the size of the
484  // object and the number of in-object properties is not going to change.
485  index -= object->map()->inobject_properties();
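 // After this adjustment a negative index denotes an in-object slot
 // (relative to the instance size), while a non-negative index refers to a
 // slot in the out-of-line properties array.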
486 
487  // TODO(verwaest): Share this code as a code stub.
488  SmiCheck smi_check = representation.IsTagged()
489  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
490  if (index < 0) {
491  // Set the property straight into the object.
492  int offset = object->map()->instance_size() + (index * kPointerSize);
493  if (representation.IsDouble()) {
494  __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
495  } else {
496  __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
497  }
498 
499  if (!representation.IsSmi()) {
500  // Update the write barrier for the array address.
501  if (!representation.IsDouble()) {
502  __ mov(storage_reg, value_reg);
503  }
504  __ RecordWriteField(receiver_reg,
505  offset,
506  storage_reg,
507  scratch1,
508  kRAHasNotBeenSaved,
509  kDontSaveFPRegs,
510  EMIT_REMEMBERED_SET,
511  smi_check);
512  }
513  } else {
514  // Write to the properties array.
515  int offset = index * kPointerSize + FixedArray::kHeaderSize;
516  // Get the properties array
517  __ lw(scratch1,
518  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
519  if (representation.IsDouble()) {
520  __ sw(storage_reg, FieldMemOperand(scratch1, offset));
521  } else {
522  __ sw(value_reg, FieldMemOperand(scratch1, offset));
523  }
524 
525  if (!representation.IsSmi()) {
526  // Update the write barrier for the array address.
527  if (!representation.IsDouble()) {
528  __ mov(storage_reg, value_reg);
529  }
530  __ RecordWriteField(scratch1,
531  offset,
532  storage_reg,
533  receiver_reg,
534  kRAHasNotBeenSaved,
535  kDontSaveFPRegs,
536  EMIT_REMEMBERED_SET,
537  smi_check);
538  }
539  }
540 
541  // Return the value (register v0).
542  ASSERT(value_reg.is(a0));
543  __ bind(&exit);
544  __ Ret(USE_DELAY_SLOT);
545  __ mov(v0, a0);
546 }
547 
548 
549 // Generate StoreField code, value is passed in a0 register.
550 // When leaving generated code after success, the receiver_reg and name_reg
551 // may be clobbered. Upon branch to miss_label, the receiver and name
552 // registers have their original values.
553 void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
554  Handle<JSObject> object,
555  LookupResult* lookup,
556  Register receiver_reg,
557  Register name_reg,
558  Register value_reg,
559  Register scratch1,
560  Register scratch2,
561  Label* miss_label) {
562  // a0 : value
563  Label exit;
564 
565  // Stub never generated for non-global objects that require access
566  // checks.
567  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
568 
569  int index = lookup->GetFieldIndex().field_index();
570 
571  // Adjust for the number of properties stored in the object. Even in the
572  // face of a transition we can use the old map here because the size of the
573  // object and the number of in-object properties is not going to change.
574  index -= object->map()->inobject_properties();
575 
576  Representation representation = lookup->representation();
577  ASSERT(!representation.IsNone());
578  if (representation.IsSmi()) {
579  __ JumpIfNotSmi(value_reg, miss_label);
580  } else if (representation.IsHeapObject()) {
581  __ JumpIfSmi(value_reg, miss_label);
582  } else if (representation.IsDouble()) {
583  // Load the double storage.
584  if (index < 0) {
585  int offset = object->map()->instance_size() + (index * kPointerSize);
586  __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
587  } else {
588  __ lw(scratch1,
589  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
590  int offset = index * kPointerSize + FixedArray::kHeaderSize;
591  __ lw(scratch1, FieldMemOperand(scratch1, offset));
592  }
593 
594  // Store the value into the storage.
595  Label do_store, heap_number;
596  __ JumpIfNotSmi(value_reg, &heap_number);
597  __ SmiUntag(scratch2, value_reg);
598  __ mtc1(scratch2, f6);
599  __ cvt_d_w(f4, f6);
600  __ jmp(&do_store);
601 
602  __ bind(&heap_number);
603  __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
604  miss_label, DONT_DO_SMI_CHECK);
605  __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
606 
607  __ bind(&do_store);
608  __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
609  // Return the value (register v0).
610  ASSERT(value_reg.is(a0));
611  __ Ret(USE_DELAY_SLOT);
612  __ mov(v0, a0);
613  return;
614  }
615 
616  // TODO(verwaest): Share this code as a code stub.
617  SmiCheck smi_check = representation.IsTagged()
618  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
619  if (index < 0) {
620  // Set the property straight into the object.
621  int offset = object->map()->instance_size() + (index * kPointerSize);
622  __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
623 
624  if (!representation.IsSmi()) {
625  // Skip updating write barrier if storing a smi.
626  __ JumpIfSmi(value_reg, &exit);
627 
628  // Update the write barrier for the array address.
629  // Pass the now unused name_reg as a scratch register.
630  __ mov(name_reg, value_reg);
631  __ RecordWriteField(receiver_reg,
632  offset,
633  name_reg,
634  scratch1,
635  kRAHasNotBeenSaved,
636  kDontSaveFPRegs,
637  EMIT_REMEMBERED_SET,
638  smi_check);
639  }
640  } else {
641  // Write to the properties array.
642  int offset = index * kPointerSize + FixedArray::kHeaderSize;
643  // Get the properties array.
644  __ lw(scratch1,
645  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
646  __ sw(value_reg, FieldMemOperand(scratch1, offset));
647 
648  if (!representation.IsSmi()) {
649  // Skip updating write barrier if storing a smi.
650  __ JumpIfSmi(value_reg, &exit);
651 
652  // Update the write barrier for the array address.
653  // Ok to clobber receiver_reg and name_reg, since we return.
654  __ mov(name_reg, value_reg);
655  __ RecordWriteField(scratch1,
656  offset,
657  name_reg,
658  receiver_reg,
659  kRAHasNotBeenSaved,
660  kDontSaveFPRegs,
661  EMIT_REMEMBERED_SET,
662  smi_check);
663  }
664  }
665 
666  // Return the value (register v0).
667  ASSERT(value_reg.is(a0));
668  __ bind(&exit);
669  __ Ret(USE_DELAY_SLOT);
670  __ mov(v0, a0);
671 }
672 
673 
674 void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
675  Label* label,
676  Handle<Name> name) {
677  if (!label->is_unused()) {
678  __ bind(label);
679  __ li(this->name(), Operand(name));
680  }
681 }
682 
683 
684 static void PushInterceptorArguments(MacroAssembler* masm,
685  Register receiver,
686  Register holder,
687  Register name,
688  Handle<JSObject> holder_obj) {
689  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
690  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
691  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
692  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
693  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
694  __ push(name);
695  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
696  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
697  Register scratch = name;
698  __ li(scratch, Operand(interceptor));
699  __ Push(scratch, receiver, holder);
700 }
701 
702 
703 static void CompileCallLoadPropertyWithInterceptor(
704  MacroAssembler* masm,
705  Register receiver,
706  Register holder,
707  Register name,
708  Handle<JSObject> holder_obj,
709  IC::UtilityId id) {
710  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
711  __ CallExternalReference(
712  ExternalReference(IC_Utility(id), masm->isolate()),
713  StubCache::kInterceptorArgsLength);
714 }
715 
716 
717 // Generate call to api function.
718 void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
719  const CallOptimization& optimization,
720  Handle<Map> receiver_map,
721  Register receiver,
722  Register scratch_in,
723  bool is_store,
724  int argc,
725  Register* values) {
726  ASSERT(!receiver.is(scratch_in));
727  // Preparing to push, adjust sp.
728  __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
729  __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
730  // Write the arguments to stack frame.
731  for (int i = 0; i < argc; i++) {
732  Register arg = values[argc-1-i];
733  ASSERT(!receiver.is(arg));
734  ASSERT(!scratch_in.is(arg));
735  __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
736  }
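 // At this point sp[argc * kPointerSize] holds the receiver and
 // sp[i * kPointerSize] holds values[i], which is the stack layout the
 // tail-called CallApiFunctionStub consumes.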
737  ASSERT(optimization.is_simple_api_call());
738 
739  // Abi for CallApiFunctionStub.
740  Register callee = a0;
741  Register call_data = t0;
742  Register holder = a2;
743  Register api_function_address = a1;
744 
745  // Put holder in place.
746  CallOptimization::HolderLookup holder_lookup;
747  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
748  receiver_map,
749  &holder_lookup);
750  switch (holder_lookup) {
751  case CallOptimization::kHolderIsReceiver:
752  __ Move(holder, receiver);
753  break;
754  case CallOptimization::kHolderFound:
755  __ li(holder, api_holder);
756  break;
757  case CallOptimization::kHolderNotFound:
758  UNREACHABLE();
759  break;
760  }
761 
762  Isolate* isolate = masm->isolate();
763  Handle<JSFunction> function = optimization.constant_function();
764  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
765  Handle<Object> call_data_obj(api_call_info->data(), isolate);
766 
767  // Put callee in place.
768  __ li(callee, function);
769 
770  bool call_data_undefined = false;
771  // Put call_data in place.
772  if (isolate->heap()->InNewSpace(*call_data_obj)) {
773  __ li(call_data, api_call_info);
774  __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
775  } else if (call_data_obj->IsUndefined()) {
776  call_data_undefined = true;
777  __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
778  } else {
779  __ li(call_data, call_data_obj);
780  }
781  // Put api_function_address in place.
782  Address function_address = v8::ToCData<Address>(api_call_info->callback());
783  ApiFunction fun(function_address);
784  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
785  ExternalReference ref =
786  ExternalReference(&fun,
787  type,
788  masm->isolate());
789  __ li(api_function_address, Operand(ref));
790 
791  // Jump to stub.
792  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
793  __ TailCallStub(&stub);
794 }
795 
796 
797 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
798  __ Jump(code, RelocInfo::CODE_TARGET);
799 }
800 
801 
802 #undef __
803 #define __ ACCESS_MASM(masm())
804 
805 
806 Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
807  Register object_reg,
808  Handle<JSObject> holder,
809  Register holder_reg,
810  Register scratch1,
811  Register scratch2,
812  Handle<Name> name,
813  Label* miss,
814  PrototypeCheckType check) {
815  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
816 
817  // Make sure there's no overlap between holder and object registers.
818  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
819  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
820  && !scratch2.is(scratch1));
821 
822  // Keep track of the current object in register reg.
823  Register reg = object_reg;
824  int depth = 0;
825 
826  Handle<JSObject> current = Handle<JSObject>::null();
827  if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
828  Handle<JSObject> prototype = Handle<JSObject>::null();
829  Handle<Map> current_map = receiver_map;
830  Handle<Map> holder_map(holder->map());
831  // Traverse the prototype chain and check the maps in the prototype chain for
832  // fast and global objects or do negative lookup for normal objects.
833  while (!current_map.is_identical_to(holder_map)) {
834  ++depth;
835 
836  // Only global objects and objects that do not require access
837  // checks are allowed in stubs.
838  ASSERT(current_map->IsJSGlobalProxyMap() ||
839  !current_map->is_access_check_needed());
840 
841  prototype = handle(JSObject::cast(current_map->prototype()));
842  if (current_map->is_dictionary_map() &&
843  !current_map->IsJSGlobalObjectMap() &&
844  !current_map->IsJSGlobalProxyMap()) {
845  if (!name->IsUniqueName()) {
846  ASSERT(name->IsString());
847  name = factory()->InternalizeString(Handle<String>::cast(name));
848  }
849  ASSERT(current.is_null() ||
850  current->property_dictionary()->FindEntry(*name) ==
851  NameDictionary::kNotFound);
852 
853  GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
854  scratch1, scratch2);
855 
856  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
857  reg = holder_reg; // From now on the object will be in holder_reg.
858  __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
859  } else {
860  Register map_reg = scratch1;
861  if (depth != 1 || check == CHECK_ALL_MAPS) {
862  // CheckMap implicitly loads the map of |reg| into |map_reg|.
863  __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
864  } else {
865  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
866  }
867 
868  // Check access rights to the global object. This has to happen after
869  // the map check so that we know that the object is actually a global
870  // object.
871  if (current_map->IsJSGlobalProxyMap()) {
872  __ CheckAccessGlobalProxy(reg, scratch2, miss);
873  } else if (current_map->IsJSGlobalObjectMap()) {
874  GenerateCheckPropertyCell(
875  masm(), Handle<JSGlobalObject>::cast(current), name,
876  scratch2, miss);
877  }
878 
879  reg = holder_reg; // From now on the object will be in holder_reg.
880 
881  if (heap()->InNewSpace(*prototype)) {
882  // The prototype is in new space; we cannot store a reference to it
883  // in the code. Load it from the map.
884  __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
885  } else {
886  // The prototype is in old space; load it directly.
887  __ li(reg, Operand(prototype));
888  }
889  }
890 
891  // Go to the next object in the prototype chain.
892  current = prototype;
893  current_map = handle(current->map());
894  }
895 
896  // Log the check depth.
897  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
898 
899  if (depth != 0 || check == CHECK_ALL_MAPS) {
900  // Check the holder map.
901  __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
902  }
903 
904  // Perform security check for access to the global object.
905  ASSERT(current_map->IsJSGlobalProxyMap() ||
906  !current_map->is_access_check_needed());
907  if (current_map->IsJSGlobalProxyMap()) {
908  __ CheckAccessGlobalProxy(reg, scratch1, miss);
909  }
910 
911  // Return the register containing the holder.
912  return reg;
913 }
914 
915 
916 void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
917  if (!miss->is_unused()) {
918  Label success;
919  __ Branch(&success);
920  __ bind(miss);
921  TailCallBuiltin(masm(), MissBuiltin(kind()));
922  __ bind(&success);
923  }
924 }
925 
926 
927 void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
928  if (!miss->is_unused()) {
929  Label success;
930  __ Branch(&success);
931  GenerateRestoreName(masm(), miss, name);
932  TailCallBuiltin(masm(), MissBuiltin(kind()));
933  __ bind(&success);
934  }
935 }
936 
937 
938 Register LoadStubCompiler::CallbackHandlerFrontend(
939  Handle<HeapType> type,
940  Register object_reg,
941  Handle<JSObject> holder,
942  Handle<Name> name,
943  Handle<Object> callback) {
944  Label miss;
945 
946  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
947 
948  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
949  ASSERT(!reg.is(scratch2()));
950  ASSERT(!reg.is(scratch3()));
951  ASSERT(!reg.is(scratch4()));
952 
953  // Load the properties dictionary.
954  Register dictionary = scratch4();
955  __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
956 
957  // Probe the dictionary.
958  Label probe_done;
959  NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
960  &miss,
961  &probe_done,
962  dictionary,
963  this->name(),
964  scratch2(),
965  scratch3());
966  __ bind(&probe_done);
967 
968  // If probing finds an entry in the dictionary, scratch3 contains the
969  // pointer into the dictionary. Check that the value is the callback.
970  Register pointer = scratch3();
971  const int kElementsStartOffset = NameDictionary::kHeaderSize +
972  NameDictionary::kElementsStartIndex * kPointerSize;
973  const int kValueOffset = kElementsStartOffset + kPointerSize;
974  __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
975  __ Branch(&miss, ne, scratch2(), Operand(callback));
976  }
977 
978  HandlerFrontendFooter(name, &miss);
979  return reg;
980 }
981 
982 
983 void LoadStubCompiler::GenerateLoadField(Register reg,
984  Handle<JSObject> holder,
985  PropertyIndex field,
986  Representation representation) {
987  if (!reg.is(receiver())) __ mov(receiver(), reg);
988  if (kind() == Code::LOAD_IC) {
989  LoadFieldStub stub(field.is_inobject(holder),
990  field.translate(holder),
991  representation);
992  GenerateTailCall(masm(), stub.GetCode(isolate()));
993  } else {
994  KeyedLoadFieldStub stub(field.is_inobject(holder),
995  field.translate(holder),
996  representation);
997  GenerateTailCall(masm(), stub.GetCode(isolate()));
998  }
999 }
1000 
1001 
1002 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
1003  // Return the constant value.
1004  __ li(v0, value);
1005  __ Ret();
1006 }
1007 
1008 
1009 void LoadStubCompiler::GenerateLoadCallback(
1010  Register reg,
1011  Handle<ExecutableAccessorInfo> callback) {
1012  // Build AccessorInfo::args_ list on the stack and push property name below
1013  // the exit frame to make GC aware of them and store pointers to them.
1014  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
1015  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
1016  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
1017  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
1018  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
1019  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
1020  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
1021  ASSERT(!scratch2().is(reg));
1022  ASSERT(!scratch3().is(reg));
1023  ASSERT(!scratch4().is(reg));
1024  __ push(receiver());
1025  if (heap()->InNewSpace(callback->data())) {
1026  __ li(scratch3(), callback);
1027  __ lw(scratch3(),
1028  FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
1029  } else {
1030  __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
1031  }
1032  __ Subu(sp, sp, 6 * kPointerSize);
1033  __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
1034  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
1035  __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
1036  __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
1037  __ li(scratch4(),
1038  Operand(ExternalReference::isolate_address(isolate())));
1039  __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
1040  __ sw(reg, MemOperand(sp, 1 * kPointerSize));
1041  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
1042  __ Addu(scratch2(), sp, 1 * kPointerSize);
1043 
1044  __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
1045  // Abi for CallApiGetter.
1046  Register getter_address_reg = a2;
1047 
1048  Address getter_address = v8::ToCData<Address>(callback->getter());
1049  ApiFunction fun(getter_address);
1050  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
1051  ExternalReference ref = ExternalReference(&fun, type, isolate());
1052  __ li(getter_address_reg, Operand(ref));
1053 
1054  CallApiGetterStub stub;
1055  __ TailCallStub(&stub);
1056 }
1057 
1058 
1059 void LoadStubCompiler::GenerateLoadInterceptor(
1060  Register holder_reg,
1061  Handle<Object> object,
1062  Handle<JSObject> interceptor_holder,
1063  LookupResult* lookup,
1064  Handle<Name> name) {
1065  ASSERT(interceptor_holder->HasNamedInterceptor());
1066  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1067 
1068  // So far the most popular follow ups for interceptor loads are FIELD
1069  // and CALLBACKS, so inline only them, other cases may be added
1070  // later.
1071  bool compile_followup_inline = false;
1072  if (lookup->IsFound() && lookup->IsCacheable()) {
1073  if (lookup->IsField()) {
1074  compile_followup_inline = true;
1075  } else if (lookup->type() == CALLBACKS &&
1076  lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
1077  ExecutableAccessorInfo* callback =
1078  ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
1079  compile_followup_inline = callback->getter() != NULL &&
1080  callback->IsCompatibleReceiver(*object);
1081  }
1082  }
1083 
1084  if (compile_followup_inline) {
1085  // Compile the interceptor call, followed by inline code to load the
1086  // property from further up the prototype chain if the call fails.
1087  // Check that the maps haven't changed.
1088  ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
1089 
1090  // Preserve the receiver register explicitly whenever it is different from
1091  // the holder and it is needed should the interceptor return without any
1092  // result. The CALLBACKS case needs the receiver to be passed into C++ code,
1093  // the FIELD case might cause a miss during the prototype check.
1094  bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
1095  bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
1096  (lookup->type() == CALLBACKS || must_perform_prototype_check);
1097 
1098  // Save necessary data before invoking an interceptor.
1099  // Requires a frame to make GC aware of pushed pointers.
1100  {
1101  FrameScope frame_scope(masm(), StackFrame::INTERNAL);
1102  if (must_preserve_receiver_reg) {
1103  __ Push(receiver(), holder_reg, this->name());
1104  } else {
1105  __ Push(holder_reg, this->name());
1106  }
1107  // Invoke an interceptor. Note: map checks from receiver to
1108  // interceptor's holder has been compiled before (see a caller
1109  // of this method).
1110  CompileCallLoadPropertyWithInterceptor(
1111  masm(), receiver(), holder_reg, this->name(), interceptor_holder,
1112  IC::kLoadPropertyWithInterceptorOnly);
1113 
1114  // Check if interceptor provided a value for property. If it's
1115  // the case, return immediately.
1116  Label interceptor_failed;
1117  __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
1118  __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
1119  frame_scope.GenerateLeaveFrame();
1120  __ Ret();
1121 
1122  __ bind(&interceptor_failed);
1123  __ pop(this->name());
1124  __ pop(holder_reg);
1125  if (must_preserve_receiver_reg) {
1126  __ pop(receiver());
1127  }
1128  // Leave the internal frame.
1129  }
1130  GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
1131  } else { // !compile_followup_inline
1132  // Call the runtime system to load the interceptor.
1133  // Check that the maps haven't changed.
1134  PushInterceptorArguments(masm(), receiver(), holder_reg,
1135  this->name(), interceptor_holder);
1136 
1137  ExternalReference ref = ExternalReference(
1138  IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
1139  __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
1140  }
1141 }
1142 
1143 
1144 void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
1145  Label success;
1146  // Check that the object is a boolean.
1147  __ LoadRoot(at, Heap::kTrueValueRootIndex);
1148  __ Branch(&success, eq, object, Operand(at));
1149  __ LoadRoot(at, Heap::kFalseValueRootIndex);
1150  __ Branch(miss, ne, object, Operand(at));
1151  __ bind(&success);
1152 }
1153 
1154 
1155 Handle<Code> StoreStubCompiler::CompileStoreCallback(
1156  Handle<JSObject> object,
1157  Handle<JSObject> holder,
1158  Handle<Name> name,
1159  Handle<ExecutableAccessorInfo> callback) {
1160  Register holder_reg = HandlerFrontend(
1161  IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
1162 
1163  // Stub never generated for non-global objects that require access
1164  // checks.
1165  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
1166 
1167  __ Push(receiver(), holder_reg); // Receiver.
1168  __ li(at, Operand(callback)); // Callback info.
1169  __ push(at);
1170  __ li(at, Operand(name));
1171  __ Push(at, value());
1172 
1173  // Do tail-call to the runtime system.
1174  ExternalReference store_callback_property =
1175  ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
1176  __ TailCallExternalReference(store_callback_property, 5, 1);
1177 
1178  // Return the generated code.
1179  return GetCode(kind(), Code::FAST, name);
1180 }
1181 
1182 
1183 #undef __
1184 #define __ ACCESS_MASM(masm)
1185 
1186 
1187 void StoreStubCompiler::GenerateStoreViaSetter(
1188  MacroAssembler* masm,
1189  Handle<HeapType> type,
1190  Register receiver,
1191  Handle<JSFunction> setter) {
1192  // ----------- S t a t e -------------
1193  // -- ra : return address
1194  // -----------------------------------
1195  {
1196  FrameScope scope(masm, StackFrame::INTERNAL);
1197 
1198  // Save value register, so we can restore it later.
1199  __ push(value());
1200 
1201  if (!setter.is_null()) {
1202  // Call the JavaScript setter with receiver and value on the stack.
1203  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1204  // Swap in the global receiver.
1205  __ lw(receiver,
1206  FieldMemOperand(receiver,
1207  JSGlobalObject::kGlobalReceiverOffset));
1208  }
1209  __ Push(receiver, value());
1210  ParameterCount actual(1);
1211  ParameterCount expected(setter);
1212  __ InvokeFunction(setter, expected, actual,
1213  CALL_FUNCTION, NullCallWrapper());
1214  } else {
1215  // If we generate a global code snippet for deoptimization only, remember
1216  // the place to continue after deoptimization.
1217  masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
1218  }
1219 
1220  // We have to return the passed value, not the return value of the setter.
1221  __ pop(v0);
1222 
1223  // Restore context register.
1224  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1225  }
1226  __ Ret();
1227 }
1228 
1229 
1230 #undef __
1231 #define __ ACCESS_MASM(masm())
1232 
1233 
1234 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
1235  Handle<JSObject> object,
1236  Handle<Name> name) {
1237  __ Push(receiver(), this->name(), value());
1238 
1239  // Do tail-call to the runtime system.
1240  ExternalReference store_ic_property =
1241  ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
1242  __ TailCallExternalReference(store_ic_property, 3, 1);
1243 
1244  // Return the generated code.
1245  return GetCode(kind(), Code::FAST, name);
1246 }
1247 
1248 
1249 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
1250  Handle<JSObject> last,
1251  Handle<Name> name) {
1252  NonexistentHandlerFrontend(type, last, name);
1253 
1254  // Return undefined if maps of the full prototype chain is still the same.
1255  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
1256  __ Ret();
1257 
1258  // Return the generated code.
1259  return GetCode(kind(), Code::FAST, name);
1260 }
1261 
1262 
1263 Register* LoadStubCompiler::registers() {
1264  // receiver, name, scratch1, scratch2, scratch3, scratch4.
1265  static Register registers[] = { a0, a2, a3, a1, t0, t1 };
1266  return registers;
1267 }
1268 
1269 
1270 Register* KeyedLoadStubCompiler::registers() {
1271  // receiver, name, scratch1, scratch2, scratch3, scratch4.
1272  static Register registers[] = { a1, a0, a2, a3, t0, t1 };
1273  return registers;
1274 }
1275 
1276 
1277 Register StoreStubCompiler::value() {
1278  return a0;
1279 }
1280 
1281 
1282 Register* StoreStubCompiler::registers() {
1283  // receiver, name, scratch1, scratch2, scratch3.
1284  static Register registers[] = { a1, a2, a3, t0, t1 };
1285  return registers;
1286 }
1287 
1288 
1289 Register* KeyedStoreStubCompiler::registers() {
1290  // receiver, name, scratch1, scratch2, scratch3.
1291  static Register registers[] = { a2, a1, a3, t0, t1 };
1292  return registers;
1293 }
1294 
1295 
1296 #undef __
1297 #define __ ACCESS_MASM(masm)
1298 
1299 
1300 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
1301  Handle<HeapType> type,
1302  Register receiver,
1303  Handle<JSFunction> getter) {
1304  // ----------- S t a t e -------------
1305  // -- a0 : receiver
1306  // -- a2 : name
1307  // -- ra : return address
1308  // -----------------------------------
1309  {
1310  FrameScope scope(masm, StackFrame::INTERNAL);
1311 
1312  if (!getter.is_null()) {
1313  // Call the JavaScript getter with the receiver on the stack.
1314  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1315  // Swap in the global receiver.
1316  __ lw(receiver,
1317  FieldMemOperand(receiver,
1318  JSGlobalObject::kGlobalReceiverOffset));
1319  }
1320  __ push(receiver);
1321  ParameterCount actual(0);
1322  ParameterCount expected(getter);
1323  __ InvokeFunction(getter, expected, actual,
1324  CALL_FUNCTION, NullCallWrapper());
1325  } else {
1326  // If we generate a global code snippet for deoptimization only, remember
1327  // the place to continue after deoptimization.
1328  masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
1329  }
1330 
1331  // Restore context register.
1332  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1333  }
1334  __ Ret();
1335 }
1336 
1337 
1338 #undef __
1339 #define __ ACCESS_MASM(masm())
1340 
1341 
1342 Handle<Code> LoadStubCompiler::CompileLoadGlobal(
1343  Handle<HeapType> type,
1344  Handle<GlobalObject> global,
1345  Handle<PropertyCell> cell,
1346  Handle<Name> name,
1347  bool is_dont_delete) {
1348  Label miss;
1349 
1350  HandlerFrontendHeader(type, receiver(), global, name, &miss);
1351 
1352  // Get the value from the cell.
1353  __ li(a3, Operand(cell));
1354  __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset));
1355 
1356  // Check for deleted property if property can actually be deleted.
1357  if (!is_dont_delete) {
1358  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
1359  __ Branch(&miss, eq, t0, Operand(at));
1360  }
1361 
1362  Counters* counters = isolate()->counters();
1363  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
1364  __ Ret(USE_DELAY_SLOT);
1365  __ mov(v0, t0);
1366 
1367  HandlerFrontendFooter(name, &miss);
1368 
1369  // Return the generated code.
1370  return GetCode(kind(), Code::NORMAL, name);
1371 }
1372 
1373 
1374 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
1375  TypeHandleList* types,
1376  CodeHandleList* handlers,
1377  Handle<Name> name,
1378  Code::StubType type,
1379  IcCheckType check) {
1380  Label miss;
1381 
1382  if (check == PROPERTY &&
1383  (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
1384  __ Branch(&miss, ne, this->name(), Operand(name));
1385  }
1386 
1387  Label number_case;
1388  Register match = scratch1();
1389  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
1390  __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
1391 
1392  Register map_reg = scratch2();
1393 
1394  int receiver_count = types->length();
1395  int number_of_handled_maps = 0;
1396  __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
1397  for (int current = 0; current < receiver_count; ++current) {
1398  Handle<HeapType> type = types->at(current);
1399  Handle<Map> map = IC::TypeToMap(*type, isolate());
1400  if (!map->is_deprecated()) {
1401  number_of_handled_maps++;
1402  // Check map and tail call if there's a match.
1403  // Separate compare from branch, to provide path for above JumpIfSmi().
1404  __ Subu(match, map_reg, Operand(map));
1405  if (type->Is(HeapType::Number())) {
1406  ASSERT(!number_case.is_unused());
1407  __ bind(&number_case);
1408  }
1409  __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
1410  eq, match, Operand(zero_reg));
1411  }
1412  }
1413  ASSERT(number_of_handled_maps != 0);
1414 
1415  __ bind(&miss);
1416  TailCallBuiltin(masm(), MissBuiltin(kind()));
1417 
1418  // Return the generated code.
1419  InlineCacheState state =
1420  number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
1421  return GetICCode(kind(), type, name, state);
1422 }
1423 
1424 
1425 void StoreStubCompiler::GenerateStoreArrayLength() {
1426  // Prepare tail call to StoreIC_ArrayLength.
1427  __ Push(receiver(), value());
1428 
1429  ExternalReference ref =
1430  ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
1431  masm()->isolate());
1432  __ TailCallExternalReference(ref, 2, 1);
1433 }
1434 
1435 
1436 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
1437  MapHandleList* receiver_maps,
1438  CodeHandleList* handler_stubs,
1439  MapHandleList* transitioned_maps) {
1440  Label miss;
1441  __ JumpIfSmi(receiver(), &miss);
1442 
1443  int receiver_count = receiver_maps->length();
1444  __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
1445  for (int i = 0; i < receiver_count; ++i) {
1446  if (transitioned_maps->at(i).is_null()) {
1447  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
1448  scratch1(), Operand(receiver_maps->at(i)));
1449  } else {
1450  Label next_map;
1451  __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
1452  __ li(transition_map(), Operand(transitioned_maps->at(i)));
1453  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
1454  __ bind(&next_map);
1455  }
1456  }
1457 
1458  __ bind(&miss);
1459  TailCallBuiltin(masm(), MissBuiltin(kind()));
1460 
1461  // Return the generated code.
1462  return GetICCode(
1463  kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
1464 }
1465 
1466 
1467 #undef __
1468 #define __ ACCESS_MASM(masm)
1469 
1470 
1471 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
1472  MacroAssembler* masm) {
1473  // ---------- S t a t e --------------
1474  // -- ra : return address
1475  // -- a0 : key
1476  // -- a1 : receiver
1477  // -----------------------------------
1478  Label slow, miss;
1479 
1480  Register key = a0;
1481  Register receiver = a1;
1482 
1483  __ JumpIfNotSmi(key, &miss);
1484  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
1485  __ sra(a2, a0, kSmiTagSize);
1486  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
1487  __ Ret();
1488 
1489  // Slow case, key and receiver still in a0 and a1.
1490  __ bind(&slow);
1491  __ IncrementCounter(
1492  masm->isolate()->counters()->keyed_load_external_array_slow(),
1493  1, a2, a3);
1494  // Entry registers are intact.
1495  // ---------- S t a t e --------------
1496  // -- ra : return address
1497  // -- a0 : key
1498  // -- a1 : receiver
1499  // -----------------------------------
1500  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
1501 
1502  // Miss case, call the runtime.
1503  __ bind(&miss);
1504 
1505  // ---------- S t a t e --------------
1506  // -- ra : return address
1507  // -- a0 : key
1508  // -- a1 : receiver
1509  // -----------------------------------
1510  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
1511 }
1512 
1513 
1514 #undef __
1515 
1516 } } // namespace v8::internal
1517 
1518 #endif // V8_TARGET_ARCH_MIPS
byte * Address
Definition: globals.h:186
const FPURegister f4
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void GenerateRestoreName(MacroAssembler *masm, Label *label, Handle< Name > name)
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7519
static const int kValueOffset
Definition: objects.h:9547
static int SlotOffset(int index)
Definition: contexts.h:498
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static ExecutableAccessorInfo * cast(Object *obj)
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
const Register cp
static const int kFlagsOffset
Definition: objects.h:5592
#define LOG(isolate, Call)
Definition: log.h:86
Handle< Code > CompileStoreInterceptor(Handle< JSObject > object, Handle< Name > name)
static Handle< String > cast(Handle< S > that)
Definition: handles.h:75
static const int kGlobalReceiverOffset
Definition: objects.h:7613
void GenerateLoadField(Register reg, Handle< JSObject > holder, PropertyIndex field, Representation representation)
static StubType ExtractTypeFromFlags(Flags flags)
Definition: objects-inl.h:4646
TypeImpl< ZoneTypeConfig > Type
static const int kInterceptorArgsLength
Definition: stub-cache.h:207
static const int kInterceptorArgsNameIndex
Definition: stub-cache.h:203
static const int kHasNamedInterceptor
Definition: objects.h:6470
static const int kIsAccessCheckNeeded
Definition: objects.h:6474
uint32_t Flags
Definition: objects.h:5184
List< Handle< Map > > MapHandleList
Definition: list.h:218
#define ASSERT(condition)
Definition: checks.h:329
static const int kContextOffset
Definition: frames.h:185
const int kPointerSizeLog2
Definition: globals.h:281
Handle< Code > CompileStoreCallback(Handle< JSObject > object, Handle< JSObject > holder, Handle< Name > name, Handle< ExecutableAccessorInfo > callback)
virtual Register HandlerFrontendHeader(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Label *miss)
Definition: stub-cache.cc:790
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
const Register sp
#define UNREACHABLE()
Definition: checks.h:52
void GenerateLoadPostInterceptor(Register reg, Handle< JSObject > interceptor_holder, Handle< Name > name, LookupResult *lookup)
Definition: stub-cache.cc:983
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
Definition: objects.h:1971
V8 runtime flag help text (concatenated DEFINE_* descriptions from flags.cc)
Definition: flags.cc:665
static void GenerateStoreViaSetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > setter)
static Handle< HeapType > CurrentTypeOf(Handle< Object > object, Isolate *isolate)
Definition: ic.cc:676
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
Handle< Code > CompilePolymorphicIC(TypeHandleList *types, CodeHandleList *handlers, Handle< Name > name, Code::StubType type, IcCheckType check)
const int kHeapObjectTag
Definition: v8.h:5473
void GenerateLoadConstant(Handle< Object > value)
V8 runtime flag help text (concatenated DEFINE_* descriptions; DEFINE_bool(code_comments))
#define __
static const int kInterceptorArgsThisIndex
Definition: stub-cache.h:205
static Handle< PropertyCell > EnsurePropertyCell(Handle< JSGlobalObject > global, Handle< Name > name)
Definition: objects.cc:14752
List< Handle< HeapType > > TypeHandleList
Definition: list.h:219
static const int kPropertiesOffset
Definition: objects.h:2755
static const int kReturnValueDefaultValueIndex
Definition: arguments.h:179
void GenerateNegativeHolderLookup(MacroAssembler *masm, Handle< JSObject > holder, Register holder_reg, Handle< Name > name, Label *miss)
static const int kElementsOffset
Definition: objects.h:2756
Handle< Code > CompileLoadNonexistent(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Handle< Code > CompileLoadGlobal(Handle< HeapType > type, Handle< GlobalObject > holder, Handle< PropertyCell > cell, Handle< Name > name, bool is_dont_delete)
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
void GenerateLoadInterceptor(Register holder_reg, Handle< Object > object, Handle< JSObject > holder, LookupResult *lookup, Handle< Name > name)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
static const int kInterceptorArgsInfoIndex
Definition: stub-cache.h:204
static const int kHeaderSize
Definition: objects.h:3016
void GenerateLoadCallback(Register reg, Handle< ExecutableAccessorInfo > callback)
static Builtins::Name MissBuiltin(Code::Kind kind)
Definition: stub-cache.h:466
static const int kMapOffset
Definition: objects.h:1890
bool is(Register reg) const
List< Handle< Code > > CodeHandleList
Definition: list.h:220
Register CallbackHandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Handle< Object > callback)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
MemOperand FieldMemOperand(Register object, int offset)
static const int kDataOffset
Definition: objects.h:10433
void GenerateStoreField(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Label *miss_label)
friend class Isolate
Definition: stub-cache.h:280
const int kSmiTagSize
Definition: v8.h:5479
static const int kHeaderSize
Definition: objects.h:5604
static Handle< T > null()
Definition: handles.h:80
void NonexistentHandlerFrontend(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Definition: stub-cache.cc:864
const int kHeapObjectTagSize
Definition: v8.h:5474
const FPURegister f6
static const int kPrototypeOffset
Definition: objects.h:6427
static const int kFlagsNotUsedInLookup
Definition: objects.h:5684
const Register no_reg
Handle< Code > GetCode(Code::Kind kind, Code::StubType type, Handle< Name > name)
Definition: stub-cache.cc:1281
void GenerateStoreTransition(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Handle< Map > transition, Handle< Name > name, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Register scratch3, Label *miss_label, Label *slow)
const Register fp
static const int kNativeContextOffset
Definition: objects.h:7611
Register HandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name)
Definition: stub-cache.cc:850
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > getter)
static const int kInterceptorArgsHolderIndex
Definition: stub-cache.h:206
V8 runtime flag help text (concatenated DEFINE_* descriptions from flags.cc)
Definition: flags.cc:505
Handle< Code > GetICCode(Code::Kind kind, Code::StubType type, Handle< Name > name, InlineCacheState state=MONOMORPHIC)
Definition: stub-cache.cc:1269
static JSObject * cast(Object *obj)
static const int kInstanceTypeOffset
Definition: objects.h:6459
static Handle< Map > TypeToMap(HeapType *type, Isolate *isolate)
Definition: ic.cc:683
bool IncludesNumberType(TypeHandleList *types)
Definition: stub-cache.cc:842
static JSFunction * cast(Object *obj)