v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
stub-cache-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM
31 
32 #include "ic-inl.h"
33 #include "codegen.h"
34 #include "stub-cache.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 #define __ ACCESS_MASM(masm)
40 
41 
42 static void ProbeTable(Isolate* isolate,
43  MacroAssembler* masm,
44  Code::Flags flags,
45  StubCache::Table table,
46  Register receiver,
47  Register name,
48  // Number of the cache entry, not scaled.
49  Register offset,
50  Register scratch,
51  Register scratch2,
52  Register offset_scratch) {
53  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
54  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
55  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
56 
57  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
58  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
59  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
60 
61  // Check the relative positions of the address fields.
62  ASSERT(value_off_addr > key_off_addr);
63  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
64  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
65  ASSERT(map_off_addr > key_off_addr);
66  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
67  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
68 
69  Label miss;
70  Register base_addr = scratch;
71  scratch = no_reg;
72 
73  // Multiply by 3 because there are 3 fields per entry (name, code, map).
74  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
75 
76  // Calculate the base address of the entry.
77  __ mov(base_addr, Operand(key_offset));
78  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
79 
80  // Check that the key in the entry matches the name.
81  __ ldr(ip, MemOperand(base_addr, 0));
82  __ cmp(name, ip);
83  __ b(ne, &miss);
84 
85  // Check the map matches.
86  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
87  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
88  __ cmp(ip, scratch2);
89  __ b(ne, &miss);
90 
91  // Get the code entry from the cache.
92  Register code = scratch2;
93  scratch2 = no_reg;
94  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
95 
96  // Check that the flags match what we're looking for.
97  Register flags_reg = base_addr;
98  base_addr = no_reg;
99  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
100  // It's a nice optimization if this constant is encodable in the bic insn.
101 
102  uint32_t mask = Code::kFlagsNotUsedInLookup;
103  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
104  __ bic(flags_reg, flags_reg, Operand(mask));
105  __ cmp(flags_reg, Operand(flags));
106  __ b(ne, &miss);
107 
108 #ifdef DEBUG
109  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
110  __ jmp(&miss);
111  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
112  __ jmp(&miss);
113  }
114 #endif
115 
116  // Jump to the first instruction in the code stub.
117  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
118 
119  // Miss: fall through.
120  __ bind(&miss);
121 }
122 
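ProbeTable above checks a single stub cache entry: the entry index is scaled by three words, the name key and receiver map are compared against the entry, the cached code's flags are masked and compared, and any mismatch falls through to the miss label. A minimal C++ sketch of the same check, assuming the three-word entry layout implied by the sizeof(Entry) == 12 assertion in GenerateProbe below (the helper itself is illustrative, not part of this file):

  // Sketch only: what the ldr/cmp/b(ne) sequence above verifies for entry i.
  struct Entry { Name* key; Code* value; Map* map; };  // 3 * kPointerSize = 12
  static bool ProbeMatches(Entry* table, uint32_t i, Name* name, Map* map,
                           Code::Flags flags) {
    Entry* entry = &table[i];  // base_addr = key table base + i * 3 words
    if (entry->key != name || entry->map != map) return false;
    // Drop the flag bits that are not part of the lookup, then compare.
    return (entry->value->flags() & ~Code::kFlagsNotUsedInLookup) == flags;
  }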
123 
124 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
125  Label* miss_label,
126  Register receiver,
127  Handle<Name> name,
128  Register scratch0,
129  Register scratch1) {
130  ASSERT(name->IsUniqueName());
131  ASSERT(!receiver.is(scratch0));
132  Counters* counters = masm->isolate()->counters();
133  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
134  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
135 
136  Label done;
137 
138  const int kInterceptorOrAccessCheckNeededMask =
139  (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
140 
141  // Bail out if the receiver has a named interceptor or requires access checks.
142  Register map = scratch1;
143  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
144  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
145  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
146  __ b(ne, miss_label);
147 
148  // Check that receiver is a JSObject.
149  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
150  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
151  __ b(lt, miss_label);
152 
153  // Load properties array.
154  Register properties = scratch0;
155  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
156  // Check that the properties array is a dictionary.
157  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
158  Register tmp = properties;
159  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
160  __ cmp(map, tmp);
161  __ b(ne, miss_label);
162 
163  // Restore the temporarily used register.
164  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
165 
166 
167  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
168  miss_label,
169  &done,
170  receiver,
171  properties,
172  name,
173  scratch1);
174  __ bind(&done);
175  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
176 }
177 
178 
179 void StubCache::GenerateProbe(MacroAssembler* masm,
180  Code::Flags flags,
181  Register receiver,
182  Register name,
183  Register scratch,
184  Register extra,
185  Register extra2,
186  Register extra3) {
187  Isolate* isolate = masm->isolate();
188  Label miss;
189 
190  // Make sure that code is valid. The multiplying code relies on the
191  // entry size being 12.
192  ASSERT(sizeof(Entry) == 12);
193 
194  // Make sure the flags does not name a specific type.
195  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
196 
197  // Make sure that there are no register conflicts.
198  ASSERT(!scratch.is(receiver));
199  ASSERT(!scratch.is(name));
200  ASSERT(!extra.is(receiver));
201  ASSERT(!extra.is(name));
202  ASSERT(!extra.is(scratch));
203  ASSERT(!extra2.is(receiver));
204  ASSERT(!extra2.is(name));
205  ASSERT(!extra2.is(scratch));
206  ASSERT(!extra2.is(extra));
207 
208  // Check scratch, extra and extra2 registers are valid.
209  ASSERT(!scratch.is(no_reg));
210  ASSERT(!extra.is(no_reg));
211  ASSERT(!extra2.is(no_reg));
212  ASSERT(!extra3.is(no_reg));
213 
214  Counters* counters = masm->isolate()->counters();
215  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
216  extra2, extra3);
217 
218  // Check that the receiver isn't a smi.
219  __ JumpIfSmi(receiver, &miss);
220 
221  // Get the map of the receiver and compute the hash.
222  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
223  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
224  __ add(scratch, scratch, Operand(ip));
225  uint32_t mask = kPrimaryTableSize - 1;
226  // We shift out the last two bits because they are not part of the hash and
227  // they are always 01 for maps.
228  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
229  // Mask down the eor argument to the minimum to keep the immediate
230  // ARM-encodable.
231  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
232  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
233  __ and_(scratch, scratch, Operand(mask));
234 
235  // Probe the primary table.
236  ProbeTable(isolate,
237  masm,
238  flags,
239  kPrimary,
240  receiver,
241  name,
242  scratch,
243  extra,
244  extra2,
245  extra3);
246 
247  // Primary miss: Compute hash for secondary probe.
248  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
249  uint32_t mask2 = kSecondaryTableSize - 1;
250  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
251  __ and_(scratch, scratch, Operand(mask2));
252 
253  // Probe the secondary table.
254  ProbeTable(isolate,
255  masm,
256  flags,
257  kSecondary,
258  receiver,
259  name,
260  scratch,
261  extra,
262  extra2,
263  extra3);
264 
265  // Cache miss: Fall-through and let caller handle the miss by
266  // entering the runtime system.
267  __ bind(&miss);
268  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
269  extra2, extra3);
270 }
271 
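GenerateProbe above computes the primary and secondary table indices entirely in the scratch register before each ProbeTable call. A hedged C++ sketch of the equivalent index arithmetic, using the same constants as the assembly (the helper functions are illustrative, not part of this file):

  // Sketch only: mirrors the add/lsr/eor/and_ and sub/add/and_ sequences above.
  static uint32_t PrimaryIndex(uint32_t name_hash_field, uint32_t map_word,
                               uint32_t flags) {
    uint32_t mask = StubCache::kPrimaryTableSize - 1;
    uint32_t index = (name_hash_field + map_word) >> kHeapObjectTagSize;
    index ^= (flags >> kHeapObjectTagSize) & mask;
    return index & mask;
  }
  static uint32_t SecondaryIndex(uint32_t primary, uint32_t name_word,
                                 uint32_t flags) {
    uint32_t mask = StubCache::kSecondaryTableSize - 1;
    uint32_t index = primary - (name_word >> kHeapObjectTagSize);
    index += (flags >> kHeapObjectTagSize) & mask;
    return index & mask;
  }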
272 
273 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
274  int index,
275  Register prototype) {
276  // Load the global or builtins object from the current context.
277  __ ldr(prototype,
278  MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
279  // Load the native context from the global or builtins object.
280  __ ldr(prototype,
281  FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
282  // Load the function from the native context.
283  __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
284  // Load the initial map. The global functions all have initial maps.
285  __ ldr(prototype,
286  FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
287  // Load the prototype from the initial map.
288  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
289 }
290 
291 
292 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
293  MacroAssembler* masm,
294  int index,
295  Register prototype,
296  Label* miss) {
297  Isolate* isolate = masm->isolate();
298  // Get the global function with the given index.
299  Handle<JSFunction> function(
300  JSFunction::cast(isolate->native_context()->get(index)));
301 
302  // Check we're still in the same context.
303  Register scratch = prototype;
304  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
305  __ ldr(scratch, MemOperand(cp, offset));
306  __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
307  __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
308  __ Move(ip, function);
309  __ cmp(ip, scratch);
310  __ b(ne, miss);
311 
312  // Load its initial map. The global functions all have initial maps.
313  __ Move(prototype, Handle<Map>(function->initial_map()));
314  // Load the prototype from the initial map.
315  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
316 }
317 
318 
319 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
320  Register dst,
321  Register src,
322  bool inobject,
323  int index,
324  Representation representation) {
325  ASSERT(!representation.IsDouble());
326  int offset = index * kPointerSize;
327  if (!inobject) {
328  // Calculate the offset into the properties array.
329  offset = offset + FixedArray::kHeaderSize;
330  __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
331  src = dst;
332  }
333  __ ldr(dst, FieldMemOperand(src, offset));
334 }
335 
336 
337 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
338  Register receiver,
339  Register scratch,
340  Label* miss_label) {
341  // Check that the receiver isn't a smi.
342  __ JumpIfSmi(receiver, miss_label);
343 
344  // Check that the object is a JS array.
345  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
346  __ b(ne, miss_label);
347 
348  // Load length directly from the JS array.
349  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
350  __ Ret();
351 }
352 
353 
354 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
355  Register receiver,
356  Register scratch1,
357  Register scratch2,
358  Label* miss_label) {
359  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
360  __ mov(r0, scratch1);
361  __ Ret();
362 }
363 
364 
365 // Generate code to check that a global property cell is empty. Create
366 // the property cell at compilation time if no cell exists for the
367 // property.
368 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
369  Handle<JSGlobalObject> global,
370  Handle<Name> name,
371  Register scratch,
372  Label* miss) {
373  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
374  ASSERT(cell->value()->IsTheHole());
375  __ mov(scratch, Operand(cell));
376  __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
377  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
378  __ cmp(scratch, ip);
379  __ b(ne, miss);
380 }
381 
382 
383 void StoreStubCompiler::GenerateNegativeHolderLookup(
384  MacroAssembler* masm,
385  Handle<JSObject> holder,
386  Register holder_reg,
387  Handle<Name> name,
388  Label* miss) {
389  if (holder->IsJSGlobalObject()) {
390  GenerateCheckPropertyCell(
391  masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
392  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
393  GenerateDictionaryNegativeLookup(
394  masm, miss, holder_reg, name, scratch1(), scratch2());
395  }
396 }
397 
398 
399 // Generate StoreTransition code, value is passed in r0 register.
400 // When leaving generated code after success, the receiver_reg and name_reg
401 // may be clobbered. Upon branch to miss_label, the receiver and name
402 // registers have their original values.
403 void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
404  Handle<JSObject> object,
405  LookupResult* lookup,
406  Handle<Map> transition,
407  Handle<Name> name,
408  Register receiver_reg,
409  Register storage_reg,
410  Register value_reg,
411  Register scratch1,
412  Register scratch2,
413  Register scratch3,
414  Label* miss_label,
415  Label* slow) {
416  // r0 : value
417  Label exit;
418 
419  int descriptor = transition->LastAdded();
420  DescriptorArray* descriptors = transition->instance_descriptors();
421  PropertyDetails details = descriptors->GetDetails(descriptor);
422  Representation representation = details.representation();
423  ASSERT(!representation.IsNone());
424 
425  if (details.type() == CONSTANT) {
426  Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
427  __ Move(scratch1, constant);
428  __ cmp(value_reg, scratch1);
429  __ b(ne, miss_label);
430  } else if (representation.IsSmi()) {
431  __ JumpIfNotSmi(value_reg, miss_label);
432  } else if (representation.IsHeapObject()) {
433  __ JumpIfSmi(value_reg, miss_label);
434  } else if (representation.IsDouble()) {
435  Label do_store, heap_number;
436  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
437  __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
438 
439  __ JumpIfNotSmi(value_reg, &heap_number);
440  __ SmiUntag(scratch1, value_reg);
441  __ vmov(s0, scratch1);
442  __ vcvt_f64_s32(d0, s0);
443  __ jmp(&do_store);
444 
445  __ bind(&heap_number);
446  __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
447  miss_label, DONT_DO_SMI_CHECK);
448  __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
449 
450  __ bind(&do_store);
451  __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
452  }
453 
454  // Stub never generated for non-global objects that require access
455  // checks.
456  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
457 
458  // Perform map transition for the receiver if necessary.
459  if (details.type() == FIELD &&
460  object->map()->unused_property_fields() == 0) {
461  // The properties must be extended before we can store the value.
462  // We jump to a runtime call that extends the properties array.
463  __ push(receiver_reg);
464  __ mov(r2, Operand(transition));
465  __ Push(r2, r0);
466  __ TailCallExternalReference(
467  ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
468  masm->isolate()),
469  3,
470  1);
471  return;
472  }
473 
474  // Update the map of the object.
475  __ mov(scratch1, Operand(transition));
476  __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
477 
478  // Update the write barrier for the map field.
479  __ RecordWriteField(receiver_reg,
480  HeapObject::kMapOffset,
481  scratch1,
482  scratch2,
483  kLRHasNotBeenSaved,
484  kDontSaveFPRegs,
485  OMIT_REMEMBERED_SET,
486  OMIT_SMI_CHECK);
487 
488  if (details.type() == CONSTANT) {
489  ASSERT(value_reg.is(r0));
490  __ Ret();
491  return;
492  }
493 
494  int index = transition->instance_descriptors()->GetFieldIndex(
495  transition->LastAdded());
496 
497  // Adjust for the number of properties stored in the object. Even in the
498  // face of a transition we can use the old map here because the size of the
499  // object and the number of in-object properties is not going to change.
500  index -= object->map()->inobject_properties();
501 
502  // TODO(verwaest): Share this code as a code stub.
503  SmiCheck smi_check = representation.IsTagged()
504  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
505  if (index < 0) {
506  // Set the property straight into the object.
507  int offset = object->map()->instance_size() + (index * kPointerSize);
508  if (representation.IsDouble()) {
509  __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
510  } else {
511  __ str(value_reg, FieldMemOperand(receiver_reg, offset));
512  }
513 
514  if (!representation.IsSmi()) {
515  // Update the write barrier for the array address.
516  if (!representation.IsDouble()) {
517  __ mov(storage_reg, value_reg);
518  }
519  __ RecordWriteField(receiver_reg,
520  offset,
521  storage_reg,
522  scratch1,
523  kLRHasNotBeenSaved,
524  kDontSaveFPRegs,
525  EMIT_REMEMBERED_SET,
526  smi_check);
527  }
528  } else {
529  // Write to the properties array.
530  int offset = index * kPointerSize + FixedArray::kHeaderSize;
531  // Get the properties array
532  __ ldr(scratch1,
533  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
534  if (representation.IsDouble()) {
535  __ str(storage_reg, FieldMemOperand(scratch1, offset));
536  } else {
537  __ str(value_reg, FieldMemOperand(scratch1, offset));
538  }
539 
540  if (!representation.IsSmi()) {
541  // Update the write barrier for the array address.
542  if (!representation.IsDouble()) {
543  __ mov(storage_reg, value_reg);
544  }
545  __ RecordWriteField(scratch1,
546  offset,
547  storage_reg,
548  receiver_reg,
549  kLRHasNotBeenSaved,
550  kDontSaveFPRegs,
551  EMIT_REMEMBERED_SET,
552  smi_check);
553  }
554  }
555 
556  // Return the value (register r0).
557  ASSERT(value_reg.is(r0));
558  __ bind(&exit);
559  __ Ret();
560 }
561 
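GenerateStoreTransition above and GenerateStoreField below share one indexing convention: after subtracting the map's in-object property count, a negative index selects an in-object slot (addressed back from the instance size) and a non-negative index selects a slot in the external properties array. A hedged sketch of the offset each branch feeds to FieldMemOperand (illustrative helper, not part of this file):

  // Sketch only: the two offset computations used by the stores above and below.
  static int PropertyOffset(Map* map, int adjusted_index) {
    if (adjusted_index < 0) {
      // In-object slot; adjusted_index is negative, so this points back
      // from the end of the instance.
      return map->instance_size() + adjusted_index * kPointerSize;
    }
    // Slot in the properties FixedArray, past its header.
    return adjusted_index * kPointerSize + FixedArray::kHeaderSize;
  }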
562 
563 // Generate StoreField code, value is passed in r0 register.
564 // When leaving generated code after success, the receiver_reg and name_reg
565 // may be clobbered. Upon branch to miss_label, the receiver and name
566 // registers have their original values.
567 void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
568  Handle<JSObject> object,
569  LookupResult* lookup,
570  Register receiver_reg,
571  Register name_reg,
572  Register value_reg,
573  Register scratch1,
574  Register scratch2,
575  Label* miss_label) {
576  // r0 : value
577  Label exit;
578 
579  // Stub never generated for non-global objects that require access
580  // checks.
581  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
582 
583  int index = lookup->GetFieldIndex().field_index();
584 
585  // Adjust for the number of properties stored in the object. Even in the
586  // face of a transition we can use the old map here because the size of the
587  // object and the number of in-object properties is not going to change.
588  index -= object->map()->inobject_properties();
589 
590  Representation representation = lookup->representation();
591  ASSERT(!representation.IsNone());
592  if (representation.IsSmi()) {
593  __ JumpIfNotSmi(value_reg, miss_label);
594  } else if (representation.IsHeapObject()) {
595  __ JumpIfSmi(value_reg, miss_label);
596  } else if (representation.IsDouble()) {
597  // Load the double storage.
598  if (index < 0) {
599  int offset = object->map()->instance_size() + (index * kPointerSize);
600  __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
601  } else {
602  __ ldr(scratch1,
603  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
604  int offset = index * kPointerSize + FixedArray::kHeaderSize;
605  __ ldr(scratch1, FieldMemOperand(scratch1, offset));
606  }
607 
608  // Store the value into the storage.
609  Label do_store, heap_number;
610  __ JumpIfNotSmi(value_reg, &heap_number);
611  __ SmiUntag(scratch2, value_reg);
612  __ vmov(s0, scratch2);
613  __ vcvt_f64_s32(d0, s0);
614  __ jmp(&do_store);
615 
616  __ bind(&heap_number);
617  __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
618  miss_label, DONT_DO_SMI_CHECK);
619  __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
620 
621  __ bind(&do_store);
622  __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
623  // Return the value (register r0).
624  ASSERT(value_reg.is(r0));
625  __ Ret();
626  return;
627  }
628 
629  // TODO(verwaest): Share this code as a code stub.
630  SmiCheck smi_check = representation.IsTagged()
631  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
632  if (index < 0) {
633  // Set the property straight into the object.
634  int offset = object->map()->instance_size() + (index * kPointerSize);
635  __ str(value_reg, FieldMemOperand(receiver_reg, offset));
636 
637  if (!representation.IsSmi()) {
638  // Skip updating write barrier if storing a smi.
639  __ JumpIfSmi(value_reg, &exit);
640 
641  // Update the write barrier for the array address.
642  // Pass the now unused name_reg as a scratch register.
643  __ mov(name_reg, value_reg);
644  __ RecordWriteField(receiver_reg,
645  offset,
646  name_reg,
647  scratch1,
648  kLRHasNotBeenSaved,
649  kDontSaveFPRegs,
650  EMIT_REMEMBERED_SET,
651  smi_check);
652  }
653  } else {
654  // Write to the properties array.
655  int offset = index * kPointerSize + FixedArray::kHeaderSize;
656  // Get the properties array
657  __ ldr(scratch1,
658  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
659  __ str(value_reg, FieldMemOperand(scratch1, offset));
660 
661  if (!representation.IsSmi()) {
662  // Skip updating write barrier if storing a smi.
663  __ JumpIfSmi(value_reg, &exit);
664 
665  // Update the write barrier for the array address.
666  // Ok to clobber receiver_reg and name_reg, since we return.
667  __ mov(name_reg, value_reg);
668  __ RecordWriteField(scratch1,
669  offset,
670  name_reg,
671  receiver_reg,
672  kLRHasNotBeenSaved,
673  kDontSaveFPRegs,
674  EMIT_REMEMBERED_SET,
675  smi_check);
676  }
677  }
678 
679  // Return the value (register r0).
680  ASSERT(value_reg.is(r0));
681  __ bind(&exit);
682  __ Ret();
683 }
684 
685 
686 void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
687  Label* label,
688  Handle<Name> name) {
689  if (!label->is_unused()) {
690  __ bind(label);
691  __ mov(this->name(), Operand(name));
692  }
693 }
694 
695 
696 static void PushInterceptorArguments(MacroAssembler* masm,
697  Register receiver,
698  Register holder,
699  Register name,
700  Handle<JSObject> holder_obj) {
701  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
702  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
703  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
704  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
705  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
706  __ push(name);
707  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
708  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
709  Register scratch = name;
710  __ mov(scratch, Operand(interceptor));
711  __ push(scratch);
712  __ push(receiver);
713  __ push(holder);
714 }
715 
716 
717 static void CompileCallLoadPropertyWithInterceptor(
718  MacroAssembler* masm,
719  Register receiver,
720  Register holder,
721  Register name,
722  Handle<JSObject> holder_obj,
723  IC::UtilityId id) {
724  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
725  __ CallExternalReference(
726  ExternalReference(IC_Utility(id), masm->isolate()),
727  StubCache::kInterceptorArgsLength);
728 }
729 
730 
731 // Generate call to api function.
732 void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
733  const CallOptimization& optimization,
734  Handle<Map> receiver_map,
735  Register receiver,
736  Register scratch_in,
737  bool is_store,
738  int argc,
739  Register* values) {
740  ASSERT(!receiver.is(scratch_in));
741  __ push(receiver);
742  // Write the arguments to stack frame.
743  for (int i = 0; i < argc; i++) {
744  Register arg = values[argc-1-i];
745  ASSERT(!receiver.is(arg));
746  ASSERT(!scratch_in.is(arg));
747  __ push(arg);
748  }
749  ASSERT(optimization.is_simple_api_call());
750 
751  // Abi for CallApiFunctionStub.
752  Register callee = r0;
753  Register call_data = r4;
754  Register holder = r2;
755  Register api_function_address = r1;
756 
757  // Put holder in place.
758  CallOptimization::HolderLookup holder_lookup;
759  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
760  receiver_map,
761  &holder_lookup);
762  switch (holder_lookup) {
763  case CallOptimization::kHolderIsReceiver:
764  __ Move(holder, receiver);
765  break;
766  case CallOptimization::kHolderFound:
767  __ Move(holder, api_holder);
768  break;
769  case CallOptimization::kHolderNotFound:
770  UNREACHABLE();
771  break;
772  }
773 
774  Isolate* isolate = masm->isolate();
775  Handle<JSFunction> function = optimization.constant_function();
776  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
777  Handle<Object> call_data_obj(api_call_info->data(), isolate);
778 
779  // Put callee in place.
780  __ Move(callee, function);
781 
782  bool call_data_undefined = false;
783  // Put call_data in place.
784  if (isolate->heap()->InNewSpace(*call_data_obj)) {
785  __ Move(call_data, api_call_info);
786  __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
787  } else if (call_data_obj->IsUndefined()) {
788  call_data_undefined = true;
789  __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
790  } else {
791  __ Move(call_data, call_data_obj);
792  }
793 
794  // Put api_function_address in place.
795  Address function_address = v8::ToCData<Address>(api_call_info->callback());
796  ApiFunction fun(function_address);
797  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
798  ExternalReference ref = ExternalReference(&fun,
799  type,
800  masm->isolate());
801  __ mov(api_function_address, Operand(ref));
802 
803  // Jump to stub.
804  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
805  __ TailCallStub(&stub);
806 }
807 
808 
809 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
810  __ Jump(code, RelocInfo::CODE_TARGET);
811 }
812 
813 
814 #undef __
815 #define __ ACCESS_MASM(masm())
816 
817 
818 Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
819  Register object_reg,
820  Handle<JSObject> holder,
821  Register holder_reg,
822  Register scratch1,
823  Register scratch2,
824  Handle<Name> name,
825  Label* miss,
826  PrototypeCheckType check) {
827  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
828 
829  // Make sure there's no overlap between holder and object registers.
830  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
831  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
832  && !scratch2.is(scratch1));
833 
834  // Keep track of the current object in register reg.
835  Register reg = object_reg;
836  int depth = 0;
837 
838  Handle<JSObject> current = Handle<JSObject>::null();
839  if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
840  Handle<JSObject> prototype = Handle<JSObject>::null();
841  Handle<Map> current_map = receiver_map;
842  Handle<Map> holder_map(holder->map());
843  // Traverse the prototype chain and check the maps in the prototype chain for
844  // fast and global objects or do negative lookup for normal objects.
845  while (!current_map.is_identical_to(holder_map)) {
846  ++depth;
847 
848  // Only global objects and objects that do not require access
849  // checks are allowed in stubs.
850  ASSERT(current_map->IsJSGlobalProxyMap() ||
851  !current_map->is_access_check_needed());
852 
853  prototype = handle(JSObject::cast(current_map->prototype()));
854  if (current_map->is_dictionary_map() &&
855  !current_map->IsJSGlobalObjectMap() &&
856  !current_map->IsJSGlobalProxyMap()) {
857  if (!name->IsUniqueName()) {
858  ASSERT(name->IsString());
859  name = factory()->InternalizeString(Handle<String>::cast(name));
860  }
861  ASSERT(current.is_null() ||
862  current->property_dictionary()->FindEntry(*name) ==
863  NameDictionary::kNotFound);
864 
865  GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
866  scratch1, scratch2);
867 
868  __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
869  reg = holder_reg; // From now on the object will be in holder_reg.
870  __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
871  } else {
872  Register map_reg = scratch1;
873  if (depth != 1 || check == CHECK_ALL_MAPS) {
874  // CheckMap implicitly loads the map of |reg| into |map_reg|.
875  __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
876  } else {
877  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
878  }
879 
880  // Check access rights to the global object. This has to happen after
881  // the map check so that we know that the object is actually a global
882  // object.
883  if (current_map->IsJSGlobalProxyMap()) {
884  __ CheckAccessGlobalProxy(reg, scratch2, miss);
885  } else if (current_map->IsJSGlobalObjectMap()) {
886  GenerateCheckPropertyCell(
887  masm(), Handle<JSGlobalObject>::cast(current), name,
888  scratch2, miss);
889  }
890 
891  reg = holder_reg; // From now on the object will be in holder_reg.
892 
893  if (heap()->InNewSpace(*prototype)) {
894  // The prototype is in new space; we cannot store a reference to it
895  // in the code. Load it from the map.
896  __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
897  } else {
898  // The prototype is in old space; load it directly.
899  __ mov(reg, Operand(prototype));
900  }
901  }
902 
903  // Go to the next object in the prototype chain.
904  current = prototype;
905  current_map = handle(current->map());
906  }
907 
908  // Log the check depth.
909  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
910 
911  if (depth != 0 || check == CHECK_ALL_MAPS) {
912  // Check the holder map.
913  __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
914  }
915 
916  // Perform security check for access to the global object.
917  ASSERT(current_map->IsJSGlobalProxyMap() ||
918  !current_map->is_access_check_needed());
919  if (current_map->IsJSGlobalProxyMap()) {
920  __ CheckAccessGlobalProxy(reg, scratch1, miss);
921  }
922 
923  // Return the register containing the holder.
924  return reg;
925 }
926 
927 
928 void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
929  if (!miss->is_unused()) {
930  Label success;
931  __ b(&success);
932  __ bind(miss);
933  TailCallBuiltin(masm(), MissBuiltin(kind()));
934  __ bind(&success);
935  }
936 }
937 
938 
939 void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
940  if (!miss->is_unused()) {
941  Label success;
942  __ b(&success);
943  GenerateRestoreName(masm(), miss, name);
944  TailCallBuiltin(masm(), MissBuiltin(kind()));
945  __ bind(&success);
946  }
947 }
948 
949 
950 Register LoadStubCompiler::CallbackHandlerFrontend(
951  Handle<HeapType> type,
952  Register object_reg,
953  Handle<JSObject> holder,
954  Handle<Name> name,
955  Handle<Object> callback) {
956  Label miss;
957 
958  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
959 
960  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
961  ASSERT(!reg.is(scratch2()));
962  ASSERT(!reg.is(scratch3()));
963  ASSERT(!reg.is(scratch4()));
964 
965  // Load the properties dictionary.
966  Register dictionary = scratch4();
967  __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
968 
969  // Probe the dictionary.
970  Label probe_done;
971  NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
972  &miss,
973  &probe_done,
974  dictionary,
975  this->name(),
976  scratch2(),
977  scratch3());
978  __ bind(&probe_done);
979 
980  // If probing finds an entry in the dictionary, scratch3 contains the
981  // pointer into the dictionary. Check that the value is the callback.
982  Register pointer = scratch3();
983  const int kElementsStartOffset = NameDictionary::kHeaderSize +
984  NameDictionary::kElementsStartIndex * kPointerSize;
985  const int kValueOffset = kElementsStartOffset + kPointerSize;
986  __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
987  __ cmp(scratch2(), Operand(callback));
988  __ b(ne, &miss);
989  }
990 
991  HandlerFrontendFooter(name, &miss);
992  return reg;
993 }
994 
995 
996 void LoadStubCompiler::GenerateLoadField(Register reg,
997  Handle<JSObject> holder,
998  PropertyIndex field,
999  Representation representation) {
1000  if (!reg.is(receiver())) __ mov(receiver(), reg);
1001  if (kind() == Code::LOAD_IC) {
1002  LoadFieldStub stub(field.is_inobject(holder),
1003  field.translate(holder),
1004  representation);
1005  GenerateTailCall(masm(), stub.GetCode(isolate()));
1006  } else {
1007  KeyedLoadFieldStub stub(field.is_inobject(holder),
1008  field.translate(holder),
1009  representation);
1010  GenerateTailCall(masm(), stub.GetCode(isolate()));
1011  }
1012 }
1013 
1014 
1015 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
1016  // Return the constant value.
1017  __ Move(r0, value);
1018  __ Ret();
1019 }
1020 
1021 
1022 void LoadStubCompiler::GenerateLoadCallback(
1023  Register reg,
1024  Handle<ExecutableAccessorInfo> callback) {
1025  // Build AccessorInfo::args_ list on the stack and push property name below
1026  // the exit frame to make GC aware of them and store pointers to them.
1027  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
1028  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
1029  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
1030  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
1031  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
1032  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
1033  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
1034  ASSERT(!scratch2().is(reg));
1035  ASSERT(!scratch3().is(reg));
1036  ASSERT(!scratch4().is(reg));
1037  __ push(receiver());
1038  if (heap()->InNewSpace(callback->data())) {
1039  __ Move(scratch3(), callback);
1040  __ ldr(scratch3(), FieldMemOperand(scratch3(),
1041  ExecutableAccessorInfo::kDataOffset));
1042  } else {
1043  __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
1044  }
1045  __ push(scratch3());
1046  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
1047  __ mov(scratch4(), scratch3());
1048  __ Push(scratch3(), scratch4());
1049  __ mov(scratch4(),
1050  Operand(ExternalReference::isolate_address(isolate())));
1051  __ Push(scratch4(), reg);
1052  __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
1053  __ push(name());
1054 
1055  // Abi for CallApiGetter
1056  Register getter_address_reg = r2;
1057 
1058  Address getter_address = v8::ToCData<Address>(callback->getter());
1059  ApiFunction fun(getter_address);
1060  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
1061  ExternalReference ref = ExternalReference(&fun, type, isolate());
1062  __ mov(getter_address_reg, Operand(ref));
1063 
1064  CallApiGetterStub stub;
1065  __ TailCallStub(&stub);
1066 }
1067 
1068 
1069 void LoadStubCompiler::GenerateLoadInterceptor(
1070  Register holder_reg,
1071  Handle<Object> object,
1072  Handle<JSObject> interceptor_holder,
1073  LookupResult* lookup,
1074  Handle<Name> name) {
1075  ASSERT(interceptor_holder->HasNamedInterceptor());
1076  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1077 
1078  // So far the most popular follow ups for interceptor loads are FIELD
1079  // and CALLBACKS, so inline only them, other cases may be added
1080  // later.
1081  bool compile_followup_inline = false;
1082  if (lookup->IsFound() && lookup->IsCacheable()) {
1083  if (lookup->IsField()) {
1084  compile_followup_inline = true;
1085  } else if (lookup->type() == CALLBACKS &&
1086  lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
1087  ExecutableAccessorInfo* callback =
1088  ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
1089  compile_followup_inline = callback->getter() != NULL &&
1090  callback->IsCompatibleReceiver(*object);
1091  }
1092  }
1093 
1094  if (compile_followup_inline) {
1095  // Compile the interceptor call, followed by inline code to load the
1096  // property from further up the prototype chain if the call fails.
1097  // Check that the maps haven't changed.
1098  ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
1099 
1100  // Preserve the receiver register explicitly whenever it is different from
1101  // the holder and it is needed should the interceptor return without any
1102  // result. The CALLBACKS case needs the receiver to be passed into C++ code,
1103  // the FIELD case might cause a miss during the prototype check.
1104  bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
1105  bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
1106  (lookup->type() == CALLBACKS || must_perform_prototype_check);
1107 
1108  // Save necessary data before invoking an interceptor.
1109  // Requires a frame to make GC aware of pushed pointers.
1110  {
1111  FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
1112  if (must_preserve_receiver_reg) {
1113  __ Push(receiver(), holder_reg, this->name());
1114  } else {
1115  __ Push(holder_reg, this->name());
1116  }
1117  // Invoke an interceptor. Note: map checks from receiver to
1118  // interceptor's holder has been compiled before (see a caller
1119  // of this method.)
1120  CompileCallLoadPropertyWithInterceptor(
1121  masm(), receiver(), holder_reg, this->name(), interceptor_holder,
1122  IC::kLoadPropertyWithInterceptorOnly);
1123 
1124  // Check if interceptor provided a value for property. If it's
1125  // the case, return immediately.
1126  Label interceptor_failed;
1127  __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
1128  __ cmp(r0, scratch1());
1129  __ b(eq, &interceptor_failed);
1130  frame_scope.GenerateLeaveFrame();
1131  __ Ret();
1132 
1133  __ bind(&interceptor_failed);
1134  __ pop(this->name());
1135  __ pop(holder_reg);
1136  if (must_preserve_receiver_reg) {
1137  __ pop(receiver());
1138  }
1139  // Leave the internal frame.
1140  }
1141 
1142  GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
1143  } else { // !compile_followup_inline
1144  // Call the runtime system to load the interceptor.
1145  // Check that the maps haven't changed.
1146  PushInterceptorArguments(masm(), receiver(), holder_reg,
1147  this->name(), interceptor_holder);
1148 
1149  ExternalReference ref =
1150  ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
1151  isolate());
1152  __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
1153  }
1154 }
1155 
1156 
1157 void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
1158  Label success;
1159  // Check that the object is a boolean.
1160  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
1161  __ cmp(object, ip);
1162  __ b(eq, &success);
1163  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
1164  __ cmp(object, ip);
1165  __ b(ne, miss);
1166  __ bind(&success);
1167 }
1168 
1169 
1170 Handle<Code> StoreStubCompiler::CompileStoreCallback(
1171  Handle<JSObject> object,
1172  Handle<JSObject> holder,
1173  Handle<Name> name,
1174  Handle<ExecutableAccessorInfo> callback) {
1175  Register holder_reg = HandlerFrontend(
1176  IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
1177 
1178  // Stub never generated for non-global objects that require access checks.
1179  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
1180 
1181  __ push(receiver()); // receiver
1182  __ push(holder_reg);
1183  __ mov(ip, Operand(callback)); // callback info
1184  __ push(ip);
1185  __ mov(ip, Operand(name));
1186  __ Push(ip, value());
1187 
1188  // Do tail-call to the runtime system.
1189  ExternalReference store_callback_property =
1190  ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
1191  __ TailCallExternalReference(store_callback_property, 5, 1);
1192 
1193  // Return the generated code.
1194  return GetCode(kind(), Code::FAST, name);
1195 }
1196 
1197 
1198 #undef __
1199 #define __ ACCESS_MASM(masm)
1200 
1201 
1202 void StoreStubCompiler::GenerateStoreViaSetter(
1203  MacroAssembler* masm,
1204  Handle<HeapType> type,
1205  Register receiver,
1206  Handle<JSFunction> setter) {
1207  // ----------- S t a t e -------------
1208  // -- lr : return address
1209  // -----------------------------------
1210  {
1211  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1212 
1213  // Save value register, so we can restore it later.
1214  __ push(value());
1215 
1216  if (!setter.is_null()) {
1217  // Call the JavaScript setter with receiver and value on the stack.
1218  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1219  // Swap in the global receiver.
1220  __ ldr(receiver,
1221  FieldMemOperand(
1222  receiver, JSGlobalObject::kGlobalReceiverOffset));
1223  }
1224  __ Push(receiver, value());
1225  ParameterCount actual(1);
1226  ParameterCount expected(setter);
1227  __ InvokeFunction(setter, expected, actual,
1228  CALL_FUNCTION, NullCallWrapper());
1229  } else {
1230  // If we generate a global code snippet for deoptimization only, remember
1231  // the place to continue after deoptimization.
1232  masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
1233  }
1234 
1235  // We have to return the passed value, not the return value of the setter.
1236  __ pop(r0);
1237 
1238  // Restore context register.
1239  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1240  }
1241  __ Ret();
1242 }
1243 
1244 
1245 #undef __
1246 #define __ ACCESS_MASM(masm())
1247 
1248 
1249 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
1250  Handle<JSObject> object,
1251  Handle<Name> name) {
1252  __ Push(receiver(), this->name(), value());
1253 
1254  // Do tail-call to the runtime system.
1255  ExternalReference store_ic_property =
1256  ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
1257  __ TailCallExternalReference(store_ic_property, 3, 1);
1258 
1259  // Return the generated code.
1260  return GetCode(kind(), Code::FAST, name);
1261 }
1262 
1263 
1264 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
1265  Handle<JSObject> last,
1266  Handle<Name> name) {
1267  NonexistentHandlerFrontend(type, last, name);
1268 
1269  // Return undefined if maps of the full prototype chain are still the
1270  // same and no global property with this name contains a value.
1271  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1272  __ Ret();
1273 
1274  // Return the generated code.
1275  return GetCode(kind(), Code::FAST, name);
1276 }
1277 
1278 
1279 Register* LoadStubCompiler::registers() {
1280  // receiver, name, scratch1, scratch2, scratch3, scratch4.
1281  static Register registers[] = { r0, r2, r3, r1, r4, r5 };
1282  return registers;
1283 }
1284 
1285 
1286 Register* KeyedLoadStubCompiler::registers() {
1287  // receiver, name, scratch1, scratch2, scratch3, scratch4.
1288  static Register registers[] = { r1, r0, r2, r3, r4, r5 };
1289  return registers;
1290 }
1291 
1292 
1293 Register StoreStubCompiler::value() {
1294  return r0;
1295 }
1296 
1297 
1298 Register* StoreStubCompiler::registers() {
1299  // receiver, name, scratch1, scratch2, scratch3.
1300  static Register registers[] = { r1, r2, r3, r4, r5 };
1301  return registers;
1302 }
1303 
1304 
1305 Register* KeyedStoreStubCompiler::registers() {
1306  // receiver, name, scratch1, scratch2, scratch3.
1307  static Register registers[] = { r2, r1, r3, r4, r5 };
1308  return registers;
1309 }
1310 
1311 
1312 #undef __
1313 #define __ ACCESS_MASM(masm)
1314 
1315 
1316 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
1317  Handle<HeapType> type,
1318  Register receiver,
1319  Handle<JSFunction> getter) {
1320  // ----------- S t a t e -------------
1321  // -- r0 : receiver
1322  // -- r2 : name
1323  // -- lr : return address
1324  // -----------------------------------
1325  {
1326  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1327 
1328  if (!getter.is_null()) {
1329  // Call the JavaScript getter with the receiver on the stack.
1330  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1331  // Swap in the global receiver.
1332  __ ldr(receiver,
1333  FieldMemOperand(
1334  receiver, JSGlobalObject::kGlobalReceiverOffset));
1335  }
1336  __ push(receiver);
1337  ParameterCount actual(0);
1338  ParameterCount expected(getter);
1339  __ InvokeFunction(getter, expected, actual,
1340  CALL_FUNCTION, NullCallWrapper());
1341  } else {
1342  // If we generate a global code snippet for deoptimization only, remember
1343  // the place to continue after deoptimization.
1344  masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
1345  }
1346 
1347  // Restore context register.
1348  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1349  }
1350  __ Ret();
1351 }
1352 
1353 
1354 #undef __
1355 #define __ ACCESS_MASM(masm())
1356 
1357 
1358 Handle<Code> LoadStubCompiler::CompileLoadGlobal(
1359  Handle<HeapType> type,
1360  Handle<GlobalObject> global,
1361  Handle<PropertyCell> cell,
1362  Handle<Name> name,
1363  bool is_dont_delete) {
1364  Label miss;
1365  HandlerFrontendHeader(type, receiver(), global, name, &miss);
1366 
1367  // Get the value from the cell.
1368  __ mov(r3, Operand(cell));
1369  __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset));
1370 
1371  // Check for deleted property if property can actually be deleted.
1372  if (!is_dont_delete) {
1373  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1374  __ cmp(r4, ip);
1375  __ b(eq, &miss);
1376  }
1377 
1378  Counters* counters = isolate()->counters();
1379  __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
1380  __ mov(r0, r4);
1381  __ Ret();
1382 
1383  HandlerFrontendFooter(name, &miss);
1384 
1385  // Return the generated code.
1386  return GetCode(kind(), Code::NORMAL, name);
1387 }
1388 
1389 
1390 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
1391  TypeHandleList* types,
1392  CodeHandleList* handlers,
1393  Handle<Name> name,
1394  Code::StubType type,
1395  IcCheckType check) {
1396  Label miss;
1397 
1398  if (check == PROPERTY &&
1399  (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
1400  __ cmp(this->name(), Operand(name));
1401  __ b(ne, &miss);
1402  }
1403 
1404  Label number_case;
1405  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
1406  __ JumpIfSmi(receiver(), smi_target);
1407 
1408  Register map_reg = scratch1();
1409 
1410  int receiver_count = types->length();
1411  int number_of_handled_maps = 0;
1412  __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
1413  for (int current = 0; current < receiver_count; ++current) {
1414  Handle<HeapType> type = types->at(current);
1415  Handle<Map> map = IC::TypeToMap(*type, isolate());
1416  if (!map->is_deprecated()) {
1417  number_of_handled_maps++;
1418  __ mov(ip, Operand(map));
1419  __ cmp(map_reg, ip);
1420  if (type->Is(HeapType::Number())) {
1421  ASSERT(!number_case.is_unused());
1422  __ bind(&number_case);
1423  }
1424  __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
1425  }
1426  }
1427  ASSERT(number_of_handled_maps != 0);
1428 
1429  __ bind(&miss);
1430  TailCallBuiltin(masm(), MissBuiltin(kind()));
1431 
1432  // Return the generated code.
1433  InlineCacheState state =
1434  number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
1435  return GetICCode(kind(), type, name, state);
1436 }
1437 
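CompilePolymorphicIC above emits a linear dispatch: the receiver's map is loaded once into scratch1() and compared against each non-deprecated map in the list, jumping to the matching handler and otherwise falling through to the miss builtin. A hedged sketch of the runtime behaviour of the generated code (illustrative only; the smi/number_case path is omitted):

  // Sketch only: the map dispatch the generated IC performs.
  static Code* Dispatch(HeapObject* receiver, Map** maps, Code** handlers,
                        int count) {
    Map* map = receiver->map();
    for (int i = 0; i < count; i++) {
      if (maps[i] == map) return handlers[i];  // __ Jump(handlers->at(i), eq)
    }
    return NULL;  // miss: TailCallBuiltin(masm(), MissBuiltin(kind()))
  }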
1438 
1439 void StoreStubCompiler::GenerateStoreArrayLength() {
1440  // Prepare tail call to StoreIC_ArrayLength.
1441  __ Push(receiver(), value());
1442 
1443  ExternalReference ref =
1444  ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
1445  masm()->isolate());
1446  __ TailCallExternalReference(ref, 2, 1);
1447 }
1448 
1449 
1450 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
1451  MapHandleList* receiver_maps,
1452  CodeHandleList* handler_stubs,
1453  MapHandleList* transitioned_maps) {
1454  Label miss;
1455  __ JumpIfSmi(receiver(), &miss);
1456 
1457  int receiver_count = receiver_maps->length();
1458  __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
1459  for (int i = 0; i < receiver_count; ++i) {
1460  __ mov(ip, Operand(receiver_maps->at(i)));
1461  __ cmp(scratch1(), ip);
1462  if (transitioned_maps->at(i).is_null()) {
1463  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
1464  } else {
1465  Label next_map;
1466  __ b(ne, &next_map);
1467  __ mov(transition_map(), Operand(transitioned_maps->at(i)));
1468  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
1469  __ bind(&next_map);
1470  }
1471  }
1472 
1473  __ bind(&miss);
1474  TailCallBuiltin(masm(), MissBuiltin(kind()));
1475 
1476  // Return the generated code.
1477  return GetICCode(
1478  kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
1479 }
1480 
1481 
1482 #undef __
1483 #define __ ACCESS_MASM(masm)
1484 
1485 
1486 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
1487  MacroAssembler* masm) {
1488  // ---------- S t a t e --------------
1489  // -- lr : return address
1490  // -- r0 : key
1491  // -- r1 : receiver
1492  // -----------------------------------
1493  Label slow, miss;
1494 
1495  Register key = r0;
1496  Register receiver = r1;
1497 
1498  __ UntagAndJumpIfNotSmi(r2, key, &miss);
1499  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
1500  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
1501  __ Ret();
1502 
1503  __ bind(&slow);
1504  __ IncrementCounter(
1505  masm->isolate()->counters()->keyed_load_external_array_slow(),
1506  1, r2, r3);
1507 
1508  // ---------- S t a t e --------------
1509  // -- lr : return address
1510  // -- r0 : key
1511  // -- r1 : receiver
1512  // -----------------------------------
1513  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
1514 
1515  // Miss case, call the runtime.
1516  __ bind(&miss);
1517 
1518  // ---------- S t a t e --------------
1519  // -- lr : return address
1520  // -- r0 : key
1521  // -- r1 : receiver
1522  // -----------------------------------
1523  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
1524 }
1525 
1526 
1527 #undef __
1528 
1529 } } // namespace v8::internal
1530 
1531 #endif // V8_TARGET_ARCH_ARM
byte * Address
Definition: globals.h:186
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void GenerateRestoreName(MacroAssembler *masm, Label *label, Handle< Name > name)
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7519
static const int kValueOffset
Definition: objects.h:9547
static int SlotOffset(int index)
Definition: contexts.h:498
const Register r3
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static ExecutableAccessorInfo * cast(Object *obj)
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
const Register cp
const LowDwVfpRegister d0
static const int kFlagsOffset
Definition: objects.h:5592
#define LOG(isolate, Call)
Definition: log.h:86
Handle< Code > CompileStoreInterceptor(Handle< JSObject > object, Handle< Name > name)
static Handle< String > cast(Handle< S > that)
Definition: handles.h:75
static const int kGlobalReceiverOffset
Definition: objects.h:7613
void GenerateLoadField(Register reg, Handle< JSObject > holder, PropertyIndex field, Representation representation)
static StubType ExtractTypeFromFlags(Flags flags)
Definition: objects-inl.h:4646
TypeImpl< ZoneTypeConfig > Type
static const int kInterceptorArgsLength
Definition: stub-cache.h:207
static const int kInterceptorArgsNameIndex
Definition: stub-cache.h:203
static const int kHasNamedInterceptor
Definition: objects.h:6470
static const int kIsAccessCheckNeeded
Definition: objects.h:6474
uint32_t Flags
Definition: objects.h:5184
List< Handle< Map > > MapHandleList
Definition: list.h:218
#define ASSERT(condition)
Definition: checks.h:329
static const int kContextOffset
Definition: frames.h:185
const int kPointerSizeLog2
Definition: globals.h:281
Handle< Code > CompileStoreCallback(Handle< JSObject > object, Handle< JSObject > holder, Handle< Name > name, Handle< ExecutableAccessorInfo > callback)
virtual Register HandlerFrontendHeader(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Label *miss)
Definition: stub-cache.cc:790
const Register r2
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
const Register sp
#define UNREACHABLE()
Definition: checks.h:52
void GenerateLoadPostInterceptor(Register reg, Handle< JSObject > interceptor_holder, Handle< Name > name, LookupResult *lookup)
Definition: stub-cache.cc:983
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static const int kValueOffset
Definition: objects.h:1971
Print usage including flags (concatenated V8 command-line flag help text)
Definition: flags.cc:665
static void GenerateStoreViaSetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > setter)
static Handle< HeapType > CurrentTypeOf(Handle< Object > object, Isolate *isolate)
Definition: ic.cc:676
const Register ip
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
Handle< Code > CompilePolymorphicIC(TypeHandleList *types, CodeHandleList *handlers, Handle< Name > name, Code::StubType type, IcCheckType check)
const int kHeapObjectTag
Definition: v8.h:5473
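kHeapObjectTag (and kHeapObjectTagSize below) describe V8's pointer-tagging scheme: a heap object pointer carries a tag in its low bits so it can be told apart from a small integer (Smi). As a hedged sketch of that convention, with the concrete values assumed from V8's usual scheme rather than quoted from this listing:

#include <stdint.h>

// Sketch of V8-style pointer tagging (values are assumptions, not taken from this page).
const int kHeapObjectTag = 1;      // low-bit tag marking a heap object pointer
const int kHeapObjectTagSize = 2;  // number of tag bits
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;

inline bool HasHeapObjectTag(intptr_t value) {
  // A value is a heap object pointer if its low tag bits equal kHeapObjectTag.
  return (value & kHeapObjectTagMask) == kHeapObjectTag;
}

Because of this tag, code that reads a field through a tagged pointer must subtract kHeapObjectTag from the field offset (see FieldMemOperand further down).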
void GenerateLoadConstant(Handle< Object > value)
code_comments flag (concatenated V8 command-line flag help text)
#define __
static const int kInterceptorArgsThisIndex
Definition: stub-cache.h:205
static Handle< PropertyCell > EnsurePropertyCell(Handle< JSGlobalObject > global, Handle< Name > name)
Definition: objects.cc:14752
const Register pc
List< Handle< HeapType > > TypeHandleList
Definition: list.h:219
static const int kPropertiesOffset
Definition: objects.h:2755
static const int kReturnValueDefaultValueIndex
Definition: arguments.h:179
const SwVfpRegister s0
void GenerateNegativeHolderLookup(MacroAssembler *masm, Handle< JSObject > holder, Register holder_reg, Handle< Name > name, Label *miss)
const Register r0
static const int kElementsOffset
Definition: objects.h:2756
Handle< Code > CompileLoadNonexistent(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Handle< Code > CompileLoadGlobal(Handle< HeapType > type, Handle< GlobalObject > holder, Handle< PropertyCell > cell, Handle< Name > name, bool is_dont_delete)
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
void GenerateLoadInterceptor(Register holder_reg, Handle< Object > object, Handle< JSObject > holder, LookupResult *lookup, Handle< Name > name)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
static const int kInterceptorArgsInfoIndex
Definition: stub-cache.h:204
static const int kHeaderSize
Definition: objects.h:3016
void GenerateLoadCallback(Register reg, Handle< ExecutableAccessorInfo > callback)
static Builtins::Name MissBuiltin(Code::Kind kind)
Definition: stub-cache.h:466
static const int kMapOffset
Definition: objects.h:1890
bool is(Register reg) const
List< Handle< Code > > CodeHandleList
Definition: list.h:220
const Register r1
Register CallbackHandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Handle< Object > callback)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
MemOperand FieldMemOperand(Register object, int offset)
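FieldMemOperand is the addressing helper the stubs use to reach a field of a tagged heap object. A minimal sketch of such a helper, assuming the convention that field offsets (kMapOffset, kPropertiesOffset, ...) are given relative to the untagged object start:

// Sketch: form a MemOperand for a field of a tagged heap object.
// The register still holds the tagged pointer, so the tag is folded into
// the displacement instead of untagging the register first.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}

Typical use in these handlers is a load such as __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset)) to fetch the receiver's map before a map check.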
static const int kDataOffset
Definition: objects.h:10433
void GenerateStoreField(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Label *miss_label)
friend class Isolate
Definition: stub-cache.h:280
static const int kHeaderSize
Definition: objects.h:5604
static Handle< T > null()
Definition: handles.h:80
void NonexistentHandlerFrontend(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Definition: stub-cache.cc:864
const int kHeapObjectTagSize
Definition: v8.h:5474
static const int kPrototypeOffset
Definition: objects.h:6427
static const int kFlagsNotUsedInLookup
Definition: objects.h:5684
const Register no_reg
Handle< Code > GetCode(Code::Kind kind, Code::StubType type, Handle< Name > name)
Definition: stub-cache.cc:1281
void GenerateStoreTransition(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Handle< Map > transition, Handle< Name > name, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Register scratch3, Label *miss_label, Label *slow)
const Register fp
static const int kNativeContextOffset
Definition: objects.h:7611
Register HandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name)
Definition: stub-cache.cc:850
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > getter)
static const int kInterceptorArgsHolderIndex
Definition: stub-cache.h:206
name (concatenated V8 command-line flag help text)
Definition: flags.cc:505
Handle< Code > GetICCode(Code::Kind kind, Code::StubType type, Handle< Name > name, InlineCacheState state=MONOMORPHIC)
Definition: stub-cache.cc:1269
static JSObject * cast(Object *obj)
const Register r5
static const int kInstanceTypeOffset
Definition: objects.h:6459
static Handle< Map > TypeToMap(HeapType *type, Isolate *isolate)
Definition: ic.cc:683
const Register r4
bool IncludesNumberType(TypeHandleList *types)
Definition: stub-cache.cc:842
static JSFunction * cast(Object *obj)