v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
stub-cache-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM64
31 
32 #include "ic-inl.h"
33 #include "codegen.h"
34 #include "stub-cache.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 #define __ ACCESS_MASM(masm)
41 
42 
43 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
44  Label* miss_label,
45  Register receiver,
46  Handle<Name> name,
47  Register scratch0,
48  Register scratch1) {
49  ASSERT(!AreAliased(receiver, scratch0, scratch1));
50  ASSERT(name->IsUniqueName());
51  Counters* counters = masm->isolate()->counters();
52  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
53  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
54 
55  Label done;
56 
57  const int kInterceptorOrAccessCheckNeededMask =
58  (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
59 
60  // Bail out if the receiver has a named interceptor or requires access checks.
61  Register map = scratch1;
62  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
63  __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
64  __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
65  __ B(ne, miss_label);
66 
67  // Check that receiver is a JSObject.
68  __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
69  __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
70  __ B(lt, miss_label);
71 
72  // Load properties array.
73  Register properties = scratch0;
74  __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
75  // Check that the properties array is a dictionary.
76  __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
77  __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
78 
79  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
80  miss_label,
81  &done,
82  receiver,
83  properties,
84  name,
85  scratch1);
86  __ Bind(&done);
87  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
88 }
89 
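The sequence above bails out to miss_label unless it can prove that 'name' is absent from the receiver. As a rough C++-style sketch (illustrative only; the actual dictionary probe is emitted by NameDictionaryLookupStub::GenerateNegativeLookup, and FindEntry here is a hypothetical helper):

  // Every failed check branches to miss_label.
  if (receiver->map()->has_named_interceptor() ||
      receiver->map()->is_access_check_needed()) goto miss;
  if (receiver->map()->instance_type() < FIRST_SPEC_OBJECT_TYPE) goto miss;
  if (receiver->properties()->map() != heap->hash_table_map()) goto miss;
  if (FindEntry(receiver->property_dictionary(), name) != kNotFound) goto miss;
  // Otherwise 'name' is provably absent; fall through to &done.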
90 
91 // Probe primary or secondary table.
92 // If the entry is found in the cache, the generated code jumps to the first
93 // instruction of the stub in the cache.
94 // If there is a miss, the code falls through.
95 //
96 // 'receiver', 'name' and 'offset' registers are preserved on miss.
97 static void ProbeTable(Isolate* isolate,
98  MacroAssembler* masm,
99  Code::Flags flags,
100  StubCache::Table table,
101  Register receiver,
102  Register name,
103  Register offset,
104  Register scratch,
105  Register scratch2,
106  Register scratch3) {
107  // Some code below relies on the fact that the Entry struct contains
108  // 3 pointers (name, code, map).
109  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
110 
111  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
112  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
113  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
114 
115  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
116  uintptr_t value_off_addr =
117  reinterpret_cast<uintptr_t>(value_offset.address());
118  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
119 
120  Label miss;
121 
122  ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
123 
124  // Multiply by 3 because there are 3 fields per entry.
125  __ Add(scratch3, offset, Operand(offset, LSL, 1));
126 
127  // Calculate the base address of the entry.
128  __ Mov(scratch, key_offset);
129  __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
130 
131  // Check that the key in the entry matches the name.
132  __ Ldr(scratch2, MemOperand(scratch));
133  __ Cmp(name, scratch2);
134  __ B(ne, &miss);
135 
136  // Check the map matches.
137  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
138  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
139  __ Cmp(scratch2, scratch3);
140  __ B(ne, &miss);
141 
142  // Get the code entry from the cache.
143  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
144 
145  // Check that the flags match what we're looking for.
146  __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
147  __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
148  __ Cmp(scratch2.W(), flags);
149  __ B(ne, &miss);
150 
151 #ifdef DEBUG
152  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
153  __ B(&miss);
154  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
155  __ B(&miss);
156  }
157 #endif
158 
159  // Jump to the first instruction in the code stub.
160  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
161  __ Br(scratch);
162 
163  // Miss: fall through.
164  __ Bind(&miss);
165 }
166 
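Each table entry holds three tagged pointers (name, code, map), so the probe above amounts to the following sketch (plain C++ for illustration; key_base stands for the external reference loaded into 'scratch' and Load() is a hypothetical pointer-sized load helper):

  // 'offset' was already masked to the table size by the caller.
  uintptr_t entry = key_base + (offset * 3) * kPointerSize;      // 3 fields per entry
  if (Load(entry) != name) goto miss;                            // key field
  if (Load(entry + map_off_addr - key_off_addr) != receiver->map()) goto miss;
  Code* code = Load(entry + value_off_addr - key_off_addr);      // code field
  if ((code->flags() & ~Code::kFlagsNotUsedInLookup) != flags) goto miss;
  Jump(code first instruction, i.e. past the Code header);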
167 
168 void StubCache::GenerateProbe(MacroAssembler* masm,
169  Code::Flags flags,
170  Register receiver,
171  Register name,
172  Register scratch,
173  Register extra,
174  Register extra2,
175  Register extra3) {
176  Isolate* isolate = masm->isolate();
177  Label miss;
178 
179  // Make sure the flags do not name a specific type.
180  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
181 
182  // Make sure that there are no register conflicts.
183  ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
184 
185  // Make sure extra and extra2 registers are valid.
186  ASSERT(!extra.is(no_reg));
187  ASSERT(!extra2.is(no_reg));
188  ASSERT(!extra3.is(no_reg));
189 
190  Counters* counters = masm->isolate()->counters();
191  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
192  extra2, extra3);
193 
194  // Check that the receiver isn't a smi.
195  __ JumpIfSmi(receiver, &miss);
196 
197  // Compute the hash for primary table.
198  __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
199  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
200  __ Add(scratch, scratch, extra);
201  __ Eor(scratch, scratch, flags);
202  // We shift out the last two bits because they are not part of the hash.
203  __ Ubfx(scratch, scratch, kHeapObjectTagSize,
204  CountTrailingZeros(kPrimaryTableSize, 64));
205 
206  // Probe the primary table.
207  ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
208  scratch, extra, extra2, extra3);
209 
210  // Primary miss: Compute hash for secondary table.
211  __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
212  __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
213  __ And(scratch, scratch, kSecondaryTableSize - 1);
214 
215  // Probe the secondary table.
216  ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
217  scratch, extra, extra2, extra3);
218 
219  // Cache miss: Fall-through and let caller handle the miss by
220  // entering the runtime system.
221  __ Bind(&miss);
222  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
223  extra2, extra3);
224 }
225 
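The two probes above use hash values derived from the name's hash field, the receiver's map and the code flags. Restated as a sketch (the masking is done with Ubfx/And against the power-of-two table sizes):

  // Primary table index.
  primary = ((name->hash_field() + receiver->map()) ^ flags) >> kHeapObjectTagSize;
  primary &= kPrimaryTableSize - 1;
  // Secondary table index, derived from the primary one on a miss.
  secondary = (primary - (name >> kHeapObjectTagSize) + (flags >> kHeapObjectTagSize))
              & (kSecondaryTableSize - 1);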
226 
227 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
228  int index,
229  Register prototype) {
230  // Load the global or builtins object from the current context.
231  __ Ldr(prototype, GlobalObjectMemOperand());
232  // Load the native context from the global or builtins object.
233  __ Ldr(prototype,
234  FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
235  // Load the function from the native context.
236  __ Ldr(prototype, ContextMemOperand(prototype, index));
237  // Load the initial map. The global functions all have initial maps.
238  __ Ldr(prototype,
239  FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
240  // Load the prototype from the initial map.
241  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
242 }
243 
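The loads above chase a fixed chain of objects; as a sketch:

  prototype = <global object of the current context>;
  prototype = prototype->native_context();
  prototype = prototype->get(index);       // one of the global JSFunctions
  prototype = prototype->initial_map();    // global functions always have an initial map
  prototype = prototype->prototype();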
244 
245 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
246  MacroAssembler* masm,
247  int index,
248  Register prototype,
249  Label* miss) {
250  Isolate* isolate = masm->isolate();
251  // Get the global function with the given index.
252  Handle<JSFunction> function(
253  JSFunction::cast(isolate->native_context()->get(index)));
254 
255  // Check we're still in the same context.
256  Register scratch = prototype;
257  __ Ldr(scratch, GlobalObjectMemOperand());
258  __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
259  __ Ldr(scratch, ContextMemOperand(scratch, index));
260  __ Cmp(scratch, Operand(function));
261  __ B(ne, miss);
262 
263  // Load its initial map. The global functions all have initial maps.
264  __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
265  // Load the prototype from the initial map.
266  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
267 }
268 
269 
270 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
271  Register dst,
272  Register src,
273  bool inobject,
274  int index,
275  Representation representation) {
276  ASSERT(!representation.IsDouble());
277  USE(representation);
278  if (inobject) {
279  int offset = index * kPointerSize;
280  __ Ldr(dst, FieldMemOperand(src, offset));
281  } else {
282  // Calculate the offset into the properties array.
283  int offset = index * kPointerSize + FixedArray::kHeaderSize;
284  __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
285  __ Ldr(dst, FieldMemOperand(dst, offset));
286  }
287 }
288 
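The two cases above address the field differently; as a sketch (FieldMemOperand subtracts the heap object tag internally):

  if (inobject) {
    // Field is stored inside the object itself.
    dst = *(src + index * kPointerSize - kHeapObjectTag);
  } else {
    // Field lives in the out-of-object properties FixedArray.
    props = *(src + JSObject::kPropertiesOffset - kHeapObjectTag);
    dst = *(props + FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag);
  }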
289 
290 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
291  Register receiver,
292  Register scratch,
293  Label* miss_label) {
294  ASSERT(!AreAliased(receiver, scratch));
295 
296  // Check that the receiver isn't a smi.
297  __ JumpIfSmi(receiver, miss_label);
298 
299  // Check that the object is a JS array.
300  __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
301  miss_label);
302 
303  // Load length directly from the JS array.
304  __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
305  __ Ret();
306 }
307 
308 
309 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
310  Register receiver,
311  Register scratch1,
312  Register scratch2,
313  Label* miss_label) {
314  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
315  // TryGetFunctionPrototype can't put the result directly in x0 because the
316  // 3 input registers can't alias and we call this function from
317  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
318  // move the result into x0.
319  __ Mov(x0, scratch1);
320  __ Ret();
321 }
322 
323 
324 // Generate code to check that a global property cell is empty. Create
325 // the property cell at compilation time if no cell exists for the
326 // property.
327 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
328  Handle<JSGlobalObject> global,
329  Handle<Name> name,
330  Register scratch,
331  Label* miss) {
332  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
333  ASSERT(cell->value()->IsTheHole());
334  __ Mov(scratch, Operand(cell));
335  __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
336  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
337 }
338 
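GenerateCheckPropertyCell creates (or finds) the global object's PropertyCell for 'name' at compile time, asserts it currently holds the hole, and emits a run-time re-check that no such global property has appeared since. A sketch of the emitted check:

  if (cell->value() != the_hole_value) goto miss;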
339 
340 void StubCompiler::GenerateNegativeHolderLookup(
341  MacroAssembler* masm,
342  Handle<JSObject> holder,
343  Register holder_reg,
344  Handle<Name> name,
345  Label* miss) {
346  if (holder->IsJSGlobalObject()) {
347  GenerateCheckPropertyCell(
348  masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
349  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
350  GenerateDictionaryNegativeLookup(
351  masm, miss, holder_reg, name, scratch1(), scratch2());
352  }
353 }
354 
355 
356 // Generate StoreTransition code, value is passed in x0 register.
357 // When leaving generated code after success, the receiver_reg and storage_reg
358 // may be clobbered. Upon branch to miss_label, the receiver and name registers
359 // have their original values.
360 void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
361  Handle<JSObject> object,
362  LookupResult* lookup,
363  Handle<Map> transition,
364  Handle<Name> name,
365  Register receiver_reg,
366  Register storage_reg,
367  Register value_reg,
368  Register scratch1,
369  Register scratch2,
370  Register scratch3,
371  Label* miss_label,
372  Label* slow) {
373  Label exit;
374 
375  ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
376  scratch1, scratch2, scratch3));
377 
378  // We don't need scratch3.
379  scratch3 = NoReg;
380 
381  int descriptor = transition->LastAdded();
382  DescriptorArray* descriptors = transition->instance_descriptors();
383  PropertyDetails details = descriptors->GetDetails(descriptor);
384  Representation representation = details.representation();
385  ASSERT(!representation.IsNone());
386 
387  if (details.type() == CONSTANT) {
388  Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
389  __ LoadObject(scratch1, constant);
390  __ Cmp(value_reg, scratch1);
391  __ B(ne, miss_label);
392  } else if (representation.IsSmi()) {
393  __ JumpIfNotSmi(value_reg, miss_label);
394  } else if (representation.IsHeapObject()) {
395  __ JumpIfSmi(value_reg, miss_label);
396  } else if (representation.IsDouble()) {
397  UseScratchRegisterScope temps(masm);
398  DoubleRegister temp_double = temps.AcquireD();
399  __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
400 
401  Label do_store, heap_number;
402  __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
403 
404  __ JumpIfSmi(value_reg, &do_store);
405 
406  __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
407  miss_label, DONT_DO_SMI_CHECK);
408  __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
409 
410  __ Bind(&do_store);
411  __ Str(temp_double, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
412  }
413 
414  // Stub never generated for non-global objects that require access checks.
415  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
416 
417  // Perform map transition for the receiver if necessary.
418  if ((details.type() == FIELD) &&
419  (object->map()->unused_property_fields() == 0)) {
420  // The properties must be extended before we can store the value.
421  // We jump to a runtime call that extends the properties array.
422  __ Mov(scratch1, Operand(transition));
423  __ Push(receiver_reg, scratch1, value_reg);
424  __ TailCallExternalReference(
425  ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
426  masm->isolate()),
427  3,
428  1);
429  return;
430  }
431 
432  // Update the map of the object.
433  __ Mov(scratch1, Operand(transition));
434  __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
435 
436  // Update the write barrier for the map field.
437  __ RecordWriteField(receiver_reg,
438  HeapObject::kMapOffset,
439  scratch1,
440  scratch2,
441  kLRHasNotBeenSaved,
442  kDontSaveFPRegs,
443  OMIT_REMEMBERED_SET,
444  OMIT_SMI_CHECK);
445 
446  if (details.type() == CONSTANT) {
447  ASSERT(value_reg.is(x0));
448  __ Ret();
449  return;
450  }
451 
452  int index = transition->instance_descriptors()->GetFieldIndex(
453  transition->LastAdded());
454 
455  // Adjust for the number of properties stored in the object. Even in the
456  // face of a transition we can use the old map here because the size of the
457  // object and the number of in-object properties is not going to change.
458  index -= object->map()->inobject_properties();
459 
460  // TODO(verwaest): Share this code as a code stub.
461  SmiCheck smi_check = representation.IsTagged()
462  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
463  Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
464  if (index < 0) {
465  // Set the property straight into the object.
466  int offset = object->map()->instance_size() + (index * kPointerSize);
467  __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
468 
469  if (!representation.IsSmi()) {
470  // Update the write barrier for the array address.
471  if (!representation.IsDouble()) {
472  __ Mov(storage_reg, value_reg);
473  }
474  __ RecordWriteField(receiver_reg,
475  offset,
476  storage_reg,
477  scratch1,
478  kLRHasNotBeenSaved,
479  kDontSaveFPRegs,
480  EMIT_REMEMBERED_SET,
481  smi_check);
482  }
483  } else {
484  // Write to the properties array.
485  int offset = index * kPointerSize + FixedArray::kHeaderSize;
486  // Get the properties array
487  __ Ldr(scratch1,
488  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
489  __ Str(prop_reg, FieldMemOperand(scratch1, offset));
490 
491  if (!representation.IsSmi()) {
492  // Update the write barrier for the array address.
493  if (!representation.IsDouble()) {
494  __ Mov(storage_reg, value_reg);
495  }
496  __ RecordWriteField(scratch1,
497  offset,
498  storage_reg,
499  receiver_reg,
500  kLRHasNotBeenSaved,
501  kDontSaveFPRegs,
502  EMIT_REMEMBERED_SET,
503  smi_check);
504  }
505  }
506 
507  __ Bind(&exit);
508  // Return the value (register x0).
509  ASSERT(value_reg.is(x0));
510  __ Ret();
511 }
512 
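In GenerateStoreTransition the field index is relative to the in-object properties, so a negative index means the field fits inside the object itself, while a non-negative index selects a slot in the out-of-object properties array. A sketch of the offset computation used above:

  int index = transition->instance_descriptors()->GetFieldIndex(transition->LastAdded())
              - object->map()->inobject_properties();
  if (index < 0)   // in-object: offset from the start of the object
    offset = object->map()->instance_size() + index * kPointerSize;
  else             // out-of-object: slot in the properties FixedArray
    offset = FixedArray::kHeaderSize + index * kPointerSize;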
513 
514 // Generate StoreField code, value is passed in x0 register.
515 // When leaving generated code after success, the receiver_reg and name_reg may
516 // be clobbered. Upon branch to miss_label, the receiver and name registers have
517 // their original values.
518 void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
519  Handle<JSObject> object,
520  LookupResult* lookup,
521  Register receiver_reg,
522  Register name_reg,
523  Register value_reg,
524  Register scratch1,
525  Register scratch2,
526  Label* miss_label) {
527  // x0 : value
528  Label exit;
529 
530  // Stub never generated for non-global objects that require access
531  // checks.
532  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
533 
534  int index = lookup->GetFieldIndex().field_index();
535 
536  // Adjust for the number of properties stored in the object. Even in the
537  // face of a transition we can use the old map here because the size of the
538  // object and the number of in-object properties is not going to change.
539  index -= object->map()->inobject_properties();
540 
541  Representation representation = lookup->representation();
542  ASSERT(!representation.IsNone());
543  if (representation.IsSmi()) {
544  __ JumpIfNotSmi(value_reg, miss_label);
545  } else if (representation.IsHeapObject()) {
546  __ JumpIfSmi(value_reg, miss_label);
547  } else if (representation.IsDouble()) {
548  UseScratchRegisterScope temps(masm);
549  DoubleRegister temp_double = temps.AcquireD();
550 
551  __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
552 
553  // Load the double storage.
554  if (index < 0) {
555  int offset = (index * kPointerSize) + object->map()->instance_size();
556  __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
557  } else {
558  int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
559  __ Ldr(scratch1,
560  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
561  __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
562  }
563 
564  // Store the value into the storage.
565  Label do_store, heap_number;
566 
567  __ JumpIfSmi(value_reg, &do_store);
568 
569  __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
570  miss_label, DONT_DO_SMI_CHECK);
571  __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
572 
573  __ Bind(&do_store);
574  __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
575 
576  // Return the value (register x0).
577  ASSERT(value_reg.is(x0));
578  __ Ret();
579  return;
580  }
581 
582  // TODO(verwaest): Share this code as a code stub.
583  SmiCheck smi_check = representation.IsTagged()
584  ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
585  if (index < 0) {
586  // Set the property straight into the object.
587  int offset = object->map()->instance_size() + (index * kPointerSize);
588  __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
589 
590  if (!representation.IsSmi()) {
591  // Skip updating write barrier if storing a smi.
592  __ JumpIfSmi(value_reg, &exit);
593 
594  // Update the write barrier for the array address.
595  // Pass the now unused name_reg as a scratch register.
596  __ Mov(name_reg, value_reg);
597  __ RecordWriteField(receiver_reg,
598  offset,
599  name_reg,
600  scratch1,
601  kLRHasNotBeenSaved,
602  kDontSaveFPRegs,
603  EMIT_REMEMBERED_SET,
604  smi_check);
605  }
606  } else {
607  // Write to the properties array.
608  int offset = index * kPointerSize + FixedArray::kHeaderSize;
609  // Get the properties array
610  __ Ldr(scratch1,
611  FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
612  __ Str(value_reg, FieldMemOperand(scratch1, offset));
613 
614  if (!representation.IsSmi()) {
615  // Skip updating write barrier if storing a smi.
616  __ JumpIfSmi(value_reg, &exit);
617 
618  // Update the write barrier for the array address.
619  // Ok to clobber receiver_reg and name_reg, since we return.
620  __ Mov(name_reg, value_reg);
621  __ RecordWriteField(scratch1,
622  offset,
623  name_reg,
624  receiver_reg,
625  kLRHasNotBeenSaved,
626  kDontSaveFPRegs,
627  EMIT_REMEMBERED_SET,
628  smi_check);
629  }
630  }
631 
632  __ Bind(&exit);
633  // Return the value (register x0).
634  ASSERT(value_reg.is(x0));
635  __ Ret();
636 }
637 
638 
639 void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
640  Label* label,
641  Handle<Name> name) {
642  if (!label->is_unused()) {
643  __ Bind(label);
644  __ Mov(this->name(), Operand(name));
645  }
646 }
647 
648 
649 static void PushInterceptorArguments(MacroAssembler* masm,
650  Register receiver,
651  Register holder,
652  Register name,
653  Handle<JSObject> holder_obj) {
654  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
655  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
656  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
657  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
658  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
659 
660  __ Push(name);
661  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
662  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
663  Register scratch = name;
664  __ Mov(scratch, Operand(interceptor));
665  __ Push(scratch, receiver, holder);
666 }
667 
668 
669 static void CompileCallLoadPropertyWithInterceptor(
670  MacroAssembler* masm,
671  Register receiver,
672  Register holder,
673  Register name,
674  Handle<JSObject> holder_obj,
675  IC::UtilityId id) {
676  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
677 
678  __ CallExternalReference(
679  ExternalReference(IC_Utility(id), masm->isolate()),
680  StubCache::kInterceptorArgsLength);
681 }
682 
683 
684 // Generate call to api function.
685 void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
686  const CallOptimization& optimization,
687  Handle<Map> receiver_map,
688  Register receiver,
689  Register scratch,
690  bool is_store,
691  int argc,
692  Register* values) {
693  ASSERT(!AreAliased(receiver, scratch));
694 
695  MacroAssembler::PushPopQueue queue(masm);
696  queue.Queue(receiver);
697  // Write the arguments to the stack frame.
698  for (int i = 0; i < argc; i++) {
699  Register arg = values[argc-1-i];
700  ASSERT(!AreAliased(receiver, scratch, arg));
701  queue.Queue(arg);
702  }
703  queue.PushQueued();
704 
705  ASSERT(optimization.is_simple_api_call());
706 
707  // Abi for CallApiFunctionStub.
708  Register callee = x0;
709  Register call_data = x4;
710  Register holder = x2;
711  Register api_function_address = x1;
712 
713  // Put holder in place.
714  CallOptimization::HolderLookup holder_lookup;
715  Handle<JSObject> api_holder =
716  optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
717  switch (holder_lookup) {
718  case CallOptimization::kHolderIsReceiver:
719  __ Mov(holder, receiver);
720  break;
721  case CallOptimization::kHolderFound:
722  __ LoadObject(holder, api_holder);
723  break;
724  case CallOptimization::kHolderNotFound:
725  UNREACHABLE();
726  break;
727  }
728 
729  Isolate* isolate = masm->isolate();
730  Handle<JSFunction> function = optimization.constant_function();
731  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
732  Handle<Object> call_data_obj(api_call_info->data(), isolate);
733 
734  // Put callee in place.
735  __ LoadObject(callee, function);
736 
737  bool call_data_undefined = false;
738  // Put call_data in place.
739  if (isolate->heap()->InNewSpace(*call_data_obj)) {
740  __ LoadObject(call_data, api_call_info);
741  __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
742  } else if (call_data_obj->IsUndefined()) {
743  call_data_undefined = true;
744  __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
745  } else {
746  __ LoadObject(call_data, call_data_obj);
747  }
748 
749  // Put api_function_address in place.
750  Address function_address = v8::ToCData<Address>(api_call_info->callback());
751  ApiFunction fun(function_address);
752  ExternalReference ref = ExternalReference(&fun,
753  ExternalReference::DIRECT_API_CALL,
754  masm->isolate());
755  __ Mov(api_function_address, ref);
756 
757  // Jump to stub.
758  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
759  __ TailCallStub(&stub);
760 }
761 
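The PushPopQueue above leaves the receiver deepest on the stack and values[0] on top, which is the argument layout the tail-called CallApiFunctionStub then consumes; as a sketch:

  sp[argc * kPointerSize]   receiver
  ...                       values[argc-1] ... values[1]
  sp[0]                     values[0]
  // ABI registers set up before the tail call:
  //   x0 = callee (JSFunction), x1 = api_function_address,
  //   x2 = holder, x4 = call_data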
762 
763 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
764  __ Jump(code, RelocInfo::CODE_TARGET);
765 }
766 
767 
768 #undef __
769 #define __ ACCESS_MASM(masm())
770 
771 
772 Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
773  Register object_reg,
774  Handle<JSObject> holder,
775  Register holder_reg,
776  Register scratch1,
777  Register scratch2,
778  Handle<Name> name,
779  Label* miss,
780  PrototypeCheckType check) {
781  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
782 
783  // object_reg and holder_reg registers can alias.
784  ASSERT(!AreAliased(object_reg, scratch1, scratch2));
785  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
786 
787  // Keep track of the current object in register reg.
788  Register reg = object_reg;
789  int depth = 0;
790 
791  Handle<JSObject> current = Handle<JSObject>::null();
792  if (type->IsConstant()) {
793  current = Handle<JSObject>::cast(type->AsConstant());
794  }
795  Handle<JSObject> prototype = Handle<JSObject>::null();
796  Handle<Map> current_map = receiver_map;
797  Handle<Map> holder_map(holder->map());
798  // Traverse the prototype chain and check the maps in the prototype chain for
799  // fast and global objects or do negative lookup for normal objects.
800  while (!current_map.is_identical_to(holder_map)) {
801  ++depth;
802 
803  // Only global objects and objects that do not require access
804  // checks are allowed in stubs.
805  ASSERT(current_map->IsJSGlobalProxyMap() ||
806  !current_map->is_access_check_needed());
807 
808  prototype = handle(JSObject::cast(current_map->prototype()));
809  if (current_map->is_dictionary_map() &&
810  !current_map->IsJSGlobalObjectMap() &&
811  !current_map->IsJSGlobalProxyMap()) {
812  if (!name->IsUniqueName()) {
813  ASSERT(name->IsString());
814  name = factory()->InternalizeString(Handle<String>::cast(name));
815  }
816  ASSERT(current.is_null() ||
817  (current->property_dictionary()->FindEntry(*name) ==
818  NameDictionary::kNotFound));
819 
820  GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
821  scratch1, scratch2);
822 
823  __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
824  reg = holder_reg; // From now on the object will be in holder_reg.
825  __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
826  } else {
827  bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
828  heap()->InNewSpace(*prototype);
829  Register map_reg = NoReg;
830  if (need_map) {
831  map_reg = scratch1;
832  __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
833  }
834 
835  if (depth != 1 || check == CHECK_ALL_MAPS) {
836  __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
837  }
838 
839  // Check access rights to the global object. This has to happen after
840  // the map check so that we know that the object is actually a global
841  // object.
842  if (current_map->IsJSGlobalProxyMap()) {
843  UseScratchRegisterScope temps(masm());
844  __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
845  } else if (current_map->IsJSGlobalObjectMap()) {
846  GenerateCheckPropertyCell(
847  masm(), Handle<JSGlobalObject>::cast(current), name,
848  scratch2, miss);
849  }
850 
851  reg = holder_reg; // From now on the object will be in holder_reg.
852 
853  if (heap()->InNewSpace(*prototype)) {
854  // The prototype is in new space; we cannot store a reference to it
855  // in the code. Load it from the map.
856  __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
857  } else {
858  // The prototype is in old space; load it directly.
859  __ Mov(reg, Operand(prototype));
860  }
861  }
862 
863  // Go to the next object in the prototype chain.
864  current = prototype;
865  current_map = handle(current->map());
866  }
867 
868  // Log the check depth.
869  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
870 
871  // Check the holder map.
872  if (depth != 0 || check == CHECK_ALL_MAPS) {
873  // Check the holder map.
874  __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
875  }
876 
877  // Perform security check for access to the global object.
878  ASSERT(current_map->IsJSGlobalProxyMap() ||
879  !current_map->is_access_check_needed());
880  if (current_map->IsJSGlobalProxyMap()) {
881  __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
882  }
883 
884  // Return the register containing the holder.
885  return reg;
886 }
887 
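CheckPrototypes walks from the receiver's map to the holder's map, emitting for each hop either a map check (fast and global objects) or a dictionary negative lookup (dictionary-mode objects), plus access checks for global proxies. In pseudo-C++ (a sketch; the real code also special-cases depth 1, CHECK_ALL_MAPS and new-space prototypes):

  reg = object_reg;
  while (current_map != holder_map) {
    if (dictionary-mode, non-global) GenerateDictionaryNegativeLookup(reg, name);
    else                             CheckMap(reg, current_map);   // -> miss on mismatch
    if (global proxy)  CheckAccessGlobalProxy(reg);
    if (global object) GenerateCheckPropertyCell(current, name);
    reg = current_map->prototype();      // loaded from the map or as a constant
    current_map = reg->map();
  }
  CheckMap(reg, holder_map);             // skipped when depth == 0 and !CHECK_ALL_MAPS
  return reg;                            // register now holding the holder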
888 
889 void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
890  if (!miss->is_unused()) {
891  Label success;
892  __ B(&success);
893 
894  __ Bind(miss);
895  TailCallBuiltin(masm(), MissBuiltin(kind()));
896 
897  __ Bind(&success);
898  }
899 }
900 
901 
902 void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
903  if (!miss->is_unused()) {
904  Label success;
905  __ B(&success);
906 
907  GenerateRestoreName(masm(), miss, name);
908  TailCallBuiltin(masm(), MissBuiltin(kind()));
909 
910  __ Bind(&success);
911  }
912 }
913 
914 
915 Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
916  Register object_reg,
917  Handle<JSObject> holder,
918  Handle<Name> name,
919  Handle<Object> callback) {
920  Label miss;
921 
922  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
923  // HandlerFrontendHeader can return its result into scratch1() so do not
924  // use it.
925  Register scratch2 = this->scratch2();
926  Register scratch3 = this->scratch3();
927  Register dictionary = this->scratch4();
928  ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
929 
930  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
931  // Load the properties dictionary.
932  __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
933 
934  // Probe the dictionary.
935  Label probe_done;
936  NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
937  &miss,
938  &probe_done,
939  dictionary,
940  this->name(),
941  scratch2,
942  scratch3);
943  __ Bind(&probe_done);
944 
945  // If probing finds an entry in the dictionary, scratch3 contains the
946  // pointer into the dictionary. Check that the value is the callback.
947  Register pointer = scratch3;
948  const int kElementsStartOffset = NameDictionary::kHeaderSize +
949  NameDictionary::kElementsStartIndex * kPointerSize;
950  const int kValueOffset = kElementsStartOffset + kPointerSize;
951  __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
952  __ Cmp(scratch2, Operand(callback));
953  __ B(ne, &miss);
954  }
955 
956  HandlerFrontendFooter(name, &miss);
957  return reg;
958 }
959 
960 
961 void LoadStubCompiler::GenerateLoadField(Register reg,
962  Handle<JSObject> holder,
963  PropertyIndex field,
964  Representation representation) {
965  __ Mov(receiver(), reg);
966  if (kind() == Code::LOAD_IC) {
967  LoadFieldStub stub(field.is_inobject(holder),
968  field.translate(holder),
969  representation);
970  GenerateTailCall(masm(), stub.GetCode(isolate()));
971  } else {
972  KeyedLoadFieldStub stub(field.is_inobject(holder),
973  field.translate(holder),
974  representation);
975  GenerateTailCall(masm(), stub.GetCode(isolate()));
976  }
977 }
978 
979 
980 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
981  // Return the constant value.
982  __ LoadObject(x0, value);
983  __ Ret();
984 }
985 
986 
987 void LoadStubCompiler::GenerateLoadCallback(
988  Register reg,
989  Handle<ExecutableAccessorInfo> callback) {
990  ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
991 
992  // Build ExecutableAccessorInfo::args_ list on the stack and push property
993  // name below the exit frame to make GC aware of them and store pointers to
994  // them.
995  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
996  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
997  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
998  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
999  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
1000  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
1001  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
1002 
1003  __ Push(receiver());
1004 
1005  if (heap()->InNewSpace(callback->data())) {
1006  __ Mov(scratch3(), Operand(callback));
1007  __ Ldr(scratch3(), FieldMemOperand(scratch3(),
1008  ExecutableAccessorInfo::kDataOffset));
1009  } else {
1010  __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
1011  }
1012  __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
1013  __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
1014  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
1015 
1016  Register args_addr = scratch2();
1017  __ Add(args_addr, __ StackPointer(), kPointerSize);
1018 
1019  // Stack at this point:
1020  // sp[40] callback data
1021  // sp[32] undefined
1022  // sp[24] undefined
1023  // sp[16] isolate
1024  // args_addr -> sp[8] reg
1025  // sp[0] name
1026 
1027  // Abi for CallApiGetter.
1028  Register getter_address_reg = x2;
1029 
1030  // Set up the call.
1031  Address getter_address = v8::ToCData<Address>(callback->getter());
1032  ApiFunction fun(getter_address);
1033  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
1034  ExternalReference ref = ExternalReference(&fun, type, isolate());
1035  __ Mov(getter_address_reg, ref);
1036 
1037  CallApiGetterStub stub;
1038  __ TailCallStub(&stub);
1039 }
1040 
1041 
1042 void LoadStubCompiler::GenerateLoadInterceptor(
1043  Register holder_reg,
1044  Handle<Object> object,
1045  Handle<JSObject> interceptor_holder,
1046  LookupResult* lookup,
1047  Handle<Name> name) {
1048  ASSERT(!AreAliased(receiver(), this->name(),
1049  scratch1(), scratch2(), scratch3()));
1050  ASSERT(interceptor_holder->HasNamedInterceptor());
1051  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1052 
1053  // So far the most popular follow ups for interceptor loads are FIELD
1054  // and CALLBACKS, so inline only them, other cases may be added later.
1055  bool compile_followup_inline = false;
1056  if (lookup->IsFound() && lookup->IsCacheable()) {
1057  if (lookup->IsField()) {
1058  compile_followup_inline = true;
1059  } else if (lookup->type() == CALLBACKS &&
1060  lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
1061  ExecutableAccessorInfo* callback =
1062  ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
1063  compile_followup_inline = callback->getter() != NULL &&
1064  callback->IsCompatibleReceiver(*object);
1065  }
1066  }
1067 
1068  if (compile_followup_inline) {
1069  // Compile the interceptor call, followed by inline code to load the
1070  // property from further up the prototype chain if the call fails.
1071  // Check that the maps haven't changed.
1072  ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
1073 
1074  // Preserve the receiver register explicitly whenever it is different from
1075  // the holder and it is needed should the interceptor return without any
1076  // result. The CALLBACKS case needs the receiver to be passed into C++ code,
1077  // the FIELD case might cause a miss during the prototype check.
1078  bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
1079  bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
1080  (lookup->type() == CALLBACKS || must_perform_prototype_check);
1081 
1082  // Save necessary data before invoking an interceptor.
1083  // Requires a frame to make GC aware of pushed pointers.
1084  {
1085  FrameScope frame_scope(masm(), StackFrame::INTERNAL);
1086  if (must_preserve_receiver_reg) {
1087  __ Push(receiver(), holder_reg, this->name());
1088  } else {
1089  __ Push(holder_reg, this->name());
1090  }
1091  // Invoke an interceptor. Note: map checks from receiver to
1092  // interceptor's holder has been compiled before (see a caller
1093  // of this method.)
1094  CompileCallLoadPropertyWithInterceptor(
1095  masm(), receiver(), holder_reg, this->name(), interceptor_holder,
1096  IC::kLoadPropertyWithInterceptorOnly);
1097 
1098  // Check if interceptor provided a value for property. If it's
1099  // the case, return immediately.
1100  Label interceptor_failed;
1101  __ JumpIfRoot(x0,
1102  Heap::kNoInterceptorResultSentinelRootIndex,
1103  &interceptor_failed);
1104  frame_scope.GenerateLeaveFrame();
1105  __ Ret();
1106 
1107  __ Bind(&interceptor_failed);
1108  if (must_preserve_receiver_reg) {
1109  __ Pop(this->name(), holder_reg, receiver());
1110  } else {
1111  __ Pop(this->name(), holder_reg);
1112  }
1113  // Leave the internal frame.
1114  }
1115  GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
1116  } else { // !compile_followup_inline
1117  // Call the runtime system to load the interceptor.
1118  // Check that the maps haven't changed.
1119  PushInterceptorArguments(
1120  masm(), receiver(), holder_reg, this->name(), interceptor_holder);
1121 
1122  ExternalReference ref =
1123  ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
1124  isolate());
1125  __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
1126  }
1127 }
1128 
1129 
1130 void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
1131  UseScratchRegisterScope temps(masm());
1132  // Check that the object is a boolean.
1133  Register true_root = temps.AcquireX();
1134  Register false_root = temps.AcquireX();
1135  ASSERT(!AreAliased(object, true_root, false_root));
1136  __ LoadTrueFalseRoots(true_root, false_root);
1137  __ Cmp(object, true_root);
1138  __ Ccmp(object, false_root, ZFlag, ne);
1139  __ B(ne, miss);
1140 }
1141 
1142 
1143 Handle<Code> StoreStubCompiler::CompileStoreCallback(
1144  Handle<JSObject> object,
1145  Handle<JSObject> holder,
1146  Handle<Name> name,
1147  Handle<ExecutableAccessorInfo> callback) {
1148  ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
1149  Register holder_reg = HandlerFrontend(
1150  IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
1151 
1152  // Stub never generated for non-global objects that require access checks.
1153  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
1154 
1155  // receiver() and holder_reg can alias.
1156  ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
1157  ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
1158  __ Mov(scratch1(), Operand(callback));
1159  __ Mov(scratch2(), Operand(name));
1160  __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
1161 
1162  // Do tail-call to the runtime system.
1163  ExternalReference store_callback_property =
1164  ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
1165  __ TailCallExternalReference(store_callback_property, 5, 1);
1166 
1167  // Return the generated code.
1168  return GetCode(kind(), Code::FAST, name);
1169 }
1170 
1171 
1172 #undef __
1173 #define __ ACCESS_MASM(masm)
1174 
1175 
1176 void StoreStubCompiler::GenerateStoreViaSetter(
1177  MacroAssembler* masm,
1178  Handle<HeapType> type,
1179  Register receiver,
1180  Handle<JSFunction> setter) {
1181  // ----------- S t a t e -------------
1182  // -- lr : return address
1183  // -----------------------------------
1184  Label miss;
1185 
1186  {
1187  FrameScope scope(masm, StackFrame::INTERNAL);
1188 
1189  // Save value register, so we can restore it later.
1190  __ Push(value());
1191 
1192  if (!setter.is_null()) {
1193  // Call the JavaScript setter with receiver and value on the stack.
1194  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1195  // Swap in the global receiver.
1196  __ Ldr(receiver,
1197  FieldMemOperand(receiver,
1198  JSGlobalObject::kGlobalReceiverOffset));
1199  }
1200  __ Push(receiver, value());
1201  ParameterCount actual(1);
1202  ParameterCount expected(setter);
1203  __ InvokeFunction(setter, expected, actual,
1204  CALL_FUNCTION, NullCallWrapper());
1205  } else {
1206  // If we generate a global code snippet for deoptimization only, remember
1207  // the place to continue after deoptimization.
1208  masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
1209  }
1210 
1211  // We have to return the passed value, not the return value of the setter.
1212  __ Pop(x0);
1213 
1214  // Restore context register.
1215  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1216  }
1217  __ Ret();
1218 }
1219 
1220 
1221 #undef __
1222 #define __ ACCESS_MASM(masm())
1223 
1224 
1225 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
1226  Handle<JSObject> object,
1227  Handle<Name> name) {
1228  Label miss;
1229 
1230  ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
1231 
1232  __ Push(receiver(), this->name(), value());
1233 
1234  // Do tail-call to the runtime system.
1235  ExternalReference store_ic_property =
1236  ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
1237  __ TailCallExternalReference(store_ic_property, 3, 1);
1238 
1239  // Return the generated code.
1240  return GetCode(kind(), Code::FAST, name);
1241 }
1242 
1243 
1244 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
1245  Handle<JSObject> last,
1246  Handle<Name> name) {
1247  NonexistentHandlerFrontend(type, last, name);
1248 
1249  // Return undefined if maps of the full prototype chain are still the
1250  // same and no global property with this name contains a value.
1251  __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
1252  __ Ret();
1253 
1254  // Return the generated code.
1255  return GetCode(kind(), Code::FAST, name);
1256 }
1257 
1258 
1259 // TODO(all): The so-called scratch registers are significant in some cases. For
1260 // example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
1261 // KeyedStoreCompiler::transition_map(). We should verify which registers are
1262 // actually scratch registers, and which are important. For now, we use the same
1263 // assignments as ARM to remain on the safe side.
1264 
1265 Register* LoadStubCompiler::registers() {
1266  // receiver, name, scratch1, scratch2, scratch3, scratch4.
1267  static Register registers[] = { x0, x2, x3, x1, x4, x5 };
1268  return registers;
1269 }
1270 
1271 
1272 Register* KeyedLoadStubCompiler::registers() {
1273  // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
1274  static Register registers[] = { x1, x0, x2, x3, x4, x5 };
1275  return registers;
1276 }
1277 
1278 
1279 Register StoreStubCompiler::value() {
1280  return x0;
1281 }
1282 
1283 
1284 Register* StoreStubCompiler::registers() {
1285  // receiver, value, scratch1, scratch2, scratch3.
1286  static Register registers[] = { x1, x2, x3, x4, x5 };
1287  return registers;
1288 }
1289 
1290 
1291 Register* KeyedStoreStubCompiler::registers() {
1292  // receiver, name, scratch1, scratch2, scratch3.
1293  static Register registers[] = { x2, x1, x3, x4, x5 };
1294  return registers;
1295 }
1296 
1297 
1298 #undef __
1299 #define __ ACCESS_MASM(masm)
1300 
1301 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
1302  Handle<HeapType> type,
1303  Register receiver,
1304  Handle<JSFunction> getter) {
1305  {
1306  FrameScope scope(masm, StackFrame::INTERNAL);
1307 
1308  if (!getter.is_null()) {
1309  // Call the JavaScript getter with the receiver on the stack.
1310  if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
1311  // Swap in the global receiver.
1312  __ Ldr(receiver,
1313  FieldMemOperand(receiver,
1314  JSGlobalObject::kGlobalReceiverOffset));
1315  }
1316  __ Push(receiver);
1317  ParameterCount actual(0);
1318  ParameterCount expected(getter);
1319  __ InvokeFunction(getter, expected, actual,
1320  CALL_FUNCTION, NullCallWrapper());
1321  } else {
1322  // If we generate a global code snippet for deoptimization only, remember
1323  // the place to continue after deoptimization.
1324  masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
1325  }
1326 
1327  // Restore context register.
1328  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1329  }
1330  __ Ret();
1331 }
1332 
1333 
1334 #undef __
1335 #define __ ACCESS_MASM(masm())
1336 
1337 
1338 Handle<Code> LoadStubCompiler::CompileLoadGlobal(
1339  Handle<HeapType> type,
1340  Handle<GlobalObject> global,
1341  Handle<PropertyCell> cell,
1342  Handle<Name> name,
1343  bool is_dont_delete) {
1344  Label miss;
1345  HandlerFrontendHeader(type, receiver(), global, name, &miss);
1346 
1347  // Get the value from the cell.
1348  __ Mov(x3, Operand(cell));
1349  __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
1350 
1351  // Check for deleted property if property can actually be deleted.
1352  if (!is_dont_delete) {
1353  __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
1354  }
1355 
1356  Counters* counters = isolate()->counters();
1357  __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
1358  __ Mov(x0, x4);
1359  __ Ret();
1360 
1361  HandlerFrontendFooter(name, &miss);
1362 
1363  // Return the generated code.
1364  return GetCode(kind(), Code::NORMAL, name);
1365 }
1366 
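CompileLoadGlobal (below) loads the property straight out of its PropertyCell; as a sketch:

  value = cell->value();
  if (!is_dont_delete && value == the_hole_value) goto miss;  // property was deleted
  return value;   // in x0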
1367 
1368 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
1369  TypeHandleList* types,
1370  CodeHandleList* handlers,
1371  Handle<Name> name,
1372  Code::StubType type,
1373  IcCheckType check) {
1374  Label miss;
1375 
1376  if (check == PROPERTY &&
1377  (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
1378  __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
1379  }
1380 
1381  Label number_case;
1382  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
1383  __ JumpIfSmi(receiver(), smi_target);
1384 
1385  Register map_reg = scratch1();
1386  __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
1387  int receiver_count = types->length();
1388  int number_of_handled_maps = 0;
1389  for (int current = 0; current < receiver_count; ++current) {
1390  Handle<HeapType> type = types->at(current);
1391  Handle<Map> map = IC::TypeToMap(*type, isolate());
1392  if (!map->is_deprecated()) {
1393  number_of_handled_maps++;
1394  Label try_next;
1395  __ Cmp(map_reg, Operand(map));
1396  __ B(ne, &try_next);
1397  if (type->Is(HeapType::Number())) {
1398  ASSERT(!number_case.is_unused());
1399  __ Bind(&number_case);
1400  }
1401  __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
1402  __ Bind(&try_next);
1403  }
1404  }
1405  ASSERT(number_of_handled_maps != 0);
1406 
1407  __ Bind(&miss);
1408  TailCallBuiltin(masm(), MissBuiltin(kind()));
1409 
1410  // Return the generated code.
1411  InlineCacheState state =
1412  (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
1413  return GetICCode(kind(), type, name, state);
1414 }
1415 
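CompilePolymorphicIC emits a linear map dispatch; as a sketch:

  if (receiver is a Smi) goto number_case_or_miss;
  map = receiver->map();
  for each (type_i, handler_i) whose map is not deprecated:
    if (map == map_i) tail-call handler_i;   // number_case binds just before the Number handler
  goto miss;   // TailCallBuiltin(MissBuiltin(kind()))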
1416 
1417 void StoreStubCompiler::GenerateStoreArrayLength() {
1418  // Prepare tail call to StoreIC_ArrayLength.
1419  __ Push(receiver(), value());
1420 
1421  ExternalReference ref =
1422  ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
1423  masm()->isolate());
1424  __ TailCallExternalReference(ref, 2, 1);
1425 }
1426 
1427 
1428 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
1429  MapHandleList* receiver_maps,
1430  CodeHandleList* handler_stubs,
1431  MapHandleList* transitioned_maps) {
1432  Label miss;
1433 
1434  ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
1435 
1436  __ JumpIfSmi(receiver(), &miss);
1437 
1438  int receiver_count = receiver_maps->length();
1439  __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
1440  for (int i = 0; i < receiver_count; i++) {
1441  __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
1442 
1443  Label skip;
1444  __ B(&skip, ne);
1445  if (!transitioned_maps->at(i).is_null()) {
1446  // This argument is used by the handler stub. For example, see
1447  // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
1448  __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
1449  }
1450  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
1451  __ Bind(&skip);
1452  }
1453 
1454  __ Bind(&miss);
1455  TailCallBuiltin(masm(), MissBuiltin(kind()));
1456 
1457  return GetICCode(
1458  kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
1459 }
1460 
1461 
1462 #undef __
1463 #define __ ACCESS_MASM(masm)
1464 
1465 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
1466  MacroAssembler* masm) {
1467  // ---------- S t a t e --------------
1468  // -- lr : return address
1469  // -- x0 : key
1470  // -- x1 : receiver
1471  // -----------------------------------
1472  Label slow, miss;
1473 
1474  Register result = x0;
1475  Register key = x0;
1476  Register receiver = x1;
1477 
1478  __ JumpIfNotSmi(key, &miss);
1479  __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
1480  __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
1481  __ Ret();
1482 
1483  __ Bind(&slow);
1484  __ IncrementCounter(
1485  masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
1486  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
1487 
1488  // Miss case, call the runtime.
1489  __ Bind(&miss);
1490  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
1491 }
1492 
1493 
1494 } } // namespace v8::internal
1495 
1496 #endif // V8_TARGET_ARCH_ARM64
static Handle< HeapType > CurrentTypeOf(Handle< Object > object, Isolate *isolate)
Definition: ic.cc:676
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
Handle< Code > CompilePolymorphicIC(TypeHandleList *types, CodeHandleList *handlers, Handle< Name > name, Code::StubType type, IcCheckType check)
const int kHeapObjectTag
Definition: v8.h:5473
void GenerateLoadConstant(Handle< Object > value)
[Doxygen macro-expansion residue: concatenated V8 runtime flag help strings, trailing into DEFINE_bool(code_comments, ...)]
#define __
static const int kInterceptorArgsThisIndex
Definition: stub-cache.h:205
static Handle< PropertyCell > EnsurePropertyCell(Handle< JSGlobalObject > global, Handle< Name > name)
Definition: objects.cc:14752
List< Handle< HeapType > > TypeHandleList
Definition: list.h:219
static const int kPropertiesOffset
Definition: objects.h:2755
static const int kReturnValueDefaultValueIndex
Definition: arguments.h:179
void GenerateNegativeHolderLookup(MacroAssembler *masm, Handle< JSObject > holder, Register holder_reg, Handle< Name > name, Label *miss)
static const int kElementsOffset
Definition: objects.h:2756
Handle< Code > CompileLoadNonexistent(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Handle< Code > CompileLoadGlobal(Handle< HeapType > type, Handle< GlobalObject > holder, Handle< PropertyCell > cell, Handle< Name > name, bool is_dont_delete)
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
void GenerateLoadInterceptor(Register holder_reg, Handle< Object > object, Handle< JSObject > holder, LookupResult *lookup, Handle< Name > name)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
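GenerateNegativeLookup emits the code that proves a unique name is absent from a receiver's property dictionary, so a handler can safely keep walking the prototype chain. As a rough, hedged illustration of the idea (not the emitted ARM64 code, and not V8's NameDictionary layout or probe sequence), a bounded open-addressing probe looks like the sketch below; ProvesAbsent, Slot, and the linear probe are illustrative assumptions:

// Standalone sketch (illustrative only): decide that `name` is absent from an
// open-addressed dictionary by probing a bounded number of slots. The real
// stub does the analogous walk over the heap layout of NameDictionary.
#include <cstdint>
#include <string>
#include <vector>

struct Slot { bool used = false; std::string key; };

bool ProvesAbsent(const std::vector<Slot>& table, const std::string& name,
                  uint32_t hash, int max_probes = 4) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // power-of-two capacity assumed
  for (int i = 0; i < max_probes; ++i) {
    const Slot& slot = table[(hash + i) & mask];  // linear probing is an assumption
    if (!slot.used) return true;        // empty slot reached: the name cannot be present
    if (slot.key == name) return false; // the name is present: this is a miss for the stub
  }
  return false;  // inconclusive after the bounded probes: fall back to the slow path
}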
static const int kInterceptorArgsInfoIndex
Definition: stub-cache.h:204
static const int kHeaderSize
Definition: objects.h:3016
void GenerateLoadCallback(Register reg, Handle< ExecutableAccessorInfo > callback)
static Builtins::Name MissBuiltin(Code::Kind kind)
Definition: stub-cache.h:466
static const int kMapOffset
Definition: objects.h:1890
List< Handle< Code > > CodeHandleList
Definition: list.h:220
Register CallbackHandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Handle< Object > callback)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
MemOperand FieldMemOperand(Register object, int offset)
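FieldMemOperand builds the memory operand the stubs use for every field access on a tagged heap object: a heap-object pointer carries a low tag, so the field offset must be corrected by that tag before it becomes a plain displacement. A minimal sketch of that correction, assuming the usual kHeapObjectTag value of 1 (FieldOperand and Operand below are illustrative names, not V8's types):

// Illustrative sketch, not V8's implementation: a tagged pointer is the real
// address plus a small tag, so a field at `offset` is reached with the
// displacement `offset - kTag`.
#include <cstdint>

constexpr intptr_t kTag = 1;  // assumed value of kHeapObjectTag

struct Operand { intptr_t base_tagged; intptr_t displacement; };

Operand FieldOperand(intptr_t tagged_pointer, int field_offset) {
  // tagged_pointer + (field_offset - kTag) equals the untagged address + field_offset
  return Operand{tagged_pointer, field_offset - kTag};
}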
static const int kDataOffset
Definition: objects.h:10433
void GenerateStoreField(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Label *miss_label)
friend class Isolate
Definition: stub-cache.h:280
static const int kHeaderSize
Definition: objects.h:5604
static Handle< T > null()
Definition: handles.h:80
void USE(T)
Definition: globals.h:341
void NonexistentHandlerFrontend(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Definition: stub-cache.cc:864
const int kHeapObjectTagSize
Definition: v8.h:5474
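kHeapObjectTag and kHeapObjectTagSize describe V8's pointer tagging, which is what lets the stubs distinguish small integers (smis) from heap-object pointers with a couple of bit tests. A hedged sketch of that classification, assuming the conventional values (heap-object tag 1, tag width 2 bits, smi tag 0); the constant and function names are illustrative:

// Illustrative sketch of V8-style word tagging; the constants are assumed,
// not quoted from v8.h. A word is a smi when its lowest bit is clear and a
// heap-object pointer when its low tag bits equal the heap-object tag.
#include <cstdint>

constexpr intptr_t kSmiTagMaskSketch = 1;         // assumption: the smi tag occupies 1 bit
constexpr intptr_t kHeapObjectTagSketch = 1;      // mirrors kHeapObjectTag
constexpr intptr_t kHeapObjectTagMaskSketch = 3;  // (1 << kHeapObjectTagSize) - 1, assuming size 2

bool IsSmiSketch(intptr_t word) { return (word & kSmiTagMaskSketch) == 0; }
bool IsHeapObjectSketch(intptr_t word) {
  return (word & kHeapObjectTagMaskSketch) == kHeapObjectTagSketch;
}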
static const int kPrototypeOffset
Definition: objects.h:6427
static const int kFlagsNotUsedInLookup
Definition: objects.h:5684
const Register no_reg
bool Is(const CPURegister &other) const
Handle< Code > GetCode(Code::Kind kind, Code::StubType type, Handle< Name > name)
Definition: stub-cache.cc:1281
void GenerateStoreTransition(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Handle< Map > transition, Handle< Name > name, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Register scratch3, Label *miss_label, Label *slow)
const Register fp
static const int kNativeContextOffset
Definition: objects.h:7611
MemOperand ContextMemOperand(Register context, int index)
Register HandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name)
Definition: stub-cache.cc:850
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
int CountTrailingZeros(uint64_t value, int width)
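CountTrailingZeros is one of the bit utilities the ARM64 backend relies on when working with immediates and bit patterns. A portable, hedged sketch with the same shape as the declared signature; it is not V8's implementation:

// Illustrative sketch: count trailing zero bits of `value`, considering only
// the low `width` bits (width is expected to be 32 or 64 here).
#include <cstdint>

int CountTrailingZerosSketch(uint64_t value, int width) {
  if (width < 64) value &= (uint64_t{1} << width) - 1;  // keep only the low `width` bits
  if (value == 0) return width;  // an all-zero field has `width` trailing zeros
  int count = 0;
  while ((value & 1) == 0) {
    value >>= 1;
    ++count;
  }
  return count;
}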
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > getter)
static const int kInterceptorArgsHolderIndex
Definition: stub-cache.h:206
[Doxygen macro-expansion residue: concatenated help strings of the V8 runtime flags defined in flags.cc]
Definition: flags.cc:505
Handle< Code > GetICCode(Code::Kind kind, Code::StubType type, Handle< Name > name, InlineCacheState state=MONOMORPHIC)
Definition: stub-cache.cc:1269
static JSObject * cast(Object *obj)
MemOperand GlobalObjectMemOperand()
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
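AreAliased is the guard used at the top of most generators in this file to assert that receiver, name, value, and scratch registers do not overlap, since the scratch registers are clobbered freely. A standalone sketch of the check, modelling registers as integer codes with -1 standing in for NoReg (names and the code-based model are illustrative assumptions):

// Illustrative sketch: true if any two valid registers in the list are the
// same register. V8's real helper compares CPURegister objects and treats
// NoReg entries as "not present".
#include <initializer_list>
#include <vector>

bool AreAliasedSketch(std::initializer_list<int> reg_codes) {
  std::vector<int> seen;
  for (int code : reg_codes) {
    if (code < 0) continue;            // skip NoReg placeholders
    for (int other : seen) {
      if (other == code) return true;  // the same register appears twice
    }
    seen.push_back(code);
  }
  return false;
}

// A stub would typically assert the negation, e.g. assert(!AreAliasedSketch({0, 10, 11})).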
static const int kInstanceTypeOffset
Definition: objects.h:6459
static Handle< Map > TypeToMap(HeapType *type, Isolate *isolate)
Definition: ic.cc:683
bool IncludesNumberType(TypeHandleList *types)
Definition: stub-cache.cc:842
static JSFunction * cast(Object *obj)