v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
stub-cache-ia32.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


static void ProbeTable(Isolate* isolate,
                       MacroAssembler* masm,
                       Code::Flags flags,
                       StubCache::Table table,
                       Register name,
                       Register receiver,
                       // Number of the cache entry pointer-size scaled.
                       Register offset,
                       Register extra) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  Label miss;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ lea(offset, Operand(offset, offset, times_2, 0));

  if (extra.is_valid()) {
    // Get the code entry from the cache.
    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

    // Check that the flags match what we're looking for.
    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
    __ cmp(offset, flags);
    __ j(not_equal, &miss);

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    // Jump to the first instruction in the code stub.
    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ jmp(extra);

    __ bind(&miss);
  } else {
    // Save the offset on the stack.
    __ push(offset);

    // Check that the key in the entry matches the name.
    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
    __ j(not_equal, &miss);

    // Check the map matches.
    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
    __ j(not_equal, &miss);

    // Restore offset register.
    __ mov(offset, Operand(esp, 0));

    // Get the code entry from the cache.
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

    // Check that the flags match what we're looking for.
    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
    __ cmp(offset, flags);
    __ j(not_equal, &miss);

#ifdef DEBUG
    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
      __ jmp(&miss);
    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
      __ jmp(&miss);
    }
#endif

    // Restore offset and re-load code entry from cache.
    __ pop(offset);
    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));

    // Jump to the first instruction in the code stub.
    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ jmp(offset);

    // Pop at miss.
    __ bind(&miss);
    __ pop(offset);
  }
}

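// Editorial sketch (not part of the original source): each stub cache
// entry holds three pointer-sized fields -- name, code and map -- so with
// 4-byte pointers an entry occupies 12 bytes (see the ASSERT in
// GenerateProbe below). That is what the lea above computes:
//
//   offset * 3  ==  offset + offset * 2   // lea(offset, offset, times_2)
//
// The incoming offset is already pointer-scaled, which is why plain
// times_1 addressing into the key/value/map arrays suffices afterwards.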

void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
                                                    Label* miss_label,
                                                    Register receiver,
                                                    Handle<Name> name,
                                                    Register scratch0,
                                                    Register scratch1) {
  ASSERT(name->IsUniqueName());
  ASSERT(!receiver.is(scratch0));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->negative_lookups(), 1);
  __ IncrementCounter(counters->negative_lookups_miss(), 1);

  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // Bail out if the receiver has a named interceptor or requires access checks.
  __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
            kInterceptorOrAccessCheckNeededMask);
  __ j(not_zero, miss_label);

  // Check that receiver is a JSObject.
  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
  __ j(below, miss_label);

  // Load properties array.
  Register properties = scratch0;
  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));

  // Check that the properties array is a dictionary.
  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->hash_table_map()));
  __ j(not_equal, miss_label);

  Label done;
  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
                                                   miss_label,
                                                   &done,
                                                   properties,
                                                   name,
                                                   scratch1);
  __ bind(&done);
  __ DecrementCounter(counters->negative_lookups_miss(), 1);
}

void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra,
                              Register extra2,
                              Register extra3) {
  Label miss;

  // Assert that code is valid. The multiplying code relies on the entry size
  // being 12.
  ASSERT(sizeof(Entry) == 12);

  // Assert the flags do not name a specific type.
  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

  // Assert that there are no register conflicts.
  ASSERT(!scratch.is(receiver));
  ASSERT(!scratch.is(name));
  ASSERT(!extra.is(receiver));
  ASSERT(!extra.is(name));
  ASSERT(!extra.is(scratch));

  // Assert scratch and extra registers are valid, and extra2/3 are unused.
  ASSERT(!scratch.is(no_reg));
  ASSERT(extra2.is(no_reg));
  ASSERT(extra3.is(no_reg));

  Register offset = scratch;
  scratch = no_reg;

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, flags);
  // We mask out the last two bits because they are not part of the hash and
  // they are always 01 for maps. Also in the two 'and' instructions below.
  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
  // ProbeTable expects the offset to be pointer scaled, which it is, because
  // the heap object tag size is 2 and the pointer size log 2 is also 2.
  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);

  // Probe the primary table.
  ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);

  // Primary miss: Compute hash for secondary probe.
  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xor_(offset, flags);
  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
  __ sub(offset, name);
  __ add(offset, Immediate(flags));
  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);

  // Probe the secondary table.
  ProbeTable(
      isolate(), masm, flags, kSecondary, name, receiver, offset, extra);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}

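// Editorial restatement (not part of the original source) of the two hash
// computations above, with plain integers standing in for registers:
//
//   primary   = ((name_hash + receiver_map) ^ flags)
//               & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
//   secondary = (primary - name + flags)
//               & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
//
// Masking off the low kHeapObjectTagSize bits keeps both results
// pointer-scaled, so ProbeTable can index the flat tables directly.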
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  __ LoadGlobalFunction(index, prototype);
  __ LoadGlobalFunctionInitialMap(prototype, prototype);
  // Load the prototype from the initial map.
  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}


void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
    MacroAssembler* masm,
    int index,
    Register prototype,
    Label* miss) {
  // Get the global function with the given index.
  Handle<JSFunction> function(
      JSFunction::cast(masm->isolate()->native_context()->get(index)));
  // Check we're still in the same context.
  Register scratch = prototype;
  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
  __ mov(scratch, Operand(esi, offset));
  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
  __ j(not_equal, miss);

  // Load its initial map. The global functions all have initial maps.
  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
  // Load the prototype from the initial map.
  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}

void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss_label);

  // Check that the object is a JS array.
  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
  __ j(not_equal, miss_label);

  // Load length directly from the JS array.
  __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
  __ ret(0);
}


void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                 Register receiver,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* miss_label) {
  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
  __ mov(eax, scratch1);
  __ ret(0);
}


void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst,
                                            Register src,
                                            bool inobject,
                                            int index,
                                            Representation representation) {
  ASSERT(!representation.IsDouble());
  int offset = index * kPointerSize;
  if (!inobject) {
    // Calculate the offset into the properties array.
    offset = offset + FixedArray::kHeaderSize;
    __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
    src = dst;
  }
  __ mov(dst, FieldOperand(src, offset));
}

static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register receiver,
                                     Register holder,
                                     Register name,
                                     Handle<JSObject> holder_obj) {
  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
  __ push(name);
  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
  Register scratch = name;
  __ mov(scratch, Immediate(interceptor));
  __ push(scratch);
  __ push(receiver);
  __ push(holder);
}


static void CompileCallLoadPropertyWithInterceptor(
    MacroAssembler* masm,
    Register receiver,
    Register holder,
    Register name,
    Handle<JSObject> holder_obj,
    IC::UtilityId id) {
  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
  __ CallExternalReference(
      ExternalReference(IC_Utility(id), masm->isolate()),
      StubCache::kInterceptorArgsLength);
}

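// Editorial note (not from the original source): the pushes in
// PushInterceptorArguments build the argument block in the order the
// kInterceptorArgs* indices asserted above describe -- name, interceptor
// info, receiver, holder -- for a total of kInterceptorArgsLength (4)
// words, the count passed to CallExternalReference here and to
// TailCallExternalReference in GenerateLoadInterceptor below.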
// Generate call to api function.
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should be removed
// when api call ICs are generated in hydrogen.
void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       Handle<Map> receiver_map,
                                       Register receiver,
                                       Register scratch_in,
                                       bool is_store,
                                       int argc,
                                       Register* values) {
  // Copy return value.
  __ pop(scratch_in);
  // receiver
  __ push(receiver);
  // Write the arguments to stack frame.
  for (int i = 0; i < argc; i++) {
    Register arg = values[argc - 1 - i];
    ASSERT(!receiver.is(arg));
    ASSERT(!scratch_in.is(arg));
    __ push(arg);
  }
  __ push(scratch_in);
  // Stack now matches JSFunction abi.
  ASSERT(optimization.is_simple_api_call());

  // Abi for CallApiFunctionStub.
  Register callee = eax;
  Register call_data = ebx;
  Register holder = ecx;
  Register api_function_address = edx;
  Register scratch = edi;  // scratch_in is no longer valid.

  // Put holder in place.
  CallOptimization::HolderLookup holder_lookup;
  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
      receiver_map,
      &holder_lookup);
  switch (holder_lookup) {
    case CallOptimization::kHolderIsReceiver:
      __ Move(holder, receiver);
      break;
    case CallOptimization::kHolderFound:
      __ LoadHeapObject(holder, api_holder);
      break;
    case CallOptimization::kHolderNotFound:
      UNREACHABLE();
      break;
  }

  Isolate* isolate = masm->isolate();
  Handle<JSFunction> function = optimization.constant_function();
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data_obj(api_call_info->data(), isolate);

  // Put callee in place.
  __ LoadHeapObject(callee, function);

  bool call_data_undefined = false;
  // Put call_data in place.
  if (isolate->heap()->InNewSpace(*call_data_obj)) {
    __ mov(scratch, api_call_info);
    __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
  } else if (call_data_obj->IsUndefined()) {
    call_data_undefined = true;
    __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
  } else {
    __ mov(call_data, call_data_obj);
  }

  // Put api_function_address in place.
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  __ mov(api_function_address, Immediate(function_address));

  // Jump to stub.
  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
  __ TailCallStub(&stub);
}

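// Editorial sketch (not from the original source): after the pushes above
// the stack mirrors an ordinary JSFunction call --
//
//   esp[0]              : return address (re-pushed from scratch_in)
//   esp[4]              : values[0]
//   ...
//   esp[argc * 4]       : values[argc - 1]
//   esp[(argc + 1) * 4] : receiver
//
// while eax/ebx/ecx/edx carry the CallApiFunctionStub operands named in
// the code: callee, call data, holder and the C function address.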

void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
                                            Label* label,
                                            Handle<Name> name) {
  if (!label->is_unused()) {
    __ bind(label);
    __ mov(this->name(), Immediate(name));
  }
}

// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
                                             Handle<JSGlobalObject> global,
                                             Handle<Name> name,
                                             Register scratch,
                                             Label* miss) {
  Handle<PropertyCell> cell =
      JSGlobalObject::EnsurePropertyCell(global, name);
  ASSERT(cell->value()->IsTheHole());
  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
  if (Serializer::enabled()) {
    __ mov(scratch, Immediate(cell));
    __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
           Immediate(the_hole));
  } else {
    __ cmp(Operand::ForCell(cell), Immediate(the_hole));
  }
  __ j(not_equal, miss);
}

void StubCompiler::GenerateNegativeHolderLookup(
    MacroAssembler* masm,
    Handle<JSObject> holder,
    Register holder_reg,
    Handle<Name> name,
    Label* miss) {
  if (holder->IsJSGlobalObject()) {
    GenerateCheckPropertyCell(
        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
    GenerateDictionaryNegativeLookup(
        masm, miss, holder_reg, name, scratch1(), scratch2());
  }
}

// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
                                                Handle<JSObject> object,
                                                LookupResult* lookup,
                                                Handle<Map> transition,
                                                Handle<Name> name,
                                                Register receiver_reg,
                                                Register storage_reg,
                                                Register value_reg,
                                                Register scratch1,
                                                Register scratch2,
                                                Register unused,
                                                Label* miss_label,
                                                Label* slow) {
  int descriptor = transition->LastAdded();
  DescriptorArray* descriptors = transition->instance_descriptors();
  PropertyDetails details = descriptors->GetDetails(descriptor);
  Representation representation = details.representation();
  ASSERT(!representation.IsNone());

  if (details.type() == CONSTANT) {
    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
    __ CmpObject(value_reg, constant);
    __ j(not_equal, miss_label);
  } else if (representation.IsSmi()) {
    __ JumpIfNotSmi(value_reg, miss_label);
  } else if (representation.IsHeapObject()) {
    __ JumpIfSmi(value_reg, miss_label);
  } else if (representation.IsDouble()) {
    Label do_store, heap_number;
    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);

    __ JumpIfNotSmi(value_reg, &heap_number);
    __ SmiUntag(value_reg);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ Cvtsi2sd(xmm0, value_reg);
    } else {
      __ push(value_reg);
      __ fild_s(Operand(esp, 0));
      __ pop(value_reg);
    }
    __ SmiTag(value_reg);
    __ jmp(&do_store);

    __ bind(&heap_number);
    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
                miss_label, DONT_DO_SMI_CHECK);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
    } else {
      __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
    }

    __ bind(&do_store);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
    } else {
      __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
    }
  }

  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  // Perform map transition for the receiver if necessary.
  if (details.type() == FIELD &&
      object->map()->unused_property_fields() == 0) {
    // The properties must be extended before we can store the value.
    // We jump to a runtime call that extends the properties array.
    __ pop(scratch1);  // Return address.
    __ push(receiver_reg);
    __ push(Immediate(transition));
    __ push(value_reg);
    __ push(scratch1);
    __ TailCallExternalReference(
        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                          masm->isolate()),
        3,
        1);
    return;
  }

  // Update the map of the object.
  __ mov(scratch1, Immediate(transition));
  __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);

  // Update the write barrier for the map field.
  __ RecordWriteField(receiver_reg,
                      HeapObject::kMapOffset,
                      scratch1,
                      scratch2,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  if (details.type() == CONSTANT) {
    ASSERT(value_reg.is(eax));
    __ ret(0);
    return;
  }

  int index = transition->instance_descriptors()->GetFieldIndex(
      transition->LastAdded());

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  SmiCheck smi_check = representation.IsTagged()
      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
  // TODO(verwaest): Share this code as a code stub.
  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    if (representation.IsDouble()) {
      __ mov(FieldOperand(receiver_reg, offset), storage_reg);
    } else {
      __ mov(FieldOperand(receiver_reg, offset), value_reg);
    }

    if (!representation.IsSmi()) {
      // Update the write barrier for the array address.
      if (!representation.IsDouble()) {
        __ mov(storage_reg, value_reg);
      }
      __ RecordWriteField(receiver_reg,
                          offset,
                          storage_reg,
                          scratch1,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          smi_check);
    }
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array (optimistically).
    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
    if (representation.IsDouble()) {
      __ mov(FieldOperand(scratch1, offset), storage_reg);
    } else {
      __ mov(FieldOperand(scratch1, offset), value_reg);
    }

    if (!representation.IsSmi()) {
      // Update the write barrier for the array address.
      if (!representation.IsDouble()) {
        __ mov(storage_reg, value_reg);
      }
      __ RecordWriteField(scratch1,
                          offset,
                          storage_reg,
                          receiver_reg,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          smi_check);
    }
  }

  // Return the value (register eax).
  ASSERT(value_reg.is(eax));
  __ ret(0);
}

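// Editorial note (not from the original source) on the double path above:
// a value destined for a double field is boxed into the freshly allocated
// HeapNumber in storage_reg. A smi is untagged and converted with
// cvtsi2sd when SSE2 is available (fild_s otherwise); a heap number's bits
// are loaded with movsd or fld_d and written out with movsd or fstp_d. The
// tagged storage_reg pointer is then what gets stored into the field and
// recorded by the write barrier.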
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
                                           Handle<JSObject> object,
                                           LookupResult* lookup,
                                           Register receiver_reg,
                                           Register name_reg,
                                           Register value_reg,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* miss_label) {
  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  int index = lookup->GetFieldIndex().field_index();

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  Representation representation = lookup->representation();
  ASSERT(!representation.IsNone());
  if (representation.IsSmi()) {
    __ JumpIfNotSmi(value_reg, miss_label);
  } else if (representation.IsHeapObject()) {
    __ JumpIfSmi(value_reg, miss_label);
  } else if (representation.IsDouble()) {
    // Load the double storage.
    if (index < 0) {
      int offset = object->map()->instance_size() + (index * kPointerSize);
      __ mov(scratch1, FieldOperand(receiver_reg, offset));
    } else {
      __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
      int offset = index * kPointerSize + FixedArray::kHeaderSize;
      __ mov(scratch1, FieldOperand(scratch1, offset));
    }

    // Store the value into the storage.
    Label do_store, heap_number;
    __ JumpIfNotSmi(value_reg, &heap_number);
    __ SmiUntag(value_reg);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ Cvtsi2sd(xmm0, value_reg);
    } else {
      __ push(value_reg);
      __ fild_s(Operand(esp, 0));
      __ pop(value_reg);
    }
    __ SmiTag(value_reg);
    __ jmp(&do_store);
    __ bind(&heap_number);
    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
                miss_label, DONT_DO_SMI_CHECK);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
    } else {
      __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
    }
    __ bind(&do_store);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
    } else {
      __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
    }
    // Return the value (register eax).
    ASSERT(value_reg.is(eax));
    __ ret(0);
    return;
  }

  ASSERT(!representation.IsDouble());
  // TODO(verwaest): Share this code as a code stub.
  SmiCheck smi_check = representation.IsTagged()
      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    __ mov(FieldOperand(receiver_reg, offset), value_reg);

    if (!representation.IsSmi()) {
      // Update the write barrier for the array address.
      // Pass the value being stored in the now unused name_reg.
      __ mov(name_reg, value_reg);
      __ RecordWriteField(receiver_reg,
                          offset,
                          name_reg,
                          scratch1,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          smi_check);
    }
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array (optimistically).
    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ mov(FieldOperand(scratch1, offset), value_reg);

    if (!representation.IsSmi()) {
      // Update the write barrier for the array address.
      // Pass the value being stored in the now unused name_reg.
      __ mov(name_reg, value_reg);
      __ RecordWriteField(scratch1,
                          offset,
                          name_reg,
                          receiver_reg,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          smi_check);
    }
  }

  // Return the value (register eax).
  ASSERT(value_reg.is(eax));
  __ ret(0);
}

void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
  __ jmp(code, RelocInfo::CODE_TARGET);
}


#undef __
#define __ ACCESS_MASM(masm())

Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
                                       Register object_reg,
                                       Handle<JSObject> holder,
                                       Register holder_reg,
                                       Register scratch1,
                                       Register scratch2,
                                       Handle<Name> name,
                                       Label* miss,
                                       PrototypeCheckType check) {
  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));

  // Make sure there's no overlap between holder and object registers.
  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
         && !scratch2.is(scratch1));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 0;

  Handle<JSObject> current = Handle<JSObject>::null();
  if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
  Handle<JSObject> prototype = Handle<JSObject>::null();
  Handle<Map> current_map = receiver_map;
  Handle<Map> holder_map(holder->map());
  // Traverse the prototype chain and check the maps in the prototype chain for
  // fast and global objects or do negative lookup for normal objects.
  while (!current_map.is_identical_to(holder_map)) {
    ++depth;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(current_map->IsJSGlobalProxyMap() ||
           !current_map->is_access_check_needed());

    prototype = handle(JSObject::cast(current_map->prototype()));
    if (current_map->is_dictionary_map() &&
        !current_map->IsJSGlobalObjectMap() &&
        !current_map->IsJSGlobalProxyMap()) {
      if (!name->IsUniqueName()) {
        ASSERT(name->IsString());
        name = factory()->InternalizeString(Handle<String>::cast(name));
      }
      ASSERT(current.is_null() ||
             current->property_dictionary()->FindEntry(*name) ==
             NameDictionary::kNotFound);

      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
                                       scratch1, scratch2);

      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
      reg = holder_reg;  // From now on the object will be in holder_reg.
      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
    } else {
      bool in_new_space = heap()->InNewSpace(*prototype);
      if (depth != 1 || check == CHECK_ALL_MAPS) {
        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
      }

      // Check access rights to the global object. This has to happen after
      // the map check so that we know that the object is actually a global
      // object.
      if (current_map->IsJSGlobalProxyMap()) {
        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
      } else if (current_map->IsJSGlobalObjectMap()) {
        GenerateCheckPropertyCell(
            masm(), Handle<JSGlobalObject>::cast(current), name,
            scratch2, miss);
      }

      if (in_new_space) {
        // Save the map in scratch1 for later.
        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
      }

      reg = holder_reg;  // From now on the object will be in holder_reg.

      if (in_new_space) {
        // The prototype is in new space; we cannot store a reference to it
        // in the code. Load it from the map.
        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
      } else {
        // The prototype is in old space; load it directly.
        __ mov(reg, prototype);
      }
    }

    // Go to the next object in the prototype chain.
    current = prototype;
    current_map = handle(current->map());
  }

  // Log the check depth.
  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));

  if (depth != 0 || check == CHECK_ALL_MAPS) {
    // Check the holder map.
    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
  }

  // Perform security check for access to the global object.
  ASSERT(current_map->IsJSGlobalProxyMap() ||
         !current_map->is_access_check_needed());
  if (current_map->IsJSGlobalProxyMap()) {
    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
  }

  // Return the register containing the holder.
  return reg;
}

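// Editorial summary (not from the original source): the loop above guards
// a handler against prototype-chain mutation. At each hop it either
// compares the object's map against the expected map (fast and global
// objects) or emits a dictionary negative lookup (dictionary-mode
// objects), adds an access check for global proxies and a property-cell
// hole check for global objects, and finally returns the register that
// ends up holding the holder.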
void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
  if (!miss->is_unused()) {
    Label success;
    __ jmp(&success);
    __ bind(miss);
    TailCallBuiltin(masm(), MissBuiltin(kind()));
    __ bind(&success);
  }
}


void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
  if (!miss->is_unused()) {
    Label success;
    __ jmp(&success);
    GenerateRestoreName(masm(), miss, name);
    TailCallBuiltin(masm(), MissBuiltin(kind()));
    __ bind(&success);
  }
}

Register LoadStubCompiler::CallbackHandlerFrontend(
    Handle<HeapType> type,
    Register object_reg,
    Handle<JSObject> holder,
    Handle<Name> name,
    Handle<Object> callback) {
  Label miss;

  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);

  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
    ASSERT(!reg.is(scratch2()));
    ASSERT(!reg.is(scratch3()));
    Register dictionary = scratch1();
    bool must_preserve_dictionary_reg = reg.is(dictionary);

    // Load the properties dictionary.
    if (must_preserve_dictionary_reg) {
      __ push(dictionary);
    }
    __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));

    // Probe the dictionary.
    Label probe_done, pop_and_miss;
    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
                                                     &pop_and_miss,
                                                     &probe_done,
                                                     dictionary,
                                                     this->name(),
                                                     scratch2(),
                                                     scratch3());
    __ bind(&pop_and_miss);
    if (must_preserve_dictionary_reg) {
      __ pop(dictionary);
    }
    __ jmp(&miss);
    __ bind(&probe_done);

    // If probing finds an entry in the dictionary, scratch2 contains the
    // index into the dictionary. Check that the value is the callback.
    Register index = scratch2();
    const int kElementsStartOffset =
        NameDictionary::kHeaderSize +
        NameDictionary::kElementsStartIndex * kPointerSize;
    const int kValueOffset = kElementsStartOffset + kPointerSize;
    __ mov(scratch3(),
           Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
    if (must_preserve_dictionary_reg) {
      __ pop(dictionary);
    }
    __ cmp(scratch3(), callback);
    __ j(not_equal, &miss);
  }

  HandlerFrontendFooter(name, &miss);
  return reg;
}

void LoadStubCompiler::GenerateLoadField(Register reg,
                                         Handle<JSObject> holder,
                                         PropertyIndex field,
                                         Representation representation) {
  if (!reg.is(receiver())) __ mov(receiver(), reg);
  if (kind() == Code::LOAD_IC) {
    LoadFieldStub stub(field.is_inobject(holder),
                       field.translate(holder),
                       representation);
    GenerateTailCall(masm(), stub.GetCode(isolate()));
  } else {
    KeyedLoadFieldStub stub(field.is_inobject(holder),
                            field.translate(holder),
                            representation);
    GenerateTailCall(masm(), stub.GetCode(isolate()));
  }
}

void LoadStubCompiler::GenerateLoadCallback(
    Register reg,
    Handle<ExecutableAccessorInfo> callback) {
  // Insert additional parameters into the stack frame above return address.
  ASSERT(!scratch3().is(reg));
  __ pop(scratch3());  // Get return address to place it below.

  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
  __ push(receiver());  // receiver
  // Push data from ExecutableAccessorInfo.
  if (isolate()->heap()->InNewSpace(callback->data())) {
    ASSERT(!scratch2().is(reg));
    __ mov(scratch2(), Immediate(callback));
    __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
  } else {
    __ push(Immediate(Handle<Object>(callback->data(), isolate())));
  }
  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
  // ReturnValue default value
  __ push(Immediate(isolate()->factory()->undefined_value()));
  __ push(Immediate(reinterpret_cast<int>(isolate())));
  __ push(reg);  // holder

  // Save a pointer to where we pushed the arguments. This will be
  // passed as the const PropertyAccessorInfo& to the C++ callback.
  __ push(esp);

  __ push(name());  // name

  __ push(scratch3());  // Restore return address.

  // Abi for CallApiGetter
  Register getter_address = edx;
  Address function_address = v8::ToCData<Address>(callback->getter());
  __ mov(getter_address, Immediate(function_address));

  CallApiGetterStub stub;
  __ TailCallStub(&stub);
}

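// Editorial sketch (not from the original source): the pushes above build
// the PropertyCallbackArguments block below the return address. Because
// the stack grows down, pushing receiver, data, the two ReturnValue
// slots, the isolate and the holder in that order leaves holder at
// index 0, matching the kHolderIndex..kThisIndex layout asserted at the
// top of the function; the saved esp and the name are then passed on top
// as the AccessorInfo pointer and property name.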
void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
  // Return the constant value.
  __ LoadObject(eax, value);
  __ ret(0);
}

void LoadStubCompiler::GenerateLoadInterceptor(
    Register holder_reg,
    Handle<Object> object,
    Handle<JSObject> interceptor_holder,
    LookupResult* lookup,
    Handle<Name> name) {
  ASSERT(interceptor_holder->HasNamedInterceptor());
  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());

  // So far the most popular follow-ups for interceptor loads are FIELD
  // and CALLBACKS, so inline only them; other cases may be added later.
  bool compile_followup_inline = false;
  if (lookup->IsFound() && lookup->IsCacheable()) {
    if (lookup->IsField()) {
      compile_followup_inline = true;
    } else if (lookup->type() == CALLBACKS &&
               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
      ExecutableAccessorInfo* callback =
          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
      compile_followup_inline = callback->getter() != NULL &&
          callback->IsCompatibleReceiver(*object);
    }
  }

  if (compile_followup_inline) {
    // Compile the interceptor call, followed by inline code to load the
    // property from further up the prototype chain if the call fails.
    // Check that the maps haven't changed.
    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));

    // Preserve the receiver register explicitly whenever it is different from
    // the holder and it is needed should the interceptor return without any
    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
    // the FIELD case might cause a miss during the prototype check.
    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
        (lookup->type() == CALLBACKS || must_perform_prototype_check);

    // Save necessary data before invoking an interceptor.
    // Requires a frame to make GC aware of pushed pointers.
    {
      FrameScope frame_scope(masm(), StackFrame::INTERNAL);

      if (must_preserve_receiver_reg) {
        __ push(receiver());
      }
      __ push(holder_reg);
      __ push(this->name());

      // Invoke an interceptor. Note: map checks from receiver to
      // interceptor's holder has been compiled before (see a caller
      // of this method.)
      CompileCallLoadPropertyWithInterceptor(
          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
          IC::kLoadPropertyWithInterceptorOnly);

      // Check if interceptor provided a value for property. If it's
      // the case, return immediately.
      Label interceptor_failed;
      __ cmp(eax, factory()->no_interceptor_result_sentinel());
      __ j(equal, &interceptor_failed);
      frame_scope.GenerateLeaveFrame();
      __ ret(0);

      // Clobber registers when generating debug-code to provoke errors.
      __ bind(&interceptor_failed);
      if (FLAG_debug_code) {
        __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
        __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
        __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
      }

      __ pop(this->name());
      __ pop(holder_reg);
      if (must_preserve_receiver_reg) {
        __ pop(receiver());
      }

      // Leave the internal frame.
    }

    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
  } else {  // !compile_followup_inline
    // Call the runtime system to load the interceptor.
    // Check that the maps haven't changed.
    __ pop(scratch2());  // save old return address
    PushInterceptorArguments(masm(), receiver(), holder_reg,
                             this->name(), interceptor_holder);
    __ push(scratch2());  // restore old return address

    ExternalReference ref =
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                          isolate());
    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
  }
}

void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
  Label success;
  // Check that the object is a boolean.
  __ cmp(object, factory()->true_value());
  __ j(equal, &success);
  __ cmp(object, factory()->false_value());
  __ j(not_equal, miss);
  __ bind(&success);
}

Handle<Code> StoreStubCompiler::CompileStoreCallback(
    Handle<JSObject> object,
    Handle<JSObject> holder,
    Handle<Name> name,
    Handle<ExecutableAccessorInfo> callback) {
  Register holder_reg = HandlerFrontend(
      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);

  __ pop(scratch1());  // remove the return address
  __ push(receiver());
  __ push(holder_reg);
  __ Push(callback);
  __ Push(name);
  __ push(value());
  __ push(scratch1());  // restore return address

  // Do tail-call to the runtime system.
  ExternalReference store_callback_property =
      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
  __ TailCallExternalReference(store_callback_property, 5, 1);

  // Return the generated code.
  return GetCode(kind(), Code::FAST, name);
}

#undef __
#define __ ACCESS_MASM(masm)

void StoreStubCompiler::GenerateStoreViaSetter(
    MacroAssembler* masm,
    Handle<HeapType> type,
    Register receiver,
    Handle<JSFunction> setter) {
  // ----------- S t a t e -------------
  //  -- esp[0] : return address
  // -----------------------------------
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Save value register, so we can restore it later.
    __ push(value());

    if (!setter.is_null()) {
      // Call the JavaScript setter with receiver and value on the stack.
      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
        // Swap in the global receiver.
        __ mov(receiver,
               FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
      }
      __ push(receiver);
      __ push(value());
      ParameterCount actual(1);
      ParameterCount expected(setter);
      __ InvokeFunction(setter, expected, actual,
                        CALL_FUNCTION, NullCallWrapper());
    } else {
      // If we generate a global code snippet for deoptimization only, remember
      // the place to continue after deoptimization.
      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
    }

    // We have to return the passed value, not the return value of the setter.
    __ pop(eax);

    // Restore context register.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ ret(0);
}


#undef __
#define __ ACCESS_MASM(masm())

Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
    Handle<JSObject> object,
    Handle<Name> name) {
  __ pop(scratch1());  // remove the return address
  __ push(receiver());
  __ push(this->name());
  __ push(value());
  __ push(scratch1());  // restore return address

  // Do tail-call to the runtime system.
  ExternalReference store_ic_property =
      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
  __ TailCallExternalReference(store_ic_property, 3, 1);

  // Return the generated code.
  return GetCode(kind(), Code::FAST, name);
}


void StoreStubCompiler::GenerateStoreArrayLength() {
  // Prepare tail call to StoreIC_ArrayLength.
  __ pop(scratch1());  // remove the return address
  __ push(receiver());
  __ push(value());
  __ push(scratch1());  // restore return address

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
                        masm()->isolate());
  __ TailCallExternalReference(ref, 2, 1);
}

Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
    MapHandleList* receiver_maps,
    CodeHandleList* handler_stubs,
    MapHandleList* transitioned_maps) {
  Label miss;
  __ JumpIfSmi(receiver(), &miss, Label::kNear);
  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
  for (int i = 0; i < receiver_maps->length(); ++i) {
    __ cmp(scratch1(), receiver_maps->at(i));
    if (transitioned_maps->at(i).is_null()) {
      __ j(equal, handler_stubs->at(i));
    } else {
      Label next_map;
      __ j(not_equal, &next_map, Label::kNear);
      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
      __ bind(&next_map);
    }
  }
  __ bind(&miss);
  TailCallBuiltin(masm(), MissBuiltin(kind()));

  // Return the generated code.
  return GetICCode(
      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}

Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
                                                      Handle<JSObject> last,
                                                      Handle<Name> name) {
  NonexistentHandlerFrontend(type, last, name);

  // Return undefined if maps of the full prototype chain are still the
  // same and no global property with this name contains a value.
  __ mov(eax, isolate()->factory()->undefined_value());
  __ ret(0);

  // Return the generated code.
  return GetCode(kind(), Code::FAST, name);
}


Register* LoadStubCompiler::registers() {
  // receiver, name, scratch1, scratch2, scratch3, scratch4.
  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
  return registers;
}


Register* KeyedLoadStubCompiler::registers() {
  // receiver, name, scratch1, scratch2, scratch3, scratch4.
  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
  return registers;
}


Register StoreStubCompiler::value() {
  return eax;
}


Register* StoreStubCompiler::registers() {
  // receiver, name, scratch1, scratch2, scratch3.
  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
  return registers;
}


Register* KeyedStoreStubCompiler::registers() {
  // receiver, name, scratch1, scratch2, scratch3.
  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
  return registers;
}


#undef __
#define __ ACCESS_MASM(masm)

void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
                                             Handle<HeapType> type,
                                             Register receiver,
                                             Handle<JSFunction> getter) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    if (!getter.is_null()) {
      // Call the JavaScript getter with the receiver on the stack.
      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
        // Swap in the global receiver.
        __ mov(receiver,
               FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
      }
      __ push(receiver);
      ParameterCount actual(0);
      ParameterCount expected(getter);
      __ InvokeFunction(getter, expected, actual,
                        CALL_FUNCTION, NullCallWrapper());
    } else {
      // If we generate a global code snippet for deoptimization only, remember
      // the place to continue after deoptimization.
      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
    }

    // Restore context register.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ ret(0);
}


#undef __
#define __ ACCESS_MASM(masm())

Handle<Code> LoadStubCompiler::CompileLoadGlobal(
    Handle<HeapType> type,
    Handle<GlobalObject> global,
    Handle<PropertyCell> cell,
    Handle<Name> name,
    bool is_dont_delete) {
  Label miss;

  HandlerFrontendHeader(type, receiver(), global, name, &miss);
  // Get the value from the cell.
  if (Serializer::enabled()) {
    __ mov(eax, Immediate(cell));
    __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
  } else {
    __ mov(eax, Operand::ForCell(cell));
  }

  // Check for deleted property if property can actually be deleted.
  if (!is_dont_delete) {
    __ cmp(eax, factory()->the_hole_value());
    __ j(equal, &miss);
  } else if (FLAG_debug_code) {
    __ cmp(eax, factory()->the_hole_value());
    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
  }

  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->named_load_global_stub(), 1);
  // The code above already loads the result into the return register.
  __ ret(0);

  HandlerFrontendFooter(name, &miss);

  // Return the generated code.
  return GetCode(kind(), Code::NORMAL, name);
}

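// Editorial note (not from the original source): the Serializer::enabled()
// split above exists because a snapshot cannot embed the cell's raw
// address. When serializing, the cell is materialized as a relocatable
// immediate and its value loaded via PropertyCell::kValueOffset; otherwise
// Operand::ForCell addresses the cell's value slot directly.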
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
    TypeHandleList* types,
    CodeHandleList* handlers,
    Handle<Name> name,
    Code::StubType type,
    IcCheckType check) {
  Label miss;

  if (check == PROPERTY &&
      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
    __ cmp(this->name(), Immediate(name));
    __ j(not_equal, &miss);
  }

  Label number_case;
  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
  __ JumpIfSmi(receiver(), smi_target);

  Register map_reg = scratch1();
  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
  int receiver_count = types->length();
  int number_of_handled_maps = 0;
  for (int current = 0; current < receiver_count; ++current) {
    Handle<HeapType> type = types->at(current);
    Handle<Map> map = IC::TypeToMap(*type, isolate());
    if (!map->is_deprecated()) {
      number_of_handled_maps++;
      __ cmp(map_reg, map);
      if (type->Is(HeapType::Number())) {
        ASSERT(!number_case.is_unused());
        __ bind(&number_case);
      }
      __ j(equal, handlers->at(current));
    }
  }
  ASSERT(number_of_handled_maps != 0);

  __ bind(&miss);
  TailCallBuiltin(masm(), MissBuiltin(kind()));

  // Return the generated code.
  InlineCacheState state =
      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
  return GetICCode(kind(), type, name, state);
}

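// Editorial summary (not from the original source): a polymorphic IC here
// is a linear map-compare chain. The receiver's map is loaded once into
// scratch1() and compared against each non-deprecated candidate map,
// jumping straight to the matching handler; smi receivers are routed to
// the Number case when one exists, and everything else falls through to
// the miss builtin.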
#undef __
#define __ ACCESS_MASM(masm)

void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label slow, miss;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
  __ JumpIfNotSmi(ecx, &miss);
  __ mov(ebx, ecx);
  __ SmiUntag(ebx);
  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));

  // Push receiver on the stack to free up a register for the dictionary
  // probing.
  __ push(edx);
  __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
  // Pop receiver before returning.
  __ pop(edx);
  __ ret(0);

  __ bind(&slow);
  __ pop(edx);

  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);

  __ bind(&miss);
  // ----------- S t a t e -------------
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32
byte * Address
Definition: globals.h:186
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void GenerateRestoreName(MacroAssembler *masm, Label *label, Handle< Name > name)
static const int kHashFieldOffset
Definition: objects.h:8629
static const int kBitFieldOffset
Definition: objects.h:6461
static const int kValueOffset
Definition: objects.h:9547
static int SlotOffset(int index)
Definition: contexts.h:498
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
static ExecutableAccessorInfo * cast(Object *obj)
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
static const int kFlagsOffset
Definition: objects.h:5592
#define LOG(isolate, Call)
Definition: log.h:86
Handle< Code > CompileStoreInterceptor(Handle< JSObject > object, Handle< Name > name)
const Register esp
static Handle< String > cast(Handle< S > that)
Definition: handles.h:75
static const int kGlobalReceiverOffset
Definition: objects.h:7613
void GenerateLoadField(Register reg, Handle< JSObject > holder, PropertyIndex field, Representation representation)
static StubType ExtractTypeFromFlags(Flags flags)
Definition: objects-inl.h:4646
static const int kInterceptorArgsLength
Definition: stub-cache.h:207
static bool IsSupported(CpuFeature f)
Definition: assembler-arm.h:68
static const int kInterceptorArgsNameIndex
Definition: stub-cache.h:203
static bool enabled()
Definition: serialize.h:485
static const int kHasNamedInterceptor
Definition: objects.h:6470
static const int kIsAccessCheckNeeded
Definition: objects.h:6474
uint32_t Flags
Definition: objects.h:5184
List< Handle< Map > > MapHandleList
Definition: list.h:218
#define ASSERT(condition)
Definition: checks.h:329
static const int kContextOffset
Definition: frames.h:185
const int kPointerSizeLog2
Definition: globals.h:281
Handle< Code > CompileStoreCallback(Handle< JSObject > object, Handle< JSObject > holder, Handle< Name > name, Handle< ExecutableAccessorInfo > callback)
virtual Register HandlerFrontendHeader(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Label *miss)
Definition: stub-cache.cc:790
virtual void HandlerFrontendFooter(Handle< Name > name, Label *miss)
const Register edi
const Register ebp
#define UNREACHABLE()
Definition: checks.h:52
void GenerateLoadPostInterceptor(Register reg, Handle< JSObject > interceptor_holder, Handle< Name > name, LookupResult *lookup)
Definition: stub-cache.cc:983
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const Register eax
static const int kValueOffset
Definition: objects.h:1971
V8 runtime flag help text (concatenated DEFINE_* flag descriptions; garbled in extraction)
Definition: flags.cc:665
static void GenerateStoreViaSetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > setter)
static Handle< HeapType > CurrentTypeOf(Handle< Object > object, Isolate *isolate)
Definition: ic.cc:676
const int kPointerSize
Definition: globals.h:268
void check(i::Vector< const uint8_t > string)
Handle< Code > CompilePolymorphicIC(TypeHandleList *types, CodeHandleList *handlers, Handle< Name > name, Code::StubType type, IcCheckType check)
Operand FieldOperand(Register object, int offset)
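FieldOperand builds a memory operand for a field of a heap object addressed through its tagged pointer. A minimal sketch of the conventional ia32 definition, assuming only V8's kHeapObjectTag constant (listed below); illustrative, not verbatim from this page:

// Heap pointers carry a low-bit tag, so the tag is folded into the
// displacement rather than stripped from the register first (sketch).
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}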
const Register ecx
const Address kZapValue
Definition: v8globals.h:82
const int kHeapObjectTag
Definition: v8.h:5473
void GenerateLoadConstant(Handle< Object > value)
V8 runtime flag help text (concatenated DEFINE_* flag descriptions; entry for DEFINE_bool(code_comments))
#define __
static const int kInterceptorArgsThisIndex
Definition: stub-cache.h:205
static Handle< PropertyCell > EnsurePropertyCell(Handle< JSGlobalObject > global, Handle< Name > name)
Definition: objects.cc:14752
List< Handle< HeapType > > TypeHandleList
Definition: list.h:219
static const int kPropertiesOffset
Definition: objects.h:2755
static const int kReturnValueDefaultValueIndex
Definition: arguments.h:179
void GenerateNegativeHolderLookup(MacroAssembler *masm, Handle< JSObject > holder, Register holder_reg, Handle< Name > name, Label *miss)
static const int kElementsOffset
Definition: objects.h:2756
Handle< Code > CompileLoadNonexistent(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Handle< Code > CompileLoadGlobal(Handle< HeapType > type, Handle< GlobalObject > holder, Handle< PropertyCell > cell, Handle< Name > name, bool is_dont_delete)
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
void GenerateLoadInterceptor(Register holder_reg, Handle< Object > object, Handle< JSObject > holder, LookupResult *lookup, Handle< Name > name)
static const int kLengthOffset
Definition: objects.h:10076
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
static const int kInterceptorArgsInfoIndex
Definition: stub-cache.h:204
static const int kHeaderSize
Definition: objects.h:3016
void GenerateLoadCallback(Register reg, Handle< ExecutableAccessorInfo > callback)
static Builtins::Name MissBuiltin(Code::Kind kind)
Definition: stub-cache.h:466
static const int kMapOffset
Definition: objects.h:1890
bool is(Register reg) const
List< Handle< Code > > CodeHandleList
Definition: list.h:220
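TypeHandleList and CodeHandleList are the parallel lists consumed by CompilePolymorphicIC (listed above): entry i pairs a receiver type with the handler stub that services it. A hedged construction sketch, where type_a, type_b, handler_a, and handler_b are assumed inputs not shown on this page:

// Parallel lists: types[i] is dispatched to handlers[i].
TypeHandleList types(2);
CodeHandleList handlers(2);
types.Add(type_a);
handlers.Add(handler_a);
types.Add(type_b);
handlers.Add(handler_b);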
const Register ebx
Register CallbackHandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name, Handle< Object > callback)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
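handle() wraps a raw heap pointer in a Handle tracked by the isolate's current HandleScope, so the reference survives GC moves. A hedged usage sketch; raw_object is an assumed Object* from surrounding context, and JSObject::cast is the static listed below:

// Inside a HandleScope, wrap a raw pointer in a GC-safe handle (sketch).
HandleScope scope(isolate);
Handle<JSObject> holder = handle(JSObject::cast(raw_object), isolate);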
static const int kDataOffset
Definition: objects.h:10433
void GenerateStoreField(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Label *miss_label)
static const int kHeaderSize
Definition: objects.h:5604
static Handle< T > null()
Definition: handles.h:80
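Handle<T>::null() yields an empty handle used as a "no value" sentinel; a hedged sketch of the typical guard:

// An empty handle signals absence; is_null() is the standard test (sketch).
Handle<Code> code = Handle<Code>::null();
if (code.is_null()) {
  // ... take the generic fallback path (illustrative).
}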
const Register esi
void NonexistentHandlerFrontend(Handle< HeapType > type, Handle< JSObject > last, Handle< Name > name)
Definition: stub-cache.cc:864
const int kHeapObjectTagSize
Definition: v8.h:5474
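kHeapObjectTag and kHeapObjectTagSize drive V8's pointer tagging on 32-bit targets. The values below are as conventionally configured in V8 and shown purely for illustration, not taken from this page:

// Conventional ia32 tagging scheme (illustrative values):
const int kHeapObjectTag = 1;      // low bits 01 mark a heap-object pointer
const int kHeapObjectTagSize = 2;  // the tag occupies the two low bits
// A tagged field access folds the tag into the displacement:
//   field address = object + offset - kHeapObjectTag
// which is what FieldOperand (listed above) encodes.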
static const int kPrototypeOffset
Definition: objects.h:6427
static const int kFlagsNotUsedInLookup
Definition: objects.h:5684
const Register no_reg
Handle< Code > GetCode(Code::Kind kind, Code::StubType type, Handle< Name > name)
Definition: stub-cache.cc:1281
void GenerateStoreTransition(MacroAssembler *masm, Handle< JSObject > object, LookupResult *lookup, Handle< Map > transition, Handle< Name > name, Register receiver_reg, Register name_reg, Register value_reg, Register scratch1, Register scratch2, Register scratch3, Label *miss_label, Label *slow)
static const int kNativeContextOffset
Definition: objects.h:7611
Register HandlerFrontend(Handle< HeapType > type, Register object_reg, Handle< JSObject > holder, Handle< Name > name)
Definition: stub-cache.cc:850
const Register edx
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< HeapType > type, Register receiver, Handle< JSFunction > getter)
static const int kInterceptorArgsHolderIndex
Definition: stub-cache.h:206
V8 runtime flag help text (concatenated DEFINE_* flag descriptions; garbled in extraction)
Definition: flags.cc:505
Handle< Code > GetICCode(Code::Kind kind, Code::StubType type, Handle< Name > name, InlineCacheState state=MONOMORPHIC)
Definition: stub-cache.cc:1269
static JSObject * cast(Object *obj)
static Handle< Map > TypeToMap(HeapType *type, Isolate *isolate)
Definition: ic.cc:683
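CurrentTypeOf (listed above) and TypeToMap convert between a concrete object and the HeapType/Map views used by the IC handlers. A hedged round-trip sketch, assuming both statics live on the IC class as their ic.cc definitions suggest:

// From an object to its HeapType, then back to the underlying Map (sketch).
Handle<HeapType> type = IC::CurrentTypeOf(object, isolate);
Handle<Map> map = IC::TypeToMap(*type, isolate);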
const XMMRegister xmm0
bool IncludesNumberType(TypeHandleList *types)
Definition: stub-cache.cc:842
static JSFunction * cast(Object *obj)