// stub-cache-mips.cc (V8 3.14.5, as bundled with Node.js 0.10.28)
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


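// Probe one table of the stub cache (primary or secondary). A hit requires
// the entry's key to match |name|, the entry's map to match the receiver's
// map, and the cached code object's flags to match |flags|. On a hit, control
// jumps directly into the cached stub; otherwise execution falls through to
// the local miss label at the end of the generated snippet.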
static void ProbeTable(Isolate* isolate,
                       MacroAssembler* masm,
                       Code::Flags flags,
                       StubCache::Table table,
                       Register receiver,
                       Register name,
                       // Number of the cache entry, not scaled.
                       Register offset,
                       Register scratch,
                       Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  ASSERT(value_off_addr > key_off_addr);
  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
  ASSERT(map_off_addr > key_off_addr);
  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
  ASSERT((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ sll(offset_scratch, offset, 1);
  __ Addu(offset_scratch, offset_scratch, offset);

  // Calculate the base address of the entry.
  __ li(base_addr, Operand(key_offset));
  __ sll(at, offset_scratch, kPointerSizeLog2);
  __ Addu(base_addr, base_addr, at);

  // Check that the key in the entry matches the name.
  __ lw(at, MemOperand(base_addr, 0));
  __ Branch(&miss, ne, name, Operand(at));

  // Check that the map in the entry matches the receiver's map.
  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Branch(&miss, ne, at, Operand(scratch2));

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
  __ Branch(&miss, ne, flags_reg, Operand(flags));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // Miss: fall through.
  __ bind(&miss);
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
                                             Label* miss_label,
                                             Register receiver,
                                             Handle<String> name,
                                             Register scratch0,
                                             Register scratch1) {
  ASSERT(name->IsSymbol());
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);

  Label done;

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // Bail out if the receiver has a named interceptor or requires access checks.
  Register map = scratch1;
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));

  // Check that the receiver is a JSObject.
  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Load the properties array.
  Register properties = scratch0;
  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  // Check that the properties array is a dictionary.
  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
  Register tmp = properties;
  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
  __ Branch(miss_label, ne, map, Operand(tmp));

  // Restore the temporarily used register.
  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
                                                     miss_label,
                                                     &done,
                                                     receiver,
                                                     properties,
                                                     name,
                                                     scratch1);
  __ bind(&done);
  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}


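// The megamorphic stub cache is probed twice: first the primary table, then,
// on a primary miss, the secondary table with a different hash of the same
// (name, map, flags) inputs. Only if both probes miss does the generated
// code fall through so the caller can enter the runtime.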
void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra,
                              Register extra2,
                              Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 12.
  ASSERT(sizeof(Entry) == 12);

  // Make sure that the flags do not name a specific type.
  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  ASSERT(!scratch.is(receiver));
  ASSERT(!scratch.is(name));
  ASSERT(!extra.is(receiver));
  ASSERT(!extra.is(name));
  ASSERT(!extra.is(scratch));
  ASSERT(!extra2.is(receiver));
  ASSERT(!extra2.is(name));
  ASSERT(!extra2.is(scratch));
  ASSERT(!extra2.is(extra));

  // Check register validity.
  ASSERT(!scratch.is(no_reg));
  ASSERT(!extra.is(no_reg));
  ASSERT(!extra2.is(no_reg));
  ASSERT(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
                      extra2, extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Addu(scratch, scratch, at);
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ srl(scratch, scratch, kHeapObjectTagSize);
  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
  __ And(scratch, scratch, Operand(mask));

  // Probe the primary table.
  ProbeTable(isolate,
             masm,
             flags,
             kPrimary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Primary miss: Compute the hash for the secondary probe.
  __ srl(at, name, kHeapObjectTagSize);
  __ Subu(scratch, scratch, at);
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
  __ And(scratch, scratch, Operand(mask2));

  // Probe the secondary table.
  ProbeTable(isolate,
             masm,
             flags,
             kSecondary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Cache miss: Fall through and let the caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
                      extra2, extra3);
}


void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  // Load the global or builtins object from the current context.
  __ lw(prototype,
        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  __ lw(prototype,
        FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
  // Load the initial map. The global functions all have initial maps.
  __ lw(prototype,
        FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the prototype from the initial map.
  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
    MacroAssembler* masm,
    int index,
    Register prototype,
    Label* miss) {
  Isolate* isolate = masm->isolate();
  // Check we're still in the same context.
  __ lw(prototype,
        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ASSERT(!prototype.is(at));
  __ li(at, isolate->global_object());
  __ Branch(miss, ne, prototype, Operand(at));
  // Get the global function with the given index.
  Handle<JSFunction> function(
      JSFunction::cast(isolate->native_context()->get(index)));
  // Load its initial map. The global functions all have initial maps.
  __ li(prototype, Handle<Map>(function->initial_map()));
  // Load the prototype from the initial map.
  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


// Load a fast property out of a holder object (src). In-object properties
// are loaded directly; otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst,
                                            Register src,
                                            Handle<JSObject> holder,
                                            int index) {
  // Adjust for the number of properties stored in the holder.
  index -= holder->map()->inobject_properties();
  if (index < 0) {
    // Get the property straight out of the holder.
    int offset = holder->map()->instance_size() + (index * kPointerSize);
    __ lw(dst, FieldMemOperand(src, offset));
  } else {
    // Calculate the offset into the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
    __ lw(dst, FieldMemOperand(dst, offset));
  }
}


void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss_label);

  // Check that the object is a JS array.
  __ GetObjectType(receiver, scratch, scratch);
  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));

  // Load length directly from the JS array.
  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Ret();
}


// Generate code to check if an object is a string. If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
                                Register receiver,
                                Register scratch1,
                                Register scratch2,
                                Label* smi,
                                Label* non_string_object) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, smi, t0);

  // Check that the object is a string.
  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  __ And(scratch2, scratch1, Operand(kIsNotStringMask));
  // The cast is to resolve the overload for the argument of 0x0.
  __ Branch(non_string_object,
            ne,
            scratch2,
            Operand(static_cast<int32_t>(kStringTag)));
}


// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                            Register receiver,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss,
                                            bool support_wrappers) {
  Label check_wrapper;

  // Check if the object is a string leaving the instance type in the
  // scratch1 register.
  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
                      support_wrappers ? &check_wrapper : miss);

  // Load length directly from the string.
  __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
  __ Ret();

  if (support_wrappers) {
    // Check if the object is a JSValue wrapper.
    __ bind(&check_wrapper);
    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));

    // Unwrap the value and check if the wrapped value is a string.
    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
    __ Ret();
  }
}


void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                 Register receiver,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* miss_label) {
  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
  __ mov(v0, scratch1);
  __ Ret();
}


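// GenerateStoreField has three outcomes: it misses to the IC runtime when the
// property is read-only or not cacheable, it tail-calls the ExtendStorage
// runtime helper when a map transition needs more property slots, and
// otherwise it stores the value (in-object or into the properties array) and
// emits the write barrier when the stored value is not a smi.
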
// Generate StoreField code. The value is passed in the a0 register.
// After executing the generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                      Handle<JSObject> object,
                                      int index,
                                      Handle<Map> transition,
                                      Handle<String> name,
                                      Register receiver_reg,
                                      Register name_reg,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* miss_label) {
  // a0 : value.
  Label exit;

  LookupResult lookup(masm->isolate());
  object->Lookup(*name, &lookup);
  if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
    // In sloppy mode, we could just return the value and be done. However, we
    // might be in strict mode, where we have to throw. Since we cannot tell,
    // go into the slow case unconditionally.
    __ jmp(miss_label);
    return;
  }

  // Check that the map of the object hasn't changed.
  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                             : REQUIRE_EXACT_MAP;
  __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
              DO_SMI_CHECK, mode);

  // Perform the global security token check if needed.
  if (object->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
  }

  // Check that we are allowed to write this.
  if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
    JSObject* holder;
    if (lookup.IsFound()) {
      holder = lookup.holder();
    } else {
      // Find the top object.
      holder = *object;
      do {
        holder = JSObject::cast(holder->GetPrototype());
      } while (holder->GetPrototype()->IsJSObject());
    }
    // We need an extra register; push name_reg to free it up.
    __ push(name_reg);
    Label miss_pop, done_check;
    CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
                    scratch1, scratch2, name, &miss_pop);
    __ jmp(&done_check);
    __ bind(&miss_pop);
    __ pop(name_reg);
    __ jmp(miss_label);
    __ bind(&done_check);
    __ pop(name_reg);
  }

  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  // Perform a map transition for the receiver if necessary.
  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
    // The properties must be extended before we can store the value.
    // We jump to a runtime call that extends the properties array.
    __ push(receiver_reg);
    __ li(a2, Operand(transition));
    __ Push(a2, a0);
    __ TailCallExternalReference(
        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                          masm->isolate()),
        3, 1);
    return;
  }

  if (!transition.is_null()) {
    // Update the map of the object.
    __ li(scratch1, Operand(transition));
    __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));

    // Update the write barrier for the map field and pass the now unused
    // name_reg as scratch register.
    __ RecordWriteField(receiver_reg,
                        HeapObject::kMapOffset,
                        scratch1,
                        name_reg,
                        kRAHasNotBeenSaved,
                        kDontSaveFPRegs,
                        OMIT_REMEMBERED_SET,
                        OMIT_SMI_CHECK);
  }

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    __ sw(a0, FieldMemOperand(receiver_reg, offset));

    // Skip updating the write barrier if storing a smi.
    __ JumpIfSmi(a0, &exit, scratch1);

    // Update the write barrier for the array address.
    // Pass the now unused name_reg as a scratch register.
    __ mov(name_reg, a0);
    __ RecordWriteField(receiver_reg,
                        offset,
                        name_reg,
                        scratch1,
                        kRAHasNotBeenSaved,
                        kDontSaveFPRegs);
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array.
    __ lw(scratch1,
          FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ sw(a0, FieldMemOperand(scratch1, offset));

    // Skip updating the write barrier if storing a smi.
    __ JumpIfSmi(a0, &exit);

    // Update the write barrier for the array address.
    // Ok to clobber receiver_reg and name_reg, since we return.
    __ mov(name_reg, a0);
    __ RecordWriteField(scratch1,
                        offset,
                        name_reg,
                        receiver_reg,
                        kRAHasNotBeenSaved,
                        kDontSaveFPRegs);
  }

  // Return the value (register v0).
  __ bind(&exit);
  __ mov(v0, a0);
  __ Ret();
}


void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
  Handle<Code> code = (kind == Code::LOAD_IC)
      ? masm->isolate()->builtins()->LoadIC_Miss()
      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(code, RelocInfo::CODE_TARGET);
}


static void GenerateCallFunction(MacroAssembler* masm,
                                 Handle<Object> object,
                                 const ParameterCount& arguments,
                                 Label* miss,
                                 Code::ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a0: receiver
  //  -- a1: function to call
  // -----------------------------------
  // Check that the function really is a function.
  __ JumpIfSmi(a1, miss);
  __ GetObjectType(a1, a3, a3);
  __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));

  // Patch the receiver on the stack with the global proxy if
  // necessary.
  if (object->IsGlobalObject()) {
    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
    __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
  }

  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}


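// The six values pushed here (name, interceptor info, receiver, holder,
// interceptor data, isolate) are exactly the six arguments the interceptor
// IC utility functions expect; the runtime calls below all pass an argument
// count of 6.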
static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register receiver,
                                     Register holder,
                                     Register name,
                                     Handle<JSObject> holder_obj) {
  __ push(name);
  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
  Register scratch = name;
  __ li(scratch, Operand(interceptor));
  __ Push(scratch, receiver, holder);
  __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address()));
  __ push(scratch);
}


static void CompileCallLoadPropertyWithInterceptor(
    MacroAssembler* masm,
    Register receiver,
    Register holder,
    Register name,
    Handle<JSObject> holder_obj) {
  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                        masm->isolate());
  __ PrepareCEntryArgs(6);
  __ PrepareCEntryFunction(ref);

  CEntryStub stub(1);
  __ CallStub(&stub);
}


static const int kFastApiCallArguments = 4;


// Reserves space for the extra arguments to the API function in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                       Register scratch) {
  ASSERT(Smi::FromInt(0) == 0);
  for (int i = 0; i < kFastApiCallArguments; i++) {
    __ push(zero_reg);
  }
}


// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
  __ Drop(kFastApiCallArguments);
}


static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                      const CallOptimization& optimization,
                                      int argc) {
  // ----------- S t a t e -------------
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee JS function
  //  -- sp[8]              : call data
  //  -- sp[12]             : isolate
  //  -- sp[16]             : last JS argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first JS argument
  //  -- sp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and set up the context.
  Handle<JSFunction> function = optimization.constant_function();
  __ LoadHeapObject(t1, function);
  __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));

  // Pass the additional arguments.
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data(api_call_info->data());
  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
    __ li(a0, api_call_info);
    __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
  } else {
    __ li(t2, call_data);
  }

  __ li(t3, Operand(ExternalReference::isolate_address()));
  // Store the JS function, call data and isolate.
  __ sw(t1, MemOperand(sp, 1 * kPointerSize));
  __ sw(t2, MemOperand(sp, 2 * kPointerSize));
  __ sw(t3, MemOperand(sp, 3 * kPointerSize));

  // Prepare arguments.
  __ Addu(a2, sp, Operand(3 * kPointerSize));

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  // a1 = v8::Arguments&
  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
  __ Addu(a1, sp, kPointerSize);

  // v8::Arguments::implicit_args_
  __ sw(a2, MemOperand(a1, 0 * kPointerSize));
  // v8::Arguments::values_
  __ Addu(t0, a2, Operand(argc * kPointerSize));
  __ sw(t0, MemOperand(a1, 1 * kPointerSize));
  // v8::Arguments::length_ = argc
  __ li(t0, Operand(argc));
  __ sw(t0, MemOperand(a1, 2 * kPointerSize));
  // v8::Arguments::is_construct_call = 0
  __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  ApiFunction fun(function_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_API_CALL,
                        masm->isolate());
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}


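// CallInterceptorCompiler compiles a property call that goes through a named
// interceptor. It takes one of two paths: CompileCacheable, when the lookup
// found a constant function (optionally using the fast API call machinery),
// and CompileRegular, which bails out to the runtime interceptor helper.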
class CallInterceptorCompiler BASE_EMBEDDED {
 public:
  CallInterceptorCompiler(StubCompiler* stub_compiler,
                          const ParameterCount& arguments,
                          Register name,
                          Code::ExtraICState extra_ic_state)
      : stub_compiler_(stub_compiler),
        arguments_(arguments),
        name_(name),
        extra_ic_state_(extra_ic_state) {}

  void Compile(MacroAssembler* masm,
               Handle<JSObject> object,
               Handle<JSObject> holder,
               Handle<String> name,
               LookupResult* lookup,
               Register receiver,
               Register scratch1,
               Register scratch2,
               Register scratch3,
               Label* miss) {
    ASSERT(holder->HasNamedInterceptor());
    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

    // Check that the receiver isn't a smi.
    __ JumpIfSmi(receiver, miss);
    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
                       holder, lookup, name, optimization, miss);
    } else {
      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
                     name, holder, miss);
    }
  }

 private:
  void CompileCacheable(MacroAssembler* masm,
                        Handle<JSObject> object,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Handle<JSObject> interceptor_holder,
                        LookupResult* lookup,
                        Handle<String> name,
                        const CallOptimization& optimization,
                        Label* miss_label) {
    ASSERT(optimization.is_constant_call());
    ASSERT(!lookup->holder()->IsGlobalObject());
    Counters* counters = masm->isolate()->counters();
    int depth1 = kInvalidProtoDepth;
    int depth2 = kInvalidProtoDepth;
    bool can_do_fast_api_call = false;
    if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
      depth1 = optimization.GetPrototypeDepthOfExpectedType(
          object, interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
        depth2 = optimization.GetPrototypeDepthOfExpectedType(
            interceptor_holder, Handle<JSObject>(lookup->holder()));
      }
      can_do_fast_api_call =
          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
    }

    __ IncrementCounter(counters->call_const_interceptor(), 1,
                        scratch1, scratch2);

    if (can_do_fast_api_call) {
      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
                          scratch1, scratch2);
      ReserveSpaceForFastApiCall(masm, scratch1);
    }

    // Check that the maps from the receiver to the interceptor's holder
    // haven't changed and thus we can invoke the interceptor.
    Label miss_cleanup;
    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, depth1, miss);

    // Invoke the interceptor and, if it provides a value,
    // branch to |regular_invoke|.
    Label regular_invoke;
    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                        &regular_invoke);

    // The interceptor returned nothing for this property. Try to use the
    // cached constant function.

    // Check that the maps from the interceptor's holder to the constant
    // function's holder haven't changed and thus we can use the cached
    // constant function.
    if (*interceptor_holder != lookup->holder()) {
      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                      Handle<JSObject>(lookup->holder()),
                                      scratch1, scratch2, scratch3,
                                      name, depth2, miss);
    } else {
      // CheckPrototypes has a side effect of fetching a 'holder'
      // for the API (an object which is instanceof for the signature). It's
      // safe to omit it here, as if present, it should be fetched
      // by the previous CheckPrototypes.
      ASSERT(depth2 == kInvalidProtoDepth);
    }

    // Invoke the function.
    if (can_do_fast_api_call) {
      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
    } else {
      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
          ? CALL_AS_FUNCTION
          : CALL_AS_METHOD;
      __ InvokeFunction(optimization.constant_function(), arguments_,
                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
    }

    // Deferred code for the fast API call case: clean up the preallocated
    // space.
    if (can_do_fast_api_call) {
      __ bind(&miss_cleanup);
      FreeSpaceForFastApiCall(masm);
      __ Branch(miss_label);
    }

    // Invoke a regular function.
    __ bind(&regular_invoke);
    if (can_do_fast_api_call) {
      FreeSpaceForFastApiCall(masm);
    }
  }

  void CompileRegular(MacroAssembler* masm,
                      Handle<JSObject> object,
                      Register receiver,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Handle<String> name,
                      Handle<JSObject> interceptor_holder,
                      Label* miss_label) {
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, miss_label);

    // Call a runtime function to load the interceptor property.
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Save the name_ register across the call.
    __ push(name_);

    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);

    __ CallExternalReference(
        ExternalReference(
            IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
            masm->isolate()),
        6);
    // Restore the name_ register.
    __ pop(name_);
    // Leave the internal frame.
  }

  void LoadWithInterceptor(MacroAssembler* masm,
                           Register receiver,
                           Register holder,
                           Handle<JSObject> holder_obj,
                           Register scratch,
                           Label* interceptor_succeeded) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      __ Push(holder, name_);
      CompileCallLoadPropertyWithInterceptor(masm,
                                             receiver,
                                             holder,
                                             name_,
                                             holder_obj);
      __ pop(name_);  // Restore the name.
      __ pop(receiver);  // Restore the holder.
    }
    // If the interceptor returns the no-result sentinel, call the constant
    // function.
    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
  }

  StubCompiler* stub_compiler_;
  const ParameterCount& arguments_;
  Register name_;
  Code::ExtraICState extra_ic_state_;
};


// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
                                      Handle<GlobalObject> global,
                                      Handle<String> name,
                                      Register scratch,
                                      Label* miss) {
  Handle<JSGlobalPropertyCell> cell =
      GlobalObject::EnsurePropertyCell(global, name);
  ASSERT(cell->value()->IsTheHole());
  __ li(scratch, Operand(cell));
  __ lw(scratch,
        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(miss, ne, scratch, Operand(at));
}


// Calls GenerateCheckPropertyCell for each global object in the prototype
// chain from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
                                       Handle<JSObject> object,
                                       Handle<JSObject> holder,
                                       Handle<String> name,
                                       Register scratch,
                                       Label* miss) {
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    if (current->IsGlobalObject()) {
      GenerateCheckPropertyCell(masm,
                                Handle<GlobalObject>::cast(current),
                                name,
                                scratch,
                                miss);
    }
    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
  }
}


975 
976 // Convert and store int passed in register ival to IEEE 754 single precision
977 // floating point value at memory location (dst + 4 * wordoffset)
978 // If FPU is available use it for conversion.
979 static void StoreIntAsFloat(MacroAssembler* masm,
980  Register dst,
981  Register wordoffset,
982  Register ival,
983  Register fval,
984  Register scratch1,
985  Register scratch2) {
987  CpuFeatures::Scope scope(FPU);
988  __ mtc1(ival, f0);
989  __ cvt_s_w(f0, f0);
990  __ sll(scratch1, wordoffset, 2);
991  __ addu(scratch1, dst, scratch1);
992  __ swc1(f0, MemOperand(scratch1, 0));
993  } else {
994  // FPU is not available, do manual conversions.
995 
996  Label not_special, done;
997  // Move sign bit from source to destination. This works because the sign
998  // bit in the exponent word of the double has the same position and polarity
999  // as the 2's complement sign bit in a Smi.
1000  ASSERT(kBinary32SignMask == 0x80000000u);
1001 
1002  __ And(fval, ival, Operand(kBinary32SignMask));
1003  // Negate value if it is negative.
1004  __ subu(scratch1, zero_reg, ival);
1005  __ Movn(ival, scratch1, fval);
1006 
1007  // We have -1, 0 or 1, which we treat specially. Register ival contains
1008  // absolute value: it is either equal to 1 (special case of -1 and 1),
1009  // greater than 1 (not a special case) or less than 1 (special case of 0).
1010  __ Branch(&not_special, gt, ival, Operand(1));
1011 
1012  // For 1 or -1 we need to or in the 0 exponent (biased).
1013  static const uint32_t exponent_word_for_1 =
1015 
1016  __ Xor(scratch1, ival, Operand(1));
1017  __ li(scratch2, exponent_word_for_1);
1018  __ or_(scratch2, fval, scratch2);
1019  __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
1020  __ Branch(&done);
1021 
1022  __ bind(&not_special);
1023  // Count leading zeros.
1024  // Gets the wrong answer for 0, but we already checked for that case above.
1025  Register zeros = scratch2;
1026  __ Clz(zeros, ival);
1027 
1028  // Compute exponent and or it into the exponent register.
1029  __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
1030  __ subu(scratch1, scratch1, zeros);
1031 
1032  __ sll(scratch1, scratch1, kBinary32ExponentShift);
1033  __ or_(fval, fval, scratch1);
1034 
1035  // Shift up the source chopping the top bit off.
1036  __ Addu(zeros, zeros, Operand(1));
1037  // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
1038  __ sllv(ival, ival, zeros);
1039  // And the top (top 20 bits).
1040  __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
1041  __ or_(fval, fval, scratch1);
1042 
1043  __ bind(&done);
1044 
1045  __ sll(scratch1, wordoffset, 2);
1046  __ addu(scratch1, dst, scratch1);
1047  __ sw(fval, MemOperand(scratch1, 0));
1048  }
1049 }
1050 
1051 
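// Worked example for the helper below: for hiword = 0x80000000 with
// leading_zeroes = 0, meaningful_bits is 31 and the biased exponent is
// 1023 + 31 = 1054 (0x41E). The mantissa shift for the hi word is 11, so
// hiword becomes 0x00100000; or-ing in the exponent and then clearing the
// bit corrupted by the implicit leading 1 yields 0x41E00000:0x00000000,
// which is the IEEE 754 double 2^31.
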
// Convert an unsigned integer with a specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
  if (mantissa_shift_for_hi_word > 0) {
    __ sll(loword, hiword, mantissa_shift_for_lo_word);
    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
    __ or_(hiword, scratch, hiword);
  } else {
    __ mov(loword, zero_reg);
    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
    __ or_(hiword, scratch, hiword);
  }

  // If the least significant bit of the biased exponent was not 1, it was
  // corrupted by the most significant bit of the mantissa, so we should
  // fix that.
  if (!(biased_exponent & 1)) {
    __ li(scratch, 1 << HeapNumber::kExponentShift);
    __ nor(scratch, scratch, scratch);
    __ and_(hiword, hiword, scratch);
  }
}


#undef __
#define __ ACCESS_MASM(masm())


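// CheckPrototypes walks the prototype chain from |object| to |holder|,
// emitting a map check (or a dictionary negative lookup for slow-mode
// objects) at every hop. It returns the register that ends up holding the
// holder and branches to |miss| if any map has changed. For global objects
// along the chain, GenerateCheckPropertyCells additionally verifies that the
// property is still absent.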
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                       Register object_reg,
                                       Handle<JSObject> holder,
                                       Register holder_reg,
                                       Register scratch1,
                                       Register scratch2,
                                       Handle<String> name,
                                       int save_at_depth,
                                       Label* miss) {
  // Make sure there's no overlap between the holder and object registers.
  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
         && !scratch2.is(scratch1));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 0;

  if (save_at_depth == depth) {
    __ sw(reg, MemOperand(sp));
  }

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    ++depth;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());

    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
    if (!current->HasFastProperties() &&
        !current->IsJSGlobalObject() &&
        !current->IsJSGlobalProxy()) {
      if (!name->IsSymbol()) {
        name = factory()->LookupSymbol(name);
      }
      ASSERT(current->property_dictionary()->FindEntry(*name) ==
             StringDictionary::kNotFound);

      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
                                       scratch1, scratch2);

      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
      reg = holder_reg;  // From now on the object will be in holder_reg.
      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
    } else {
      Handle<Map> current_map(current->map());
      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
                  ALLOW_ELEMENT_TRANSITION_MAPS);
      // Check access rights to the global object. This has to happen after
      // the map check so that we know that the object is actually a global
      // object.
      if (current->IsJSGlobalProxy()) {
        __ CheckAccessGlobalProxy(reg, scratch2, miss);
      }
      reg = holder_reg;  // From now on the object will be in holder_reg.

      if (heap()->InNewSpace(*prototype)) {
        // The prototype is in new space; we cannot store a reference to it
        // in the code. Load it from the map.
        __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
      } else {
        // The prototype is in old space; load it directly.
        __ li(reg, Operand(prototype));
      }
    }

    if (save_at_depth == depth) {
      __ sw(reg, MemOperand(sp));
    }

    // Go to the next object in the prototype chain.
    current = prototype;
  }

  // Log the check depth.
  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));

  // Check the holder map.
  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);

  // Perform a security check for access to the global object.
  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
  if (holder->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(reg, scratch1, miss);
  }

  // If we've skipped any global objects, it's not enough to verify that
  // their maps haven't changed. We also need to check that the property
  // cell for the property is still empty.
  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);

  // Return the register containing the holder.
  return reg;
}


void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                     Handle<JSObject> holder,
                                     Register receiver,
                                     Register scratch1,
                                     Register scratch2,
                                     Register scratch3,
                                     int index,
                                     Handle<String> name,
                                     Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
  GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
  __ Ret();
}


void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<JSFunction> value,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss, scratch1);

  // Check that the maps haven't changed.
  CheckPrototypes(object, receiver, holder,
                  scratch1, scratch2, scratch3, name, miss);

  // Return the constant value.
  __ LoadHeapObject(v0, value);
  __ Ret();
}


void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
                                                  Register name_reg,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Register scratch3,
                                                  Handle<AccessorInfo> callback,
                                                  Handle<String> name,
                                                  Label* miss) {
  ASSERT(!receiver.is(scratch1));
  ASSERT(!receiver.is(scratch2));
  ASSERT(!receiver.is(scratch3));

  // Load the properties dictionary.
  Register dictionary = scratch1;
  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  // Probe the dictionary.
  Label probe_done;
  StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
                                                     miss,
                                                     &probe_done,
                                                     dictionary,
                                                     name_reg,
                                                     scratch2,
                                                     scratch3);
  __ bind(&probe_done);

  // If probing finds an entry in the dictionary, scratch3 contains the
  // pointer into the dictionary. Check that the value is the callback.
  Register pointer = scratch3;
  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
  __ Branch(miss, ne, scratch2, Operand(callback));
}


void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register name_reg,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Handle<AccessorInfo> callback,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss, scratch1);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
                                 scratch2, scratch3, name, miss);

  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
    GenerateDictionaryLoadCallback(
        reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
  }

  // Build the AccessorInfo::args_ list on the stack and push the property
  // name below the exit frame to make the GC aware of them and store
  // pointers to them.
  __ push(receiver);
  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
  if (heap()->InNewSpace(callback->data())) {
    __ li(scratch3, callback);
    __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
  } else {
    __ li(scratch3, Handle<Object>(callback->data()));
  }
  __ Subu(sp, sp, 4 * kPointerSize);
  __ sw(reg, MemOperand(sp, 3 * kPointerSize));
  __ sw(scratch3, MemOperand(sp, 2 * kPointerSize));
  __ li(scratch3, Operand(ExternalReference::isolate_address()));
  __ sw(scratch3, MemOperand(sp, 1 * kPointerSize));
  __ sw(name_reg, MemOperand(sp, 0 * kPointerSize));

  __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
  __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>

  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm(), StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create an AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object** args_) as the data.
  __ sw(a2, MemOperand(sp, kPointerSize));
  // a2 (second argument - see note above) = AccessorInfo&
  __ Addu(a2, sp, kPointerSize);

  const int kStackUnwindSpace = 5;
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_GETTER_CALL,
                        masm()->isolate());
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}


void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           Handle<JSObject> interceptor_holder,
                                           LookupResult* lookup,
                                           Register receiver,
                                           Register name_reg,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Handle<String> name,
                                           Label* miss) {
  ASSERT(interceptor_holder->HasNamedInterceptor());
  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // So far the most popular follow-ups for interceptor loads are FIELD
  // and CALLBACKS, so inline only them; other cases may be added
  // later.
  bool compile_followup_inline = false;
  if (lookup->IsFound() && lookup->IsCacheable()) {
    if (lookup->IsField()) {
      compile_followup_inline = true;
    } else if (lookup->type() == CALLBACKS &&
               lookup->GetCallbackObject()->IsAccessorInfo()) {
      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
      compile_followup_inline = callback->getter() != NULL &&
          callback->IsCompatibleReceiver(*object);
    }
  }

  if (compile_followup_inline) {
    // Compile the interceptor call, followed by inline code to load the
    // property from further up the prototype chain if the call fails.
    // Check that the maps haven't changed.
    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                          scratch1, scratch2, scratch3,
                                          name, miss);
    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));

    // Preserve the receiver register explicitly whenever it is different from
    // the holder and it is needed should the interceptor return without any
    // result. The CALLBACKS case needs the receiver to be passed into C++
    // code, the FIELD case might cause a miss during the prototype check.
    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
        (lookup->type() == CALLBACKS || must_perform_prototype_check);

    // Save the necessary data before invoking the interceptor.
    // Requires a frame to make the GC aware of pushed pointers.
    {
      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
      if (must_preserve_receiver_reg) {
        __ Push(receiver, holder_reg, name_reg);
      } else {
        __ Push(holder_reg, name_reg);
      }
      // Invoke the interceptor. Note: the map checks from the receiver to the
      // interceptor's holder have been compiled before (see a caller
      // of this method).
      CompileCallLoadPropertyWithInterceptor(masm(),
                                             receiver,
                                             holder_reg,
                                             name_reg,
                                             interceptor_holder);
      // Check if the interceptor provided a value for the property. If it's
      // the case, return immediately.
      Label interceptor_failed;
      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
      frame_scope.GenerateLeaveFrame();
      __ Ret();

      __ bind(&interceptor_failed);
      __ pop(name_reg);
      __ pop(holder_reg);
      if (must_preserve_receiver_reg) {
        __ pop(receiver);
      }
      // Leave the internal frame.
    }
    // Check that the maps from the interceptor's holder to the lookup's
    // holder haven't changed, and load the lookup's holder into the
    // |holder_reg| register.
    if (must_perform_prototype_check) {
      holder_reg = CheckPrototypes(interceptor_holder,
                                   holder_reg,
                                   Handle<JSObject>(lookup->holder()),
                                   scratch1,
                                   scratch2,
                                   scratch3,
                                   name,
                                   miss);
    }

    if (lookup->IsField()) {
      // We found a FIELD property in the prototype chain of the interceptor's
      // holder. Retrieve the field from the field's holder.
      GenerateFastPropertyLoad(masm(), v0, holder_reg,
                               Handle<JSObject>(lookup->holder()),
                               lookup->GetFieldIndex());
      __ Ret();
    } else {
      // We found a CALLBACKS property in the prototype chain of the
      // interceptor's holder.
      ASSERT(lookup->type() == CALLBACKS);
      Handle<AccessorInfo> callback(
          AccessorInfo::cast(lookup->GetCallbackObject()));
      ASSERT(callback->getter() != NULL);

      // Tail call to the runtime.
      // Important invariant in the CALLBACKS case: the code above must be
      // structured to never clobber the |receiver| register.
      __ li(scratch2, callback);

      __ Push(receiver, holder_reg);
      __ lw(scratch3,
            FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
      __ li(scratch1, Operand(ExternalReference::isolate_address()));
      __ Push(scratch3, scratch1, scratch2, name_reg);

      ExternalReference ref =
          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                            masm()->isolate());
      __ TailCallExternalReference(ref, 6, 1);
    }
  } else {  // !compile_followup_inline
    // Call the runtime system to load the interceptor.
    // Check that the maps haven't changed.
    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                          scratch1, scratch2, scratch3,
                                          name, miss);
    PushInterceptorArguments(masm(), receiver, holder_reg,
                             name_reg, interceptor_holder);

    ExternalReference ref = ExternalReference(
        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
    __ TailCallExternalReference(ref, 6, 1);
  }
}


void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
  if (kind_ == Code::KEYED_CALL_IC) {
    __ Branch(miss, ne, a2, Operand(name));
  }
}


void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
                                                   Handle<JSObject> holder,
                                                   Handle<String> name,
                                                   Label* miss) {
  ASSERT(holder->IsGlobalObject());

  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ lw(a0, MemOperand(sp, argc * kPointerSize));

  // Check that the maps haven't changed.
  __ JumpIfSmi(a0, miss);
  CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
}


void CallStubCompiler::GenerateLoadFunctionFromCell(
    Handle<JSGlobalPropertyCell> cell,
    Handle<JSFunction> function,
    Label* miss) {
  // Get the value from the cell.
  __ li(a3, Operand(cell));
  __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (heap()->InNewSpace(*function)) {
    // We can't embed a pointer to a function in new space, so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ JumpIfSmi(a1, miss);
    __ GetObjectType(a1, a3, a3);
    __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));

    // Check the shared function info. Make sure it hasn't changed.
    __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
    __ Branch(miss, ne, t0, Operand(a3));
  } else {
    __ Branch(miss, ne, a1, Operand(function));
  }
}


void CallStubCompiler::GenerateMissBranch() {
  Handle<Code> code =
      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                               kind_,
                                               extra_state_);
  __ Jump(code, RelocInfo::CODE_TARGET);
}


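// CompileCallField: the call target is read out of a fast property of the
// receiver. The stub checks the receiver's prototype chain, loads the
// function from the field, and dispatches through GenerateCallFunction,
// which also validates that the loaded value really is a function.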
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                Handle<JSObject> holder,
                                                int index,
                                                Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  GenerateNameCheck(name, &miss);

  const int argc = arguments().immediate();

  // Get the receiver of the function from the stack into a0.
  __ lw(a0, MemOperand(sp, argc * kPointerSize));
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(a0, &miss, t0);

  // Do the right check and compute the holder register.
  Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
  GenerateFastPropertyLoad(masm(), a1, reg, holder, index);

  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);

  // Handle call cache miss.
  __ bind(&miss);
  GenerateMissBranch();

  // Return the generated code.
  return GetCode(Code::FIELD, name);
}


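// CompileArrayPushCall inlines Array.prototype.push for the common cases:
// no arguments (just return the length) and a single argument that fits in
// the existing elements backing store, with bailouts to the generic builtin
// when the elements need to be grown or transitioned.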
1584 Handle<Code> CallStubCompiler::CompileArrayPushCall(
1585  Handle<Object> object,
1586  Handle<JSObject> holder,
1587  Handle<JSGlobalPropertyCell> cell,
1588  Handle<JSFunction> function,
1589  Handle<String> name) {
1590  // ----------- S t a t e -------------
1591  // -- a2 : name
1592  // -- ra : return address
1593  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1594  // -- ...
1595  // -- sp[argc * 4] : receiver
1596  // -----------------------------------
1597 
1598  // If object is not an array, bail out to regular call.
1599  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1600 
1601  Label miss;
1602 
1603  GenerateNameCheck(name, &miss);
1604 
1605  Register receiver = a1;
1606 
1607  // Get the receiver from the stack.
1608  const int argc = arguments().immediate();
1609  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1610 
1611  // Check that the receiver isn't a smi.
1612  __ JumpIfSmi(receiver, &miss);
1613 
1614  // Check that the maps haven't changed.
1615  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
1616  name, &miss);
1617 
1618  if (argc == 0) {
1619  // Nothing to do, just return the length.
1620  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1621  __ Drop(argc + 1);
1622  __ Ret();
1623  } else {
1624  Label call_builtin;
1625  if (argc == 1) { // Otherwise fall through to call the builtin.
1626  Label attempt_to_grow_elements;
1627 
1628  Register elements = t2;
1629  Register end_elements = t1;
1630  // Get the elements array of the object.
1631  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1632 
1633  // Check that the elements are in fast mode and writable.
1634  __ CheckMap(elements,
1635  v0,
1636  Heap::kFixedArrayMapRootIndex,
1637  &call_builtin,
1638  DONT_DO_SMI_CHECK);
1639
1640  // Get the array's length into v0 and calculate new length.
1641  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1642  STATIC_ASSERT(kSmiTagSize == 1);
1643  STATIC_ASSERT(kSmiTag == 0);
1644  __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
1645 
1646  // Get the elements' length.
1647  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1648 
1649  // Check if we could survive without allocation.
1650  __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
1651 
1652  // Check if value is a smi.
1653  Label with_write_barrier;
1654  __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
1655  __ JumpIfNotSmi(t0, &with_write_barrier);
1656 
1657  // Save new length.
1658  __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1659 
1660  // Store the value.
1661  // We may need a register containing the address end_elements below,
1662  // so write back the value in end_elements.
1663  __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1664  __ Addu(end_elements, elements, end_elements);
1665  const int kEndElementsOffset =
1666  FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
1667  __ Addu(end_elements, end_elements, kEndElementsOffset);
1668  __ sw(t0, MemOperand(end_elements));
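 // Note on the address arithmetic above: a smi is its value shifted left by
 // one (kSmiTag == 0, kSmiTagSize == 1), so shifting the smi length left by
 // kPointerSizeLog2 - kSmiTagSize == 1 yields length * kPointerSize, the byte
 // size of the occupied slots. kEndElementsOffset then skips the FixedArray
 // header, compensates for the heap-object tag bit, and backs up by argc
 // slots, so with argc == 1 the store lands exactly in the first free
 // element slot.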
1669 
1670  // Check for a smi.
1671  __ Drop(argc + 1);
1672  __ Ret();
1673 
1674  __ bind(&with_write_barrier);
1675 
1676  __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1677 
1678  if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
1679  Label fast_object, not_fast_object;
1680  __ CheckFastObjectElements(a3, t3, &not_fast_object);
1681  __ jmp(&fast_object);
1682  // In case of fast smi-only, convert to fast object, otherwise bail out.
1683  __ bind(&not_fast_object);
1684  __ CheckFastSmiElements(a3, t3, &call_builtin);
1685  // a1: receiver
1686  // a3: map
1687  Label try_holey_map;
1688  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1689  FAST_ELEMENTS,
1690  a3,
1691  t3,
1692  &try_holey_map);
1693  __ mov(a2, receiver);
1694  ElementsTransitionGenerator::
1695  GenerateMapChangeElementsTransition(masm());
1696  __ jmp(&fast_object);
1697 
1698  __ bind(&try_holey_map);
1699  __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
1700  FAST_HOLEY_ELEMENTS,
1701  a3,
1702  t3,
1703  &call_builtin);
1704  __ mov(a2, receiver);
1705  ElementsTransitionGenerator::
1706  GenerateMapChangeElementsTransition(masm());
1707  __ bind(&fast_object);
1708  } else {
1709  __ CheckFastObjectElements(a3, a3, &call_builtin);
1710  }
1711 
1712  // Save new length.
1713  __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1714 
1715  // Store the value.
1716  // We may need a register containing the address end_elements below,
1717  // so write back the value in end_elements.
1718  __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1719  __ Addu(end_elements, elements, end_elements);
1720  __ Addu(end_elements, end_elements, kEndElementsOffset);
1721  __ sw(t0, MemOperand(end_elements));
1722 
1723  __ RecordWrite(elements,
1724  end_elements,
1725  t0,
1726  kRAHasNotBeenSaved,
1727  kDontSaveFPRegs,
1728  EMIT_REMEMBERED_SET,
1729  OMIT_SMI_CHECK);
1730  __ Drop(argc + 1);
1731  __ Ret();
1732 
1733  __ bind(&attempt_to_grow_elements);
1734  // v0: array's length + 1.
1735  // t0: elements' length.
1736 
1737  if (!FLAG_inline_new) {
1738  __ Branch(&call_builtin);
1739  }
1740 
1741  __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
1742  // Growing elements that are smi-only requires special handling in case
1743  // the new element is not a smi. For now, delegate to the builtin.
1744  Label no_fast_elements_check;
1745  __ JumpIfSmi(a2, &no_fast_elements_check);
1746  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1747  __ CheckFastObjectElements(t3, t3, &call_builtin);
1748  __ bind(&no_fast_elements_check);
1749 
1750  ExternalReference new_space_allocation_top =
1751  ExternalReference::new_space_allocation_top_address(
1752  masm()->isolate());
1753  ExternalReference new_space_allocation_limit =
1754  ExternalReference::new_space_allocation_limit_address(
1755  masm()->isolate());
1756 
1757  const int kAllocationDelta = 4;
1758  // Load top and check if it is the end of elements.
1759  __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1760  __ Addu(end_elements, elements, end_elements);
1761  __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
1762  __ li(t3, Operand(new_space_allocation_top));
1763  __ lw(a3, MemOperand(t3));
1764  __ Branch(&call_builtin, ne, end_elements, Operand(a3));
1765 
1766  __ li(t5, Operand(new_space_allocation_limit));
1767  __ lw(t5, MemOperand(t5));
1768  __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
1769  __ Branch(&call_builtin, hi, a3, Operand(t5));
1770 
1771  // We fit and could grow elements.
1772  // Update new_space_allocation_top.
1773  __ sw(a3, MemOperand(t3));
1774  // Push the argument.
1775  __ sw(a2, MemOperand(end_elements));
1776  // Fill the rest with holes.
1777  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
1778  for (int i = 1; i < kAllocationDelta; i++) {
1779  __ sw(a3, MemOperand(end_elements, i * kPointerSize));
1780  }
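 // Note: this grow-in-place path only triggers when the elements array is the
 // last object in new space, i.e. its end equals the allocation top. The stub
 // then claims kAllocationDelta more words by bumping the top pointer, stores
 // the pushed value in the first new slot, and fills the remaining three with
 // the hole; since all these stores target new space, no write barrier is
 // needed.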
1781 
1782  // Update elements' and array's sizes.
1783  __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1784  __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
1785  __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1786 
1787  // Elements are in new space, so write barrier is not required.
1788  __ Drop(argc + 1);
1789  __ Ret();
1790  }
1791  __ bind(&call_builtin);
1792  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
1793  masm()->isolate()),
1794  argc + 1,
1795  1);
1796  }
1797 
1798  // Handle call cache miss.
1799  __ bind(&miss);
1800  GenerateMissBranch();
1801 
1802  // Return the generated code.
1803  return GetCode(function);
1804 }
1805 
1806 
1807 Handle<Code> CallStubCompiler::CompileArrayPopCall(
1808  Handle<Object> object,
1809  Handle<JSObject> holder,
1810  Handle<JSGlobalPropertyCell> cell,
1811  Handle<JSFunction> function,
1812  Handle<String> name) {
1813  // ----------- S t a t e -------------
1814  // -- a2 : name
1815  // -- ra : return address
1816  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1817  // -- ...
1818  // -- sp[argc * 4] : receiver
1819  // -----------------------------------
1820 
1821  // If object is not an array, bail out to regular call.
1822  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1823 
1824  Label miss, return_undefined, call_builtin;
1825  Register receiver = a1;
1826  Register elements = a3;
1827  GenerateNameCheck(name, &miss);
1828 
1829  // Get the receiver from the stack.
1830  const int argc = arguments().immediate();
1831  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1832  // Check that the receiver isn't a smi.
1833  __ JumpIfSmi(receiver, &miss);
1834 
1835  // Check that the maps haven't changed.
1836  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
1837  t0, v0, name, &miss);
1838 
1839  // Get the elements array of the object.
1840  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1841 
1842  // Check that the elements are in fast mode and writable.
1843  __ CheckMap(elements,
1844  v0,
1845  Heap::kFixedArrayMapRootIndex,
1846  &call_builtin,
1847  DONT_DO_SMI_CHECK);
1848
1849  // Get the array's length into t0 and calculate new length.
1850  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1851  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
1852  __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
1853 
1854  // Get the last element.
1855  __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
1856  STATIC_ASSERT(kSmiTagSize == 1);
1857  STATIC_ASSERT(kSmiTag == 0);
1858  // We can't address the last element in one operation. Compute the more
1859  // expensive shift first, and use an offset later on.
1860  __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
1861  __ Addu(elements, elements, t1);
1862  __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
1863  __ Branch(&call_builtin, eq, v0, Operand(t2));
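 // Note: finding the hole here means the array has no own element at this
 // index, in which case pop may have to consult the prototype chain, so the
 // stub presumably defers to the generic builtin rather than deciding inline.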
1864 
1865  // Set the array's length.
1866  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1867 
1868  // Fill with the hole.
1869  __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
1870  __ Drop(argc + 1);
1871  __ Ret();
1872 
1873  __ bind(&return_undefined);
1874  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
1875  __ Drop(argc + 1);
1876  __ Ret();
1877 
1878  __ bind(&call_builtin);
1879  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
1880  masm()->isolate()),
1881  argc + 1,
1882  1);
1883 
1884  // Handle call cache miss.
1885  __ bind(&miss);
1886  GenerateMissBranch();
1887 
1888  // Return the generated code.
1889  return GetCode(function);
1890 }
1891 
1892 
1893 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
1894  Handle<Object> object,
1895  Handle<JSObject> holder,
1896  Handle<JSGlobalPropertyCell> cell,
1897  Handle<JSFunction> function,
1898  Handle<String> name) {
1899  // ----------- S t a t e -------------
1900  // -- a2 : function name
1901  // -- ra : return address
1902  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1903  // -- ...
1904  // -- sp[argc * 4] : receiver
1905  // -----------------------------------
1906 
1907  // If object is not a string, bail out to regular call.
1908  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1909 
1910  const int argc = arguments().immediate();
1911  Label miss;
1912  Label name_miss;
1913  Label index_out_of_range;
1914 
1915  Label* index_out_of_range_label = &index_out_of_range;
1916 
1917  if (kind_ == Code::CALL_IC &&
1918  (CallICBase::StringStubState::decode(extra_state_) ==
1919  DEFAULT_STRING_STUB)) {
1920  index_out_of_range_label = &miss;
1921  }
1922 
1923  GenerateNameCheck(name, &name_miss);
1924 
1925  // Check that the maps starting from the prototype haven't changed.
1926  GenerateDirectLoadGlobalFunctionPrototype(masm(),
1927  Context::STRING_FUNCTION_INDEX,
1928  v0,
1929  &miss);
1930  ASSERT(!object.is_identical_to(holder));
1931  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1932  v0, holder, a1, a3, t0, name, &miss);
1933 
1934  Register receiver = a1;
1935  Register index = t1;
1936  Register result = v0;
1937  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1938  if (argc > 0) {
1939  __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
1940  } else {
1941  __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1942  }
1943 
1944  StringCharCodeAtGenerator generator(receiver,
1945  index,
1946  result,
1947  &miss, // When not a string.
1948  &miss, // When not a number.
1949  index_out_of_range_label,
1950  STRING_INDEX_IS_NUMBER);
1951  generator.GenerateFast(masm());
1952  __ Drop(argc + 1);
1953  __ Ret();
1954 
1955  StubRuntimeCallHelper call_helper;
1956  generator.GenerateSlow(masm(), call_helper);
1957 
1958  if (index_out_of_range.is_linked()) {
1959  __ bind(&index_out_of_range);
1960  __ LoadRoot(v0, Heap::kNanValueRootIndex);
1961  __ Drop(argc + 1);
1962  __ Ret();
1963  }
1964 
1965  __ bind(&miss);
1966  // Restore function name in a2.
1967  __ li(a2, name);
1968  __ bind(&name_miss);
1969  GenerateMissBranch();
1970 
1971  // Return the generated code.
1972  return GetCode(function);
1973 }
1974 
1975 
1976 Handle<Code> CallStubCompiler::CompileStringCharAtCall(
1977  Handle<Object> object,
1978  Handle<JSObject> holder,
1979  Handle<JSGlobalPropertyCell> cell,
1980  Handle<JSFunction> function,
1981  Handle<String> name) {
1982  // ----------- S t a t e -------------
1983  // -- a2 : function name
1984  // -- ra : return address
1985  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1986  // -- ...
1987  // -- sp[argc * 4] : receiver
1988  // -----------------------------------
1989 
1990  // If object is not a string, bail out to regular call.
1991  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1992 
1993  const int argc = arguments().immediate();
1994  Label miss;
1995  Label name_miss;
1996  Label index_out_of_range;
1997  Label* index_out_of_range_label = &index_out_of_range;
1998  if (kind_ == Code::CALL_IC &&
1999  (CallICBase::StringStubState::decode(extra_state_) ==
2000  DEFAULT_STRING_STUB)) {
2001  index_out_of_range_label = &miss;
2002  }
2003  GenerateNameCheck(name, &name_miss);
2004 
2005  // Check that the maps starting from the prototype haven't changed.
2006  GenerateDirectLoadGlobalFunctionPrototype(masm(),
2007  Context::STRING_FUNCTION_INDEX,
2008  v0,
2009  &miss);
2010  ASSERT(!object.is_identical_to(holder));
2011  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2012  v0, holder, a1, a3, t0, name, &miss);
2013 
2014  Register receiver = v0;
2015  Register index = t1;
2016  Register scratch = a3;
2017  Register result = v0;
2018  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
2019  if (argc > 0) {
2020  __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
2021  } else {
2022  __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
2023  }
2024 
2025  StringCharAtGenerator generator(receiver,
2026  index,
2027  scratch,
2028  result,
2029  &miss, // When not a string.
2030  &miss, // When not a number.
2031  index_out_of_range_label,
2032  STRING_INDEX_IS_NUMBER);
2033  generator.GenerateFast(masm());
2034  __ Drop(argc + 1);
2035  __ Ret();
2036 
2037  StubRuntimeCallHelper call_helper;
2038  generator.GenerateSlow(masm(), call_helper);
2039 
2040  if (index_out_of_range.is_linked()) {
2041  __ bind(&index_out_of_range);
2042  __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
2043  __ Drop(argc + 1);
2044  __ Ret();
2045  }
2046 
2047  __ bind(&miss);
2048  // Restore function name in a2.
2049  __ li(a2, name);
2050  __ bind(&name_miss);
2051  GenerateMissBranch();
2052 
2053  // Return the generated code.
2054  return GetCode(function);
2055 }
2056 
2057 
2058 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
2059  Handle<Object> object,
2060  Handle<JSObject> holder,
2061  Handle<JSGlobalPropertyCell> cell,
2062  Handle<JSFunction> function,
2063  Handle<String> name) {
2064  // ----------- S t a t e -------------
2065  // -- a2 : function name
2066  // -- ra : return address
2067  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2068  // -- ...
2069  // -- sp[argc * 4] : receiver
2070  // -----------------------------------
2071 
2072  const int argc = arguments().immediate();
2073 
2074  // If the object is not a JSObject or we got an unexpected number of
2075  // arguments, bail out to the regular call.
2076  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2077 
2078  Label miss;
2079  GenerateNameCheck(name, &miss);
2080 
2081  if (cell.is_null()) {
2082  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
2083 
2084  STATIC_ASSERT(kSmiTag == 0);
2085  __ JumpIfSmi(a1, &miss);
2086 
2087  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
2088  name, &miss);
2089  } else {
2090  ASSERT(cell->value() == *function);
2091  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2092  &miss);
2093  GenerateLoadFunctionFromCell(cell, function, &miss);
2094  }
2095 
2096  // Load the char code argument.
2097  Register code = a1;
2098  __ lw(code, MemOperand(sp, 0 * kPointerSize));
2099 
2100  // Check the code is a smi.
2101  Label slow;
2102  STATIC_ASSERT(kSmiTag == 0);
2103  __ JumpIfNotSmi(code, &slow);
2104 
2105  // Convert the smi code to uint16.
2106  __ And(code, code, Operand(Smi::FromInt(0xffff)));
2107 
2108  StringCharFromCodeGenerator generator(code, v0);
2109  generator.GenerateFast(masm());
2110  __ Drop(argc + 1);
2111  __ Ret();
2112 
2113  StubRuntimeCallHelper call_helper;
2114  generator.GenerateSlow(masm(), call_helper);
2115 
2116  // Tail call the full function. We do not have to patch the receiver
2117  // because the function makes no use of it.
2118  __ bind(&slow);
2119  __ InvokeFunction(
2120  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2121 
2122  __ bind(&miss);
2123  // a2: function name.
2124  GenerateMissBranch();
2125 
2126  // Return the generated code.
2127  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2128 }
2129 
2130 
2131 Handle<Code> CallStubCompiler::CompileMathFloorCall(
2132  Handle<Object> object,
2133  Handle<JSObject> holder,
2134  Handle<JSGlobalPropertyCell> cell,
2135  Handle<JSFunction> function,
2136  Handle<String> name) {
2137  // ----------- S t a t e -------------
2138  // -- a2 : function name
2139  // -- ra : return address
2140  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2141  // -- ...
2142  // -- sp[argc * 4] : receiver
2143  // -----------------------------------
2144 
2145  if (!CpuFeatures::IsSupported(FPU)) {
2146  return Handle<Code>::null();
2147  }
2148 
2149  CpuFeatures::Scope scope_fpu(FPU);
2150  const int argc = arguments().immediate();
2151  // If the object is not a JSObject or we got an unexpected number of
2152  // arguments, bail out to the regular call.
2153  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2154 
2155  Label miss, slow;
2156  GenerateNameCheck(name, &miss);
2157 
2158  if (cell.is_null()) {
2159  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
2160  STATIC_ASSERT(kSmiTag == 0);
2161  __ JumpIfSmi(a1, &miss);
2162  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
2163  name, &miss);
2164  } else {
2165  ASSERT(cell->value() == *function);
2166  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2167  &miss);
2168  GenerateLoadFunctionFromCell(cell, function, &miss);
2169  }
2170 
2171  // Load the (only) argument into v0.
2172  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2173 
2174  // If the argument is a smi, just return.
2175  STATIC_ASSERT(kSmiTag == 0);
2176  __ And(t0, v0, Operand(kSmiTagMask));
2177  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
2178  __ Ret(eq, t0, Operand(zero_reg));
2179 
2180  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2181 
2182  Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
2183 
2184  // If fpu is enabled, we use the floor instruction.
2185 
2186  // Load the HeapNumber value.
2187  __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2188
2189  // Backup FCSR.
2190  __ cfc1(a3, FCSR);
2191  // Clearing FCSR clears the exception mask with no side-effects.
2192  __ ctc1(zero_reg, FCSR);
2193  // Convert the argument to an integer.
2194  __ floor_w_d(f0, f0);
2195 
2196  // Start checking for special cases.
2197  // Get the argument exponent and clear the sign bit.
2198  __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
2199  __ And(t2, t1, Operand(~HeapNumber::kSignMask));
2200  __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
2201 
2202  // Retrieve FCSR and check for fpu errors.
2203  __ cfc1(t5, FCSR);
2204  __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
2205  __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
2206 
2207  // Check for NaN, Infinity, and -Infinity.
2208  // They are invariant through a Math.floor call, so just
2209  // return the original argument.
2210  __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
2211  >> HeapNumber::kMantissaBitsInTopWord));
2212  __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
2213  // We had an overflow or underflow in the conversion. Check if we
2214  // have a big exponent.
2215  // If greater or equal, the argument is already rounded and in v0.
2216  __ Branch(&restore_fcsr_and_return, ge, t3,
2217  Operand(HeapNumber::kMantissaBits));
2218  __ Branch(&wont_fit_smi);
2219 
2220  __ bind(&no_fpu_error);
2221  // Move the result back to v0.
2222  __ mfc1(v0, f0);
2223  // Check if the result fits into a smi.
2224  __ Addu(a1, v0, Operand(0x40000000));
2225  __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
2226  // Tag the result.
2227  STATIC_ASSERT(kSmiTag == 0);
2228  __ sll(v0, v0, kSmiTagSize);
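 // Note: the Addu/Branch pair above is the usual smi range check: adding
 // 0x40000000 overflows into the sign bit exactly when the result lies
 // outside [-2^30, 2^30), the range that survives the one-bit tagging shift.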
2229 
2230  // Check for -0.
2231  __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
2232  // t1 already holds the HeapNumber exponent.
2233  __ And(t0, t1, Operand(HeapNumber::kSignMask));
2234  // If our HeapNumber is negative it was -0, so load its address and return.
2235  // Else v0 is loaded with 0, so we can also just return.
2236  __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
2237  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2238 
2239  __ bind(&restore_fcsr_and_return);
2240  // Restore FCSR and return.
2241  __ ctc1(a3, FCSR);
2242 
2243  __ Drop(argc + 1);
2244  __ Ret();
2245 
2246  __ bind(&wont_fit_smi);
2247  // Restore FCSR and fall to slow case.
2248  __ ctc1(a3, FCSR);
2249 
2250  __ bind(&slow);
2251  // Tail call the full function. We do not have to patch the receiver
2252  // because the function makes no use of it.
2253  __ InvokeFunction(
2254  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2255 
2256  __ bind(&miss);
2257  // a2: function name.
2258  GenerateMissBranch();
2259 
2260  // Return the generated code.
2261  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2262 }
2263 
2264 
2265 Handle<Code> CallStubCompiler::CompileMathAbsCall(
2266  Handle<Object> object,
2267  Handle<JSObject> holder,
2268  Handle<JSGlobalPropertyCell> cell,
2269  Handle<JSFunction> function,
2270  Handle<String> name) {
2271  // ----------- S t a t e -------------
2272  // -- a2 : function name
2273  // -- ra : return address
2274  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2275  // -- ...
2276  // -- sp[argc * 4] : receiver
2277  // -----------------------------------
2278 
2279  const int argc = arguments().immediate();
2280  // If the object is not a JSObject or we got an unexpected number of
2281  // arguments, bail out to the regular call.
2282  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2283 
2284  Label miss;
2285 
2286  GenerateNameCheck(name, &miss);
2287  if (cell.is_null()) {
2288  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
2289  STATIC_ASSERT(kSmiTag == 0);
2290  __ JumpIfSmi(a1, &miss);
2291  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
2292  name, &miss);
2293  } else {
2294  ASSERT(cell->value() == *function);
2295  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2296  &miss);
2297  GenerateLoadFunctionFromCell(cell, function, &miss);
2298  }
2299 
2300  // Load the (only) argument into v0.
2301  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2302 
2303  // Check if the argument is a smi.
2304  Label not_smi;
2305  STATIC_ASSERT(kSmiTag == 0);
2306  __ JumpIfNotSmi(v0, &not_smi);
2307 
2308  // Do bitwise not or do nothing depending on the sign of the
2309  // argument.
2310  __ sra(t0, v0, kBitsPerInt - 1);
2311  __ Xor(a1, v0, t0);
2312 
2313  // Add 1 or do nothing depending on the sign of the argument.
2314  __ Subu(v0, a1, t0);
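 // Note: sra/Xor/Subu is the branchless abs idiom: t0 is 0 for a non-negative
 // smi and -1 for a negative one, so a1 = v0 ^ t0 followed by v0 = a1 - t0
 // leaves v0 unchanged or computes ~v0 + 1 == -v0. E.g. for the smi encoding
 // of -5 (bit pattern -10): t0 = -1, a1 = ~(-10) = 9, v0 = 9 + 1 = 10, the
 // smi encoding of 5; the zero tag bit is preserved throughout.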
2315 
2316  // If the result is still negative, go to the slow case.
2317  // This only happens for the most negative smi.
2318  Label slow;
2319  __ Branch(&slow, lt, v0, Operand(zero_reg));
2320 
2321  // Smi case done.
2322  __ Drop(argc + 1);
2323  __ Ret();
2324 
2325  // Check if the argument is a heap number and load its exponent and
2326  // sign.
2327  __ bind(&not_smi);
2328  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2329  __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2330
2331  // Check the sign of the argument. If the argument is positive,
2332  // just return it.
2333  Label negative_sign;
2334  __ And(t0, a1, Operand(HeapNumber::kSignMask));
2335  __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
2336  __ Drop(argc + 1);
2337  __ Ret();
2338 
2339  // If the argument is negative, clear the sign, and return a new
2340  // number.
2341  __ bind(&negative_sign);
2342  __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
2343  __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2344  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
2345  __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
2346  __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2347  __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2348  __ Drop(argc + 1);
2349  __ Ret();
2350 
2351  // Tail call the full function. We do not have to patch the receiver
2352  // because the function makes no use of it.
2353  __ bind(&slow);
2354  __ InvokeFunction(
2355  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2356 
2357  __ bind(&miss);
2358  // a2: function name.
2359  GenerateMissBranch();
2360 
2361  // Return the generated code.
2362  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2363 }
2364 
2365 
2366 Handle<Code> CallStubCompiler::CompileFastApiCall(
2367  const CallOptimization& optimization,
2368  Handle<Object> object,
2369  Handle<JSObject> holder,
2370  Handle<JSGlobalPropertyCell> cell,
2371  Handle<JSFunction> function,
2372  Handle<String> name) {
2373 
2374  Counters* counters = isolate()->counters();
2375 
2376  ASSERT(optimization.is_simple_api_call());
2377  // Bail out if object is a global object as we don't want to
2378  // repatch it to global receiver.
2379  if (object->IsGlobalObject()) return Handle<Code>::null();
2380  if (!cell.is_null()) return Handle<Code>::null();
2381  if (!object->IsJSObject()) return Handle<Code>::null();
2382  int depth = optimization.GetPrototypeDepthOfExpectedType(
2383  Handle<JSObject>::cast(object), holder);
2384  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
2385 
2386  Label miss, miss_before_stack_reserved;
2387 
2388  GenerateNameCheck(name, &miss_before_stack_reserved);
2389 
2390  // Get the receiver from the stack.
2391  const int argc = arguments().immediate();
2392  __ lw(a1, MemOperand(sp, argc * kPointerSize));
2393 
2394  // Check that the receiver isn't a smi.
2395  __ JumpIfSmi(a1, &miss_before_stack_reserved);
2396 
2397  __ IncrementCounter(counters->call_const(), 1, a0, a3);
2398  __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
2399 
2400  ReserveSpaceForFastApiCall(masm(), a0);
2401 
2402  // Check that the maps haven't changed and find a Holder as a side effect.
2403  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
2404  depth, &miss);
2405 
2406  GenerateFastApiDirectCall(masm(), optimization, argc);
2407 
2408  __ bind(&miss);
2409  FreeSpaceForFastApiCall(masm());
2410 
2411  __ bind(&miss_before_stack_reserved);
2412  GenerateMissBranch();
2413 
2414  // Return the generated code.
2415  return GetCode(function);
2416 }
2417 
2418 
2419 Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
2420  Handle<JSObject> holder,
2421  Handle<JSFunction> function,
2422  Handle<String> name,
2423  CheckType check) {
2424  // ----------- S t a t e -------------
2425  // -- a2 : name
2426  // -- ra : return address
2427  // -----------------------------------
2428  if (HasCustomCallGenerator(function)) {
2429  Handle<Code> code = CompileCustomCall(object, holder,
2430  Handle<JSGlobalPropertyCell>::null(),
2431  function, name);
2432  // A null handle means bail out to the regular compiler code below.
2433  if (!code.is_null()) return code;
2434  }
2435 
2436  Label miss;
2437 
2438  GenerateNameCheck(name, &miss);
2439 
2440  // Get the receiver from the stack.
2441  const int argc = arguments().immediate();
2442  __ lw(a1, MemOperand(sp, argc * kPointerSize));
2443 
2444  // Check that the receiver isn't a smi.
2445  if (check != NUMBER_CHECK) {
2446  __ JumpIfSmi(a1, &miss);
2447  }
2448 
2449  // Make sure that it's okay not to patch the on-stack receiver
2450  // unless we're doing a receiver map check.
2451  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
2452  switch (check) {
2453  case RECEIVER_MAP_CHECK:
2454  __ IncrementCounter(masm()->isolate()->counters()->call_const(),
2455  1, a0, a3);
2456 
2457  // Check that the maps haven't changed.
2458  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
2459  name, &miss);
2460 
2461  // Patch the receiver on the stack with the global proxy if
2462  // necessary.
2463  if (object->IsGlobalObject()) {
2464  __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
2465  __ sw(a3, MemOperand(sp, argc * kPointerSize));
2466  }
2467  break;
2468 
2469  case STRING_CHECK:
2470  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2471  // Check that the object is a two-byte string or a symbol.
2472  __ GetObjectType(a1, a3, a3);
2473  __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
2474  // Check that the maps starting from the prototype haven't changed.
2475  GenerateDirectLoadGlobalFunctionPrototype(
2476  masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
2477  CheckPrototypes(
2478  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2479  a0, holder, a3, a1, t0, name, &miss);
2480  } else {
2481  // Calling non-strict non-builtins with a value as the receiver
2482  // requires boxing.
2483  __ jmp(&miss);
2484  }
2485  break;
2486 
2487  case NUMBER_CHECK:
2488  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2489  Label fast;
2490  // Check that the object is a smi or a heap number.
2491  __ JumpIfSmi(a1, &fast);
2492  __ GetObjectType(a1, a0, a0);
2493  __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
2494  __ bind(&fast);
2495  // Check that the maps starting from the prototype haven't changed.
2496  GenerateDirectLoadGlobalFunctionPrototype(
2497  masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
2498  CheckPrototypes(
2499  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2500  a0, holder, a3, a1, t0, name, &miss);
2501  } else {
2502  // Calling non-strict non-builtins with a value as the receiver
2503  // requires boxing.
2504  __ jmp(&miss);
2505  }
2506  break;
2507 
2508  case BOOLEAN_CHECK:
2509  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2510  Label fast;
2511  // Check that the object is a boolean.
2512  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
2513  __ Branch(&fast, eq, a1, Operand(t0));
2514  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
2515  __ Branch(&miss, ne, a1, Operand(t0));
2516  __ bind(&fast);
2517  // Check that the maps starting from the prototype haven't changed.
2518  GenerateDirectLoadGlobalFunctionPrototype(
2519  masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
2520  CheckPrototypes(
2521  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2522  a0, holder, a3, a1, t0, name, &miss);
2523  } else {
2524  // Calling non-strict non-builtins with a value as the receiver
2525  // requires boxing.
2526  __ jmp(&miss);
2527  }
2528  break;
2529  }
2530 
2531  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2532  ? CALL_AS_FUNCTION
2533  : CALL_AS_METHOD;
2534  __ InvokeFunction(
2535  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
2536 
2537  // Handle call cache miss.
2538  __ bind(&miss);
2539 
2540  GenerateMissBranch();
2541 
2542  // Return the generated code.
2543  return GetCode(function);
2544 }
2545 
2546 
2547 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
2548  Handle<JSObject> holder,
2549  Handle<String> name) {
2550  // ----------- S t a t e -------------
2551  // -- a2 : name
2552  // -- ra : return address
2553  // -----------------------------------
2554 
2555  Label miss;
2556 
2557  GenerateNameCheck(name, &miss);
2558 
2559  // Get the number of arguments.
2560  const int argc = arguments().immediate();
2561  LookupResult lookup(isolate());
2562  LookupPostInterceptor(holder, name, &lookup);
2563 
2564  // Get the receiver from the stack.
2565  __ lw(a1, MemOperand(sp, argc * kPointerSize));
2566 
2567  CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
2568  compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
2569  &miss);
2570 
2571  // Move returned value, the function to call, to a1.
2572  __ mov(a1, v0);
2573  // Restore receiver.
2574  __ lw(a0, MemOperand(sp, argc * kPointerSize));
2575 
2576  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
2577 
2578  // Handle call cache miss.
2579  __ bind(&miss);
2580  GenerateMissBranch();
2581 
2582  // Return the generated code.
2583  return GetCode(Code::INTERCEPTOR, name);
2584 }
2585 
2586 
2587 Handle<Code> CallStubCompiler::CompileCallGlobal(
2588  Handle<JSObject> object,
2589  Handle<GlobalObject> holder,
2590  Handle<JSGlobalPropertyCell> cell,
2591  Handle<JSFunction> function,
2592  Handle<String> name) {
2593  // ----------- S t a t e -------------
2594  // -- a2 : name
2595  // -- ra : return address
2596  // -----------------------------------
2597 
2598  if (HasCustomCallGenerator(function)) {
2599  Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
2600  // A null handle means bail out to the regular compiler code below.
2601  if (!code.is_null()) return code;
2602  }
2603 
2604  Label miss;
2605  GenerateNameCheck(name, &miss);
2606 
2607  // Get the number of arguments.
2608  const int argc = arguments().immediate();
2609  GenerateGlobalReceiverCheck(object, holder, name, &miss);
2610  GenerateLoadFunctionFromCell(cell, function, &miss);
2611 
2612  // Patch the receiver on the stack with the global proxy if
2613  // necessary.
2614  if (object->IsGlobalObject()) {
2615  __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
2616  __ sw(a3, MemOperand(sp, argc * kPointerSize));
2617  }
2618 
2619  // Set up the context (function already in a1).
2620  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2621
2622  // Jump to the cached code (tail call).
2623  Counters* counters = masm()->isolate()->counters();
2624  __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
2625  ParameterCount expected(function->shared()->formal_parameter_count());
2626  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2627  ? CALL_AS_FUNCTION
2628  : CALL_AS_METHOD;
2629  // We call indirectly through the code field in the function to
2630  // allow recompilation to take effect without changing any of the
2631  // call sites.
2632  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2633  __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
2634  NullCallWrapper(), call_kind);
2635 
2636  // Handle call cache miss.
2637  __ bind(&miss);
2638  __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
2639  GenerateMissBranch();
2640 
2641  // Return the generated code.
2642  return GetCode(Code::NORMAL, name);
2643 }
2644 
2645 
2646 Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
2647  int index,
2648  Handle<Map> transition,
2649  Handle<String> name) {
2650  // ----------- S t a t e -------------
2651  // -- a0 : value
2652  // -- a1 : receiver
2653  // -- a2 : name
2654  // -- ra : return address
2655  // -----------------------------------
2656  Label miss;
2657 
2658  // Name register might be clobbered.
2659  GenerateStoreField(masm(),
2660  object,
2661  index,
2662  transition,
2663  name,
2664  a1, a2, a3, t0,
2665  &miss);
2666  __ bind(&miss);
2667  __ li(a2, Operand(Handle<String>(name))); // Restore name.
2668  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2669  __ Jump(ic, RelocInfo::CODE_TARGET);
2670 
2671  // Return the generated code.
2672  return GetCode(transition.is_null()
2673  ? Code::FIELD
2674  : Code::MAP_TRANSITION, name);
2675 }
2676 
2677 
2678 Handle<Code> StoreStubCompiler::CompileStoreCallback(
2679  Handle<String> name,
2680  Handle<JSObject> receiver,
2681  Handle<JSObject> holder,
2682  Handle<AccessorInfo> callback) {
2683  // ----------- S t a t e -------------
2684  // -- a0 : value
2685  // -- a1 : receiver
2686  // -- a2 : name
2687  // -- ra : return address
2688  // -----------------------------------
2689  Label miss;
2690  // Check that the maps haven't changed.
2691  __ JumpIfSmi(a1, &miss, a3);
2692  CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
2693 
2694  // Stub never generated for non-global objects that require access
2695  // checks.
2696  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
2697 
2698  __ push(a1); // Receiver.
2699  __ li(a3, Operand(callback)); // Callback info.
2700  __ Push(a3, a2, a0);
2701 
2702  // Do tail-call to the runtime system.
2703  ExternalReference store_callback_property =
2704  ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
2705  masm()->isolate());
2706  __ TailCallExternalReference(store_callback_property, 4, 1);
2707 
2708  // Handle store cache miss.
2709  __ bind(&miss);
2710  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2711  __ Jump(ic, RelocInfo::CODE_TARGET);
2712 
2713  // Return the generated code.
2714  return GetCode(Code::CALLBACKS, name);
2715 }
2716 
2717 
2718 #undef __
2719 #define __ ACCESS_MASM(masm)
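 // Note: __ is rebound from ACCESS_MASM(masm()) to ACCESS_MASM(masm) here
 // because the following helper receives the MacroAssembler as an explicit
 // parameter instead of reaching it through the compiler's masm() accessor;
 // the macro is switched back after the helper.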
2720 
2721 
2722 void StoreStubCompiler::GenerateStoreViaSetter(
2723  MacroAssembler* masm,
2724  Handle<JSFunction> setter) {
2725  // ----------- S t a t e -------------
2726  // -- a0 : value
2727  // -- a1 : receiver
2728  // -- a2 : name
2729  // -- ra : return address
2730  // -----------------------------------
2731  {
2732  FrameScope scope(masm, StackFrame::INTERNAL);
2733 
2734  // Save value register, so we can restore it later.
2735  __ push(a0);
2736 
2737  if (!setter.is_null()) {
2738  // Call the JavaScript setter with receiver and value on the stack.
2739  __ push(a1);
2740  __ push(a0);
2741  ParameterCount actual(1);
2742  __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
2743  CALL_AS_METHOD);
2744  } else {
2745  // If we generate a global code snippet for deoptimization only, remember
2746  // the place to continue after deoptimization.
2747  masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
2748  }
2749 
2750  // We have to return the passed value, not the return value of the setter.
2751  __ pop(v0);
2752 
2753  // Restore context register.
2754  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2755  }
2756  __ Ret();
2757 }
2758 
2759 
2760 #undef __
2761 #define __ ACCESS_MASM(masm())
2762 
2763 
2764 Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
2765  Handle<String> name,
2766  Handle<JSObject> receiver,
2767  Handle<JSObject> holder,
2768  Handle<JSFunction> setter) {
2769  // ----------- S t a t e -------------
2770  // -- a0 : value
2771  // -- a1 : receiver
2772  // -- a2 : name
2773  // -- ra : return address
2774  // -----------------------------------
2775  Label miss;
2776 
2777  // Check that the maps haven't changed.
2778  __ JumpIfSmi(a1, &miss);
2779  CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
2780 
2781  GenerateStoreViaSetter(masm(), setter);
2782 
2783  __ bind(&miss);
2784  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2785  __ Jump(ic, RelocInfo::CODE_TARGET);
2786 
2787  // Return the generated code.
2788  return GetCode(Code::CALLBACKS, name);
2789 }
2790 
2791 
2792 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
2793  Handle<JSObject> receiver,
2794  Handle<String> name) {
2795  // ----------- S t a t e -------------
2796  // -- a0 : value
2797  // -- a1 : receiver
2798  // -- a2 : name
2799  // -- ra : return address
2800  // -----------------------------------
2801  Label miss;
2802 
2803  // Check that the map of the object hasn't changed.
2804  __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
2805  DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
2806
2807  // Perform global security token check if needed.
2808  if (receiver->IsJSGlobalProxy()) {
2809  __ CheckAccessGlobalProxy(a1, a3, &miss);
2810  }
2811 
2812  // Stub is never generated for non-global objects that require access
2813  // checks.
2814  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
2815 
2816  __ Push(a1, a2, a0); // Receiver, name, value.
2817 
2818  __ li(a0, Operand(Smi::FromInt(strict_mode_)));
2819  __ push(a0); // Strict mode.
2820 
2821  // Do tail-call to the runtime system.
2822  ExternalReference store_ic_property =
2823  ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
2824  masm()->isolate());
2825  __ TailCallExternalReference(store_ic_property, 4, 1);
2826 
2827  // Handle store cache miss.
2828  __ bind(&miss);
2829  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2830  __ Jump(ic, RelocInfo::CODE_TARGET);
2831 
2832  // Return the generated code.
2833  return GetCode(Code::INTERCEPTOR, name);
2834 }
2835 
2836 
2837 Handle<Code> StoreStubCompiler::CompileStoreGlobal(
2838  Handle<GlobalObject> object,
2839  Handle<JSGlobalPropertyCell> cell,
2840  Handle<String> name) {
2841  // ----------- S t a t e -------------
2842  // -- a0 : value
2843  // -- a1 : receiver
2844  // -- a2 : name
2845  // -- ra : return address
2846  // -----------------------------------
2847  Label miss;
2848 
2849  // Check that the map of the global has not changed.
2850  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
2851  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
2852 
2853  // Check that the value in the cell is not the hole. If it is, this
2854  // cell could have been deleted and reintroducing the global needs
2855  // to update the property details in the property dictionary of the
2856  // global object. We bail out to the runtime system to do that.
2857  __ li(t0, Operand(cell));
2858  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
2859  __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
2860  __ Branch(&miss, eq, t1, Operand(t2));
2861 
2862  // Store the value in the cell.
2863  __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
2864  __ mov(v0, a0); // Stored value must be returned in v0.
2865  // Cells are always rescanned, so no write barrier here.
2866 
2867  Counters* counters = masm()->isolate()->counters();
2868  __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
2869  __ Ret();
2870 
2871  // Handle store cache miss.
2872  __ bind(&miss);
2873  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
2874  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2875  __ Jump(ic, RelocInfo::CODE_TARGET);
2876 
2877  // Return the generated code.
2878  return GetCode(Code::NORMAL, name);
2879 }
2880 
2881 
2882 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
2883  Handle<JSObject> object,
2884  Handle<JSObject> last) {
2885  // ----------- S t a t e -------------
2886  // -- a0 : receiver
2887  // -- ra : return address
2888  // -----------------------------------
2889  Label miss;
2890 
2891  // Check that the receiver is not a smi.
2892  __ JumpIfSmi(a0, &miss);
2893 
2894  // Check the maps of the full prototype chain.
2895  CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
2896 
2897  // If the last object in the prototype chain is a global object,
2898  // check that the global property cell is empty.
2899  if (last->IsGlobalObject()) {
2900  GenerateCheckPropertyCell(
2901  masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
2902  }
2903 
2904  // Return undefined if the maps of the full prototype chain are still the same.
2905  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
2906  __ Ret();
2907 
2908  __ bind(&miss);
2909  GenerateLoadMiss(masm(), Code::LOAD_IC);
2910 
2911  // Return the generated code.
2912  return GetCode(Code::NONEXISTENT, factory()->empty_string());
2913 }
2914 
2915 
2916 Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
2917  Handle<JSObject> holder,
2918  int index,
2919  Handle<String> name) {
2920  // ----------- S t a t e -------------
2921  // -- a0 : receiver
2922  // -- a2 : name
2923  // -- ra : return address
2924  // -----------------------------------
2925  Label miss;
2926 
2927  __ mov(v0, a0);
2928 
2929  GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
2930  __ bind(&miss);
2931  GenerateLoadMiss(masm(), Code::LOAD_IC);
2932 
2933  // Return the generated code.
2934  return GetCode(Code::FIELD, name);
2935 }
2936 
2937 
2938 Handle<Code> LoadStubCompiler::CompileLoadCallback(
2939  Handle<String> name,
2940  Handle<JSObject> object,
2941  Handle<JSObject> holder,
2942  Handle<AccessorInfo> callback) {
2943  // ----------- S t a t e -------------
2944  // -- a0 : receiver
2945  // -- a2 : name
2946  // -- ra : return address
2947  // -----------------------------------
2948  Label miss;
2949  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
2950  &miss);
2951  __ bind(&miss);
2952  GenerateLoadMiss(masm(), Code::LOAD_IC);
2953 
2954  // Return the generated code.
2955  return GetCode(Code::CALLBACKS, name);
2956 }
2957 
2958 
2959 #undef __
2960 #define __ ACCESS_MASM(masm)
2961 
2962 
2963 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
2964  Handle<JSFunction> getter) {
2965  // ----------- S t a t e -------------
2966  // -- a0 : receiver
2967  // -- a2 : name
2968  // -- ra : return address
2969  // -----------------------------------
2970  {
2971  FrameScope scope(masm, StackFrame::INTERNAL);
2972 
2973  if (!getter.is_null()) {
2974  // Call the JavaScript getter with the receiver on the stack.
2975  __ push(a0);
2976  ParameterCount actual(0);
2977  __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
2978  CALL_AS_METHOD);
2979  } else {
2980  // If we generate a global code snippet for deoptimization only, remember
2981  // the place to continue after deoptimization.
2982  masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
2983  }
2984 
2985  // Restore context register.
2986  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2987  }
2988  __ Ret();
2989 }
2990 
2991 
2992 #undef __
2993 #define __ ACCESS_MASM(masm())
2994 
2995 
2996 Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
2997  Handle<String> name,
2998  Handle<JSObject> receiver,
2999  Handle<JSObject> holder,
3000  Handle<JSFunction> getter) {
3001  // ----------- S t a t e -------------
3002  // -- a0 : receiver
3003  // -- a2 : name
3004  // -- ra : return address
3005  // -----------------------------------
3006  Label miss;
3007 
3008  // Check that the maps haven't changed.
3009  __ JumpIfSmi(a0, &miss);
3010  CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
3011 
3012  GenerateLoadViaGetter(masm(), getter);
3013 
3014  __ bind(&miss);
3015  GenerateLoadMiss(masm(), Code::LOAD_IC);
3016 
3017  // Return the generated code.
3018  return GetCode(Code::CALLBACKS, name);
3019 }
3020 
3021 
3022 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
3023  Handle<JSObject> holder,
3024  Handle<JSFunction> value,
3025  Handle<String> name) {
3026  // ----------- S t a t e -------------
3027  // -- a0 : receiver
3028  // -- a2 : name
3029  // -- ra : return address
3030  // -----------------------------------
3031  Label miss;
3032 
3033  GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
3034  __ bind(&miss);
3035  GenerateLoadMiss(masm(), Code::LOAD_IC);
3036 
3037  // Return the generated code.
3038  return GetCode(Code::CONSTANT_FUNCTION, name);
3039 }
3040 
3041 
3042 Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
3043  Handle<JSObject> holder,
3044  Handle<String> name) {
3045  // ----------- S t a t e -------------
3046  // -- a0 : receiver
3047  // -- a2 : name
3048  // -- ra : return address
3049  // -- [sp] : receiver
3050  // -----------------------------------
3051  Label miss;
3052 
3053  LookupResult lookup(isolate());
3054  LookupPostInterceptor(holder, name, &lookup);
3055  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
3056  &miss);
3057  __ bind(&miss);
3058  GenerateLoadMiss(masm(), Code::LOAD_IC);
3059 
3060  // Return the generated code.
3061  return GetCode(Code::INTERCEPTOR, name);
3062 }
3063 
3064 
3065 Handle<Code> LoadStubCompiler::CompileLoadGlobal(
3066  Handle<JSObject> object,
3067  Handle<GlobalObject> holder,
3068  Handle<JSGlobalPropertyCell> cell,
3069  Handle<String> name,
3070  bool is_dont_delete) {
3071  // ----------- S t a t e -------------
3072  // -- a0 : receiver
3073  // -- a2 : name
3074  // -- ra : return address
3075  // -----------------------------------
3076  Label miss;
3077 
3078  // Check that the map of the global has not changed.
3079  __ JumpIfSmi(a0, &miss);
3080  CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
3081 
3082  // Get the value from the cell.
3083  __ li(a3, Operand(cell));
3084  __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
3085
3086  // Check for deleted property if property can actually be deleted.
3087  if (!is_dont_delete) {
3088  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3089  __ Branch(&miss, eq, t0, Operand(at));
3090  }
3091 
3092  __ mov(v0, t0);
3093  Counters* counters = masm()->isolate()->counters();
3094  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
3095  __ Ret();
3096 
3097  __ bind(&miss);
3098  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
3099  GenerateLoadMiss(masm(), Code::LOAD_IC);
3100 
3101  // Return the generated code.
3102  return GetCode(Code::NORMAL, name);
3103 }
3104 
3105 
3106 Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
3107  Handle<JSObject> receiver,
3108  Handle<JSObject> holder,
3109  int index) {
3110  // ----------- S t a t e -------------
3111  // -- ra : return address
3112  // -- a0 : key
3113  // -- a1 : receiver
3114  // -----------------------------------
3115  Label miss;
3116 
3117  // Check the key is the cached one.
3118  __ Branch(&miss, ne, a0, Operand(name));
3119 
3120  GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
3121  __ bind(&miss);
3122  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3123 
3124  return GetCode(Code::FIELD, name);
3125 }
3126 
3127 
3128 Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
3129  Handle<String> name,
3130  Handle<JSObject> receiver,
3131  Handle<JSObject> holder,
3132  Handle<AccessorInfo> callback) {
3133  // ----------- S t a t e -------------
3134  // -- ra : return address
3135  // -- a0 : key
3136  // -- a1 : receiver
3137  // -----------------------------------
3138  Label miss;
3139 
3140  // Check the key is the cached one.
3141  __ Branch(&miss, ne, a0, Operand(name));
3142 
3143  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
3144  name, &miss);
3145  __ bind(&miss);
3146  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3147 
3148  return GetCode(Code::CALLBACKS, name);
3149 }
3150 
3151 
3152 Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
3153  Handle<String> name,
3154  Handle<JSObject> receiver,
3155  Handle<JSObject> holder,
3156  Handle<JSFunction> value) {
3157  // ----------- S t a t e -------------
3158  // -- ra : return address
3159  // -- a0 : key
3160  // -- a1 : receiver
3161  // -----------------------------------
3162  Label miss;
3163 
3164  // Check the key is the cached one.
3165  __ Branch(&miss, ne, a0, Operand(name));
3166 
3167  GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
3168  __ bind(&miss);
3169  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3170 
3171  // Return the generated code.
3172  return GetCode(Code::CONSTANT_FUNCTION, name);
3173 }
3174 
3175 
3176 Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
3177  Handle<JSObject> receiver,
3178  Handle<JSObject> holder,
3179  Handle<String> name) {
3180  // ----------- S t a t e -------------
3181  // -- ra : return address
3182  // -- a0 : key
3183  // -- a1 : receiver
3184  // -----------------------------------
3185  Label miss;
3186 
3187  // Check the key is the cached one.
3188  __ Branch(&miss, ne, a0, Operand(name));
3189 
3190  LookupResult lookup(isolate());
3191  LookupPostInterceptor(holder, name, &lookup);
3192  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
3193  &miss);
3194  __ bind(&miss);
3195  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3196 
3197  return GetCode(Code::INTERCEPTOR, name);
3198 }
3199 
3200 
3201 Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
3202  Handle<String> name) {
3203  // ----------- S t a t e -------------
3204  // -- ra : return address
3205  // -- a0 : key
3206  // -- a1 : receiver
3207  // -----------------------------------
3208  Label miss;
3209 
3210  // Check the key is the cached one.
3211  __ Branch(&miss, ne, a0, Operand(name));
3212 
3213  GenerateLoadArrayLength(masm(), a1, a2, &miss);
3214  __ bind(&miss);
3215  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3216 
3217  return GetCode(Code::CALLBACKS, name);
3218 }
3219 
3220 
3221 Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
3222  Handle<String> name) {
3223  // ----------- S t a t e -------------
3224  // -- ra : return address
3225  // -- a0 : key
3226  // -- a1 : receiver
3227  // -----------------------------------
3228  Label miss;
3229 
3230  Counters* counters = masm()->isolate()->counters();
3231  __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
3232 
3233  // Check the key is the cached one.
3234  __ Branch(&miss, ne, a0, Operand(name));
3235 
3236  GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
3237  __ bind(&miss);
3238  __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
3239 
3240  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3241 
3242  return GetCode(Code::CALLBACKS, name);
3243 }
3244 
3245 
3246 Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
3247  Handle<String> name) {
3248  // ----------- S t a t e -------------
3249  // -- ra : return address
3250  // -- a0 : key
3251  // -- a1 : receiver
3252  // -----------------------------------
3253  Label miss;
3254 
3255  Counters* counters = masm()->isolate()->counters();
3256  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
3257 
3258  // Check the name hasn't changed.
3259  __ Branch(&miss, ne, a0, Operand(name));
3260 
3261  GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
3262  __ bind(&miss);
3263  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
3264  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3265 
3266  return GetCode(Code::CALLBACKS, name);
3267 }
3268 
3269 
3270 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
3271  Handle<Map> receiver_map) {
3272  // ----------- S t a t e -------------
3273  // -- ra : return address
3274  // -- a0 : key
3275  // -- a1 : receiver
3276  // -----------------------------------
3277  ElementsKind elements_kind = receiver_map->elements_kind();
3278  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
3279 
3280  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
3281 
3282  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
3283  __ Jump(ic, RelocInfo::CODE_TARGET);
3284 
3285  // Return the generated code.
3286  return GetCode(Code::NORMAL, factory()->empty_string());
3287 }
3288 
3289 
3290 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
3291  MapHandleList* receiver_maps,
3292  CodeHandleList* handler_ics) {
3293  // ----------- S t a t e -------------
3294  // -- ra : return address
3295  // -- a0 : key
3296  // -- a1 : receiver
3297  // -----------------------------------
3298  Label miss;
3299  __ JumpIfSmi(a1, &miss);
3300 
3301  int receiver_count = receiver_maps->length();
3302  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
3303  for (int current = 0; current < receiver_count; ++current) {
3304  __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
3305  eq, a2, Operand(receiver_maps->at(current)));
3306  }
3307 
3308  __ bind(&miss);
3309  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
3310  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
3311 
3312  // Return the generated code.
3313  return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
3314 }
3315 
3316 
3317 Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
3318  int index,
3319  Handle<Map> transition,
3320  Handle<String> name) {
3321  // ----------- S t a t e -------------
3322  // -- a0 : value
3323  // -- a1 : key
3324  // -- a2 : receiver
3325  // -- ra : return address
3326  // -----------------------------------
3327 
3328  Label miss;
3329 
3330  Counters* counters = masm()->isolate()->counters();
3331  __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
3332 
3333  // Check that the name has not changed.
3334  __ Branch(&miss, ne, a1, Operand(name));
3335 
3336  // a3 is used as a scratch register. a1 and a2 keep their values if a jump
3337  // to the miss label is generated.
3338  GenerateStoreField(masm(),
3339  object,
3340  index,
3341  transition,
3342  name,
3343  a2, a1, a3, t0,
3344  &miss);
3345  __ bind(&miss);
3346 
3347  __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
3348  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
3349  __ Jump(ic, RelocInfo::CODE_TARGET);
3350 
3351  // Return the generated code.
3352  return GetCode(transition.is_null()
3353  ? Code::FIELD
3354  : Code::MAP_TRANSITION, name);
3355 }
3356 
3357 
3358 Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
3359  Handle<Map> receiver_map) {
3360  // ----------- S t a t e -------------
3361  // -- a0 : value
3362  // -- a1 : key
3363  // -- a2 : receiver
3364  // -- ra : return address
3365  // -- a3 : scratch
3366  // -----------------------------------
3367  ElementsKind elements_kind = receiver_map->elements_kind();
3368  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
3369  Handle<Code> stub =
3370  KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
3371 
3372  __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
3373 
3374  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
3375  __ Jump(ic, RelocInfo::CODE_TARGET);
3376 
3377  // Return the generated code.
3378  return GetCode(Code::NORMAL, factory()->empty_string());
3379 }
3380 
3381 
3382 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
3383  MapHandleList* receiver_maps,
3384  CodeHandleList* handler_stubs,
3385  MapHandleList* transitioned_maps) {
3386  // ----------- S t a t e -------------
3387  // -- a0 : value
3388  // -- a1 : key
3389  // -- a2 : receiver
3390  // -- ra : return address
3391  // -- a3 : scratch
3392  // -----------------------------------
3393  Label miss;
3394  __ JumpIfSmi(a2, &miss);
3395 
3396  int receiver_count = receiver_maps->length();
3397  __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
3398  for (int i = 0; i < receiver_count; ++i) {
3399  if (transitioned_maps->at(i).is_null()) {
3400  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
3401  a3, Operand(receiver_maps->at(i)));
3402  } else {
3403  Label next_map;
3404  __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
3405  __ li(a3, Operand(transitioned_maps->at(i)));
3406  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
3407  __ bind(&next_map);
3408  }
3409  }
3410 
3411  __ bind(&miss);
3412  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
3413  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
3414 
3415  // Return the generated code.
3416  return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
3417 }
3418 
3419 
3420 Handle<Code> ConstructStubCompiler::CompileConstructStub(
3421  Handle<JSFunction> function) {
3422  // a0 : argc
3423  // a1 : constructor
3424  // ra : return address
3425  // [sp] : last argument
3426  Label generic_stub_call;
3427 
3428  // Use t7 for holding undefined which is used in several places below.
3429  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
3430 
3431 #ifdef ENABLE_DEBUGGER_SUPPORT
3432  // Check to see whether there are any break points in the function code. If
3433  // there are, jump to the generic constructor stub which calls the actual
3434  // code for the function, thereby hitting the break points.
3435  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3436  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kDebugInfoOffset));
3437  __ Branch(&generic_stub_call, ne, a2, Operand(t7));
3438 #endif
3439 
3440  // Load the initial map and verify that it is in fact a map.
3441  // a1: constructor function
3442  // t7: undefined
3443  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
3444  __ JumpIfSmi(a2, &generic_stub_call);
3445  __ GetObjectType(a2, a3, t0);
3446  __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
3447 
3448 #ifdef DEBUG
3449  // Cannot construct functions this way.
3450  // a0: argc
3451  // a1: constructor function
3452  // a2: initial map
3453  // t7: undefined
3454  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
3455  __ Check(ne, "Function constructed by construct stub.",
3456  a3, Operand(JS_FUNCTION_TYPE));
3457 #endif
3458 
3459  // Now allocate the JSObject in new space.
3460  // a0: argc
3461  // a1: constructor function
3462  // a2: initial map
3463  // t7: undefined
3464  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
3465  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
3466 
3467  // Allocated the JSObject, now initialize the fields. Map is set to initial
3468  // map and properties and elements are set to empty fixed array.
3469  // a0: argc
3470  // a1: constructor function
3471  // a2: initial map
3472  // a3: object size (in words)
3473  // t4: JSObject (not tagged)
3474  // t7: undefined
3475  __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
3476  __ mov(t5, t4);
3477  __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
3478  __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
3479  __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
3480  __ Addu(t5, t5, Operand(3 * kPointerSize));
3481  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
3482  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
3483  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
3484 
3485 
3486  // Calculate the location of the first argument. The stack contains only the
3487  // argc arguments.
3488  __ sll(a1, a0, kPointerSizeLog2);
3489  __ Addu(a1, a1, sp);
3490 
3491  // Fill all the in-object properties with undefined.
3492  // a0: argc
3493  // a1: first argument
3494  // a3: object size (in words)
3495  // t4: JSObject (not tagged)
3496  // t5: First in-object property of JSObject (not tagged)
3497  // t7: undefined
3498  // Fill the initialized properties with a constant value or a passed argument
3499  // depending on the this.x = ...; assignment in the function.
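 // (For example, for "function Point(x, y) { this.x = x; this.y = y; }"
 // the stub copies arguments 0 and 1 directly into the first two
 // in-object property slots.)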
3500  Handle<SharedFunctionInfo> shared(function->shared());
3501  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
3502  if (shared->IsThisPropertyAssignmentArgument(i)) {
3503  Label not_passed, next;
3504  // Check if the argument assigned to the property is actually passed.
3505  int arg_number = shared->GetThisPropertyAssignmentArgument(i);
3506  __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
3507  // Argument passed - find it on the stack.
3508  __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
3509  __ sw(a2, MemOperand(t5));
3510  __ Addu(t5, t5, kPointerSize);
3511  __ jmp(&next);
3512  __ bind(&not_passed);
3513  // Set the property to undefined.
3514  __ sw(t7, MemOperand(t5));
3515  __ Addu(t5, t5, Operand(kPointerSize));
3516  __ bind(&next);
3517  } else {
3518  // Set the property to the constant value.
3519  Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
3520  __ li(a2, Operand(constant));
3521  __ sw(a2, MemOperand(t5));
3522  __ Addu(t5, t5, kPointerSize);
3523  }
3524  }
3525 
3526  // Fill the unused in-object property fields with undefined.
3527  ASSERT(function->has_initial_map());
3528  for (int i = shared->this_property_assignments_count();
3529  i < function->initial_map()->inobject_properties();
3530  i++) {
3531  __ sw(t7, MemOperand(t5));
3532  __ Addu(t5, t5, kPointerSize);
3533  }
3534 
3535  // a0: argc
3536  // t4: JSObject (not tagged)
3537  // Move argc to a1 and the JSObject to return to v0 and tag it.
3538  __ mov(a1, a0);
3539  __ mov(v0, t4);
3540  __ Or(v0, v0, Operand(kHeapObjectTag));
3541 
3542  // v0: JSObject
3543  // a1: argc
3544  // Remove caller arguments and receiver from the stack and return.
3545  __ sll(t0, a1, kPointerSizeLog2);
3546  __ Addu(sp, sp, t0);
3547  __ Addu(sp, sp, Operand(kPointerSize));
3548  Counters* counters = masm()->isolate()->counters();
3549  __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
3550  __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
3551  __ Ret();
3552 
3553  // Jump to the generic stub in case the specialized code cannot handle the
3554  // construction.
3555  __ bind(&generic_stub_call);
3556  Handle<Code> generic_construct_stub =
3557  masm()->isolate()->builtins()->JSConstructStubGeneric();
3558  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3559 
3560  // Return the generated code.
3561  return GetCode();
3562 }
3563 
3564 
3565 #undef __
3566 #define __ ACCESS_MASM(masm)
3567 
3568 
3569 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
3570  MacroAssembler* masm) {
3571  // ---------- S t a t e --------------
3572  // -- ra : return address
3573  // -- a0 : key
3574  // -- a1 : receiver
3575  // -----------------------------------
3576  Label slow, miss_force_generic;
3577 
3578  Register key = a0;
3579  Register receiver = a1;
3580 
3581  __ JumpIfNotSmi(key, &miss_force_generic);
3582  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
3583  __ sra(a2, a0, kSmiTagSize);
3584  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
3585  __ Ret();
3586 
3587  // Slow case, key and receiver still in a0 and a1.
3588  __ bind(&slow);
3589  __ IncrementCounter(
3590  masm->isolate()->counters()->keyed_load_external_array_slow(),
3591  1, a2, a3);
3592  // Entry registers are intact.
3593  // ---------- S t a t e --------------
3594  // -- ra : return address
3595  // -- a0 : key
3596  // -- a1 : receiver
3597  // -----------------------------------
3598  Handle<Code> slow_ic =
3599  masm->isolate()->builtins()->KeyedLoadIC_Slow();
3600  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
3601 
3602  // Miss case, call the runtime.
3603  __ bind(&miss_force_generic);
3604 
3605  // ---------- S t a t e --------------
3606  // -- ra : return address
3607  // -- a0 : key
3608  // -- a1 : receiver
3609  // -----------------------------------
3610 
3611  Handle<Code> miss_ic =
3612  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
3613  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
3614 }
3615 
3616 
3617 static bool IsElementTypeSigned(ElementsKind elements_kind) {
3618  switch (elements_kind) {
3619  case EXTERNAL_BYTE_ELEMENTS:
3620  case EXTERNAL_SHORT_ELEMENTS:
3621  case EXTERNAL_INT_ELEMENTS:
3622  return true;
3623 
3624  case EXTERNAL_PIXEL_ELEMENTS:
3625  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3626  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3627  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3628  return false;
3629 
3630  case EXTERNAL_FLOAT_ELEMENTS:
3631  case EXTERNAL_DOUBLE_ELEMENTS:
3632  case FAST_SMI_ELEMENTS:
3633  case FAST_ELEMENTS:
3634  case FAST_DOUBLE_ELEMENTS:
3635  case FAST_HOLEY_SMI_ELEMENTS:
3636  case FAST_HOLEY_ELEMENTS:
3637  case FAST_HOLEY_DOUBLE_ELEMENTS:
3638  case DICTIONARY_ELEMENTS:
3639  case NON_STRICT_ARGUMENTS_ELEMENTS:
3640  UNREACHABLE();
3641  return false;
3642  }
3643  return false;
3644 }
3645 
3646 
3647 static void GenerateSmiKeyCheck(MacroAssembler* masm,
3648  Register key,
3649  Register scratch0,
3650  Register scratch1,
3651  FPURegister double_scratch0,
3652  Label* fail) {
3653  if (CpuFeatures::IsSupported(FPU)) {
3654  CpuFeatures::Scope scope(FPU);
3655  Label key_ok;
3656  // Check for smi or a smi inside a heap number. We convert the heap
3657  // number and check if the conversion is exact and fits into the smi
3658  // range.
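 // E.g. a key of 7.0 stored as a HeapNumber truncates exactly to 7 and
 // is re-tagged as a smi; 7.5, NaN, or out-of-smi-range values take the
 // fail path instead.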
3659  __ JumpIfSmi(key, &key_ok);
3660  __ CheckMap(key,
3661  scratch0,
3662  Heap::kHeapNumberMapRootIndex,
3663  fail,
3664  DONT_DO_SMI_CHECK);
3665  __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
3666  __ EmitFPUTruncate(kRoundToZero,
3667  double_scratch0,
3668  double_scratch0,
3669  scratch0,
3670  scratch1,
3671  kCheckForInexactConversion);
3672 
3673  __ Branch(fail, ne, scratch1, Operand(zero_reg));
3674 
3675  __ mfc1(scratch0, double_scratch0);
3676  __ SmiTagCheckOverflow(key, scratch0, scratch1);
3677  __ BranchOnOverflow(fail, scratch1);
3678  __ bind(&key_ok);
3679  } else {
3680  // Check that the key is a smi.
3681  __ JumpIfNotSmi(key, fail);
3682  }
3683 }
3684 
3685 
3686 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
3687  MacroAssembler* masm,
3688  ElementsKind elements_kind) {
3689  // ---------- S t a t e --------------
3690  // -- ra : return address
3691  // -- a0 : key
3692  // -- a1 : receiver
3693  // -----------------------------------
3694  Label miss_force_generic, slow, failed_allocation;
3695 
3696  Register key = a0;
3697  Register receiver = a1;
3698 
3699  // This stub is meant to be tail-jumped to, the receiver must already
3700  // have been verified by the caller to not be a smi.
3701 
3702  // Check that the key is a smi or a heap number convertible to a smi.
3703  GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
3704 
3705  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3706  // a3: elements array
3707 
3708  // Check that the index is in range.
3709  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
3710  __ sra(t2, key, kSmiTagSize);
3711  // Unsigned comparison catches both negative and too-large values.
3712  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
3713 
3714  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
3715  // a3: base pointer of external storage
3716 
3717  // We are not untagging smi key and instead work with it
3718  // as if it was premultiplied by 2.
3719  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
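 // A smi is the integer shifted left by one, so the smi key already
 // equals index * 2: it is used directly as the byte offset for 2-byte
 // elements, shifted right once for 1-byte elements, and shifted left
 // once (or twice) for 4-byte (or 8-byte) elements below.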
3720 
3721  Register value = a2;
3722  switch (elements_kind) {
3723  case EXTERNAL_BYTE_ELEMENTS:
3724  __ srl(t2, key, 1);
3725  __ addu(t3, a3, t2);
3726  __ lb(value, MemOperand(t3, 0));
3727  break;
3728  case EXTERNAL_PIXEL_ELEMENTS:
3729  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3730  __ srl(t2, key, 1);
3731  __ addu(t3, a3, t2);
3732  __ lbu(value, MemOperand(t3, 0));
3733  break;
3734  case EXTERNAL_SHORT_ELEMENTS:
3735  __ addu(t3, a3, key);
3736  __ lh(value, MemOperand(t3, 0));
3737  break;
3738  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3739  __ addu(t3, a3, key);
3740  __ lhu(value, MemOperand(t3, 0));
3741  break;
3742  case EXTERNAL_INT_ELEMENTS:
3743  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3744  __ sll(t2, key, 1);
3745  __ addu(t3, a3, t2);
3746  __ lw(value, MemOperand(t3, 0));
3747  break;
3748  case EXTERNAL_FLOAT_ELEMENTS:
3749  __ sll(t3, t2, 2);
3750  __ addu(t3, a3, t3);
3751  if (CpuFeatures::IsSupported(FPU)) {
3752  CpuFeatures::Scope scope(FPU);
3753  __ lwc1(f0, MemOperand(t3, 0));
3754  } else {
3755  __ lw(value, MemOperand(t3, 0));
3756  }
3757  break;
3758  case EXTERNAL_DOUBLE_ELEMENTS:
3759  __ sll(t2, key, 2);
3760  __ addu(t3, a3, t2);
3761  if (CpuFeatures::IsSupported(FPU)) {
3762  CpuFeatures::Scope scope(FPU);
3763  __ ldc1(f0, MemOperand(t3, 0));
3764  } else {
3765  // t3: pointer to the beginning of the double we want to load.
3766  __ lw(a2, MemOperand(t3, 0));
3767  __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
3768  }
3769  break;
3770  case FAST_ELEMENTS:
3771  case FAST_SMI_ELEMENTS:
3772  case FAST_DOUBLE_ELEMENTS:
3773  case FAST_HOLEY_ELEMENTS:
3774  case FAST_HOLEY_SMI_ELEMENTS:
3775  case FAST_HOLEY_DOUBLE_ELEMENTS:
3776  case DICTIONARY_ELEMENTS:
3777  case NON_STRICT_ARGUMENTS_ELEMENTS:
3778  UNREACHABLE();
3779  break;
3780  }
3781 
3782  // For integer array types:
3783  // a2: value
3784  // For float array type:
3785  // f0: value (if FPU is supported)
3786  // a2: value (if FPU is not supported)
3787  // For double array type:
3788  // f0: value (if FPU is supported)
3789  // a2/a3: value (if FPU is not supported)
3790 
3791  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
3792  // For the Int and UnsignedInt array types, we need to see whether
3793  // the value can be represented in a Smi. If not, we need to convert
3794  // it to a HeapNumber.
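 // A 32-bit integer fits in a smi iff it lies in [-2^30, 2^30 - 1],
 // i.e. iff bits 31 and 30 agree. value - 0xC0000000 (= value + 2^30
 // mod 2^32) is non-negative exactly for that range, so a negative
 // result means the value needs a HeapNumber.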
3795  Label box_int;
3796  __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
3797  __ Branch(&box_int, lt, t3, Operand(zero_reg));
3798  // Tag integer as smi and return it.
3799  __ sll(v0, value, kSmiTagSize);
3800  __ Ret();
3801 
3802  __ bind(&box_int);
3803  // Allocate a HeapNumber for the result and perform int-to-double
3804  // conversion.
3805  // The arm version uses a temporary here to save r0, but we don't need to
3806  // (a0 is not modified).
3807  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3808  __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
3809 
3810  if (CpuFeatures::IsSupported(FPU)) {
3811  CpuFeatures::Scope scope(FPU);
3812  __ mtc1(value, f0);
3813  __ cvt_d_w(f0, f0);
3814  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
3815  __ Ret();
3816  } else {
3817  Register dst1 = t2;
3818  Register dst2 = t3;
3819  FloatingPointHelper::Destination dest =
3820  FloatingPointHelper::kCoreRegisters;
3821  FloatingPointHelper::ConvertIntToDouble(masm,
3822  value,
3823  dest,
3824  f0,
3825  dst1,
3826  dst2,
3827  t1,
3828  f2);
3829  __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
3830  __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
3831  __ Ret();
3832  }
3833  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
3834  // The test is different for unsigned int values. Since we need
3835  // the value to be in the range of a positive smi, we can't
3836  // handle either of the top two bits being set in the value.
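 // An unsigned value fits in a (positive) smi only if it is below 2^30,
 // i.e. bits 31 and 30 are both clear; hence the test against
 // 0xC0000000 below.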
3837  if (CpuFeatures::IsSupported(FPU)) {
3838  CpuFeatures::Scope scope(FPU);
3839  Label pl_box_int;
3840  __ And(t2, value, Operand(0xC0000000));
3841  __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
3842 
3843  // It can fit in an Smi.
3844  // Tag integer as smi and return it.
3845  __ sll(v0, value, kSmiTagSize);
3846  __ Ret();
3847 
3848  __ bind(&pl_box_int);
3849  // Allocate a HeapNumber for the result and perform int-to-double
3850  // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
3851  // registers - also when jumping due to exhausted young space.
3852  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3853  __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
3854 
3855  // This is replaced by a macro:
3856  // __ mtc1(value, f0); // LS 32-bits.
3857  // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
3858  // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
3859 
3860  __ Cvt_d_uw(f0, value, f22);
3861 
3862  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
3863 
3864  __ Ret();
3865  } else {
3866  // Check whether unsigned integer fits into smi.
3867  Label box_int_0, box_int_1, done;
3868  __ And(t2, value, Operand(0x80000000));
3869  __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
3870  __ And(t2, value, Operand(0x40000000));
3871  __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
3872 
3873  // Tag integer as smi and return it.
3874  __ sll(v0, value, kSmiTagSize);
3875  __ Ret();
3876 
3877  Register hiword = value; // a2.
3878  Register loword = a3;
3879 
3880  __ bind(&box_int_0);
3881  // Integer does not have leading zeros.
3882  GenerateUInt2Double(masm, hiword, loword, t0, 0);
3883  __ Branch(&done);
3884 
3885  __ bind(&box_int_1);
3886  // Integer has one leading zero.
3887  GenerateUInt2Double(masm, hiword, loword, t0, 1);
3888 
3889 
3890  __ bind(&done);
3891  // Integer was converted to double in registers hiword:loword.
3892  // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
3893  // clobbers all registers - also when jumping due to exhausted young
3894  // space.
3895  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3896  __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
3897 
3898  __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
3899  __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
3900 
3901  __ mov(v0, t2);
3902  __ Ret();
3903  }
3904  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3905  // For the floating-point array type, we need to always allocate a
3906  // HeapNumber.
3907  if (CpuFeatures::IsSupported(FPU)) {
3908  CpuFeatures::Scope scope(FPU);
3909  // Allocate a HeapNumber for the result. Don't use a0 and a1 as
3910  // AllocateHeapNumber clobbers all registers - also when jumping due to
3911  // exhausted young space.
3912  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3913  __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
3914  // The float (single) value is already in fpu reg f0 (if we use float).
3915  __ cvt_d_s(f0, f0);
3916  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
3917  __ Ret();
3918  } else {
3919  // Allocate a HeapNumber for the result. Don't use a0 and a1 as
3920  // AllocateHeapNumber clobbers all registers - also when jumping due to
3921  // exhausted young space.
3922  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3923  __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
3924  // FPU is not available, do manual single to double conversion.
3925 
3926  // a2: floating point value (binary32).
3927  // v0: heap number for result
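 // binary32: 1 sign bit, 8 exponent bits (bias 127), 23 mantissa bits.
 // binary64: 1 sign bit, 11 exponent bits (bias 1023), 52 mantissa bits.
 // So the exponent is rebiased by 1023 - 127 = 896 and the mantissa is
 // shifted up by 52 - 23 = 29 bits, straddling the hi/lo word boundary.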
3928 
3929  // Extract mantissa to t4.
3930  __ And(t4, value, Operand(kBinary32MantissaMask));
3931 
3932  // Extract exponent to t5.
3933  __ srl(t5, value, kBinary32MantissaBits);
3934  __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3935 
3936  Label exponent_rebiased;
3937  __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
3938 
3939  __ li(t0, 0x7ff);
3940  __ Xor(t1, t5, Operand(0xFF));
3941  __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
3942  __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
3943 
3944  // Rebias exponent.
3945  __ Addu(t5,
3946  t5,
3947  Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3948 
3949  __ bind(&exponent_rebiased);
3950  __ And(a2, value, Operand(kBinary32SignMask));
3951  value = no_reg;
3952  __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
3953  __ or_(a2, a2, t0);
3954 
3955  // Shift mantissa.
3956  static const int kMantissaShiftForHiWord =
3957  kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3958 
3959  static const int kMantissaShiftForLoWord =
3960  kBitsPerInt - kMantissaShiftForHiWord;
3961 
3962  __ srl(t0, t4, kMantissaShiftForHiWord);
3963  __ or_(a2, a2, t0);
3964  __ sll(a0, t4, kMantissaShiftForLoWord);
3965 
3966  __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
3967  __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
3968  __ Ret();
3969  }
3970 
3971  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3972  if (CpuFeatures::IsSupported(FPU)) {
3973  CpuFeatures::Scope scope(FPU);
3974  // Allocate a HeapNumber for the result. Don't use a0 and a1 as
3975  // AllocateHeapNumber clobbers all registers - also when jumping due to
3976  // exhausted young space.
3977  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3978  __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
3979  // The double value is already in f0
3980  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
3981  __ Ret();
3982  } else {
3983  // Allocate a HeapNumber for the result. Don't use a0 and a1 as
3984  // AllocateHeapNumber clobbers all registers - also when jumping due to
3985  // exhausted young space.
3986  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
3987  __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
3988 
3989  __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
3990  __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
3991  __ Ret();
3992  }
3993 
3994  } else {
3995  // Tag integer as smi and return it.
3996  __ sll(v0, value, kSmiTagSize);
3997  __ Ret();
3998  }
3999 
4000  // Slow case, key and receiver still in a0 and a1.
4001  __ bind(&slow);
4002  __ IncrementCounter(
4003  masm->isolate()->counters()->keyed_load_external_array_slow(),
4004  1, a2, a3);
4005 
4006  // ---------- S t a t e --------------
4007  // -- ra : return address
4008  // -- a0 : key
4009  // -- a1 : receiver
4010  // -----------------------------------
4011 
4012  __ Push(a1, a0);
4013 
4014  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
4015 
4016  __ bind(&miss_force_generic);
4017  Handle<Code> stub =
4018  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4019  __ Jump(stub, RelocInfo::CODE_TARGET);
4020 }
4021 
4022 
4023 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
4024  MacroAssembler* masm,
4025  ElementsKind elements_kind) {
4026  // ---------- S t a t e --------------
4027  // -- a0 : value
4028  // -- a1 : key
4029  // -- a2 : receiver
4030  // -- ra : return address
4031  // -----------------------------------
4032 
4033  Label slow, check_heap_number, miss_force_generic;
4034 
4035  // Register usage.
4036  Register value = a0;
4037  Register key = a1;
4038  Register receiver = a2;
4039  // a3 mostly holds the elements array or the destination external array.
4040 
4041  // This stub is meant to be tail-jumped to, the receiver must already
4042  // have been verified by the caller to not be a smi.
4043 
4044  // Check that the key is a smi or a heap number convertible to a smi.
4045  GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
4046 
4047  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
4048 
4049  // Check that the index is in range.
4050  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
4051  // Unsigned comparison catches both negative and too-large values.
4052  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
4053 
4054  // Handle both smis and HeapNumbers in the fast path. Go to the
4055  // runtime for all other kinds of values.
4056  // a3: external array.
4057 
4058  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
4059  // Double to pixel conversion is only implemented in the runtime for now.
4060  __ JumpIfNotSmi(value, &slow);
4061  } else {
4062  __ JumpIfNotSmi(value, &check_heap_number);
4063  }
4064  __ SmiUntag(t1, value);
4065  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
4066 
4067  // a3: base pointer of external storage.
4068  // t1: value (integer).
4069 
4070  switch (elements_kind) {
4071  case EXTERNAL_PIXEL_ELEMENTS: {
4072  // Clamp the value to [0..255].
4073  // v0 is used as a scratch register here.
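 // Clamping trick: v0 starts at 255. Values above 255 branch out keeping
 // 255; negative values branch out storing 0 via the delay slot; values
 // already in range fall through and overwrite v0 with themselves.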
4074  Label done;
4075  __ li(v0, Operand(255));
4076  // Normal branch: nop in delay slot.
4077  __ Branch(&done, gt, t1, Operand(v0));
4078  // Use delay slot in this branch.
4079  __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
4080  __ mov(v0, zero_reg); // In delay slot.
4081  __ mov(v0, t1); // Value is in range 0..255.
4082  __ bind(&done);
4083  __ mov(t1, v0);
4084 
4085  __ srl(t8, key, 1);
4086  __ addu(t8, a3, t8);
4087  __ sb(t1, MemOperand(t8, 0));
4088  }
4089  break;
4090  case EXTERNAL_BYTE_ELEMENTS:
4091  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4092  __ srl(t8, key, 1);
4093  __ addu(t8, a3, t8);
4094  __ sb(t1, MemOperand(t8, 0));
4095  break;
4096  case EXTERNAL_SHORT_ELEMENTS:
4097  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4098  __ addu(t8, a3, key);
4099  __ sh(t1, MemOperand(t8, 0));
4100  break;
4101  case EXTERNAL_INT_ELEMENTS:
4102  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4103  __ sll(t8, key, 1);
4104  __ addu(t8, a3, t8);
4105  __ sw(t1, MemOperand(t8, 0));
4106  break;
4107  case EXTERNAL_FLOAT_ELEMENTS:
4108  // Perform int-to-float conversion and store to memory.
4109  __ SmiUntag(t0, key);
4110  StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
4111  break;
4112  case EXTERNAL_DOUBLE_ELEMENTS:
4113  __ sll(t8, key, 2);
4114  __ addu(a3, a3, t8);
4115  // a3: effective address of the double element
4116  FloatingPointHelper::Destination destination;
4117  if (CpuFeatures::IsSupported(FPU)) {
4118  destination = FloatingPointHelper::kFPURegisters;
4119  } else {
4120  destination = FloatingPointHelper::kCoreRegisters;
4121  }
4122  FloatingPointHelper::ConvertIntToDouble(
4123  masm, t1, destination,
4124  f0, t2, t3, // These are: double_dst, dst1, dst2.
4125  t0, f2); // These are: scratch2, single_scratch.
4126  if (destination == FloatingPointHelper::kFPURegisters) {
4127  CpuFeatures::Scope scope(FPU);
4128  __ sdc1(f0, MemOperand(a3, 0));
4129  } else {
4130  __ sw(t2, MemOperand(a3, 0));
4131  __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
4132  }
4133  break;
4134  case FAST_ELEMENTS:
4135  case FAST_SMI_ELEMENTS:
4136  case FAST_DOUBLE_ELEMENTS:
4137  case FAST_HOLEY_ELEMENTS:
4138  case FAST_HOLEY_SMI_ELEMENTS:
4139  case FAST_HOLEY_DOUBLE_ELEMENTS:
4140  case DICTIONARY_ELEMENTS:
4141  case NON_STRICT_ARGUMENTS_ELEMENTS:
4142  UNREACHABLE();
4143  break;
4144  }
4145 
4146  // Entry registers are intact, a0 holds the value which is the return value.
4147  __ mov(v0, a0);
4148  __ Ret();
4149 
4150  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
4151  // a3: external array.
4152  __ bind(&check_heap_number);
4153  __ GetObjectType(value, t1, t2);
4154  __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
4155 
4156  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
4157 
4158  // a3: base pointer of external storage.
4159 
4160  // The WebGL specification leaves the behavior of storing NaN and
4161  // +/-Infinity into integer arrays basically undefined. For more
4162  // reproducible behavior, convert these to zero.
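 // EmitECMATruncate (used below for the integer kinds) implements the
 // ECMA-262 ToInt32 truncation, so NaN and +/-Infinity come out as 0.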
4163 
4164  if (CpuFeatures::IsSupported(FPU)) {
4165  CpuFeatures::Scope scope(FPU);
4166 
4167  __ ldc1(f0, FieldMemOperand(value, HeapNumber::kValueOffset));
4168 
4169  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4170  __ cvt_s_d(f0, f0);
4171  __ sll(t8, key, 1);
4172  __ addu(t8, a3, t8);
4173  __ swc1(f0, MemOperand(t8, 0));
4174  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4175  __ sll(t8, key, 2);
4176  __ addu(t8, a3, t8);
4177  __ sdc1(f0, MemOperand(t8, 0));
4178  } else {
4179  __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
4180 
4181  switch (elements_kind) {
4182  case EXTERNAL_BYTE_ELEMENTS:
4183  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4184  __ srl(t8, key, 1);
4185  __ addu(t8, a3, t8);
4186  __ sb(t3, MemOperand(t8, 0));
4187  break;
4188  case EXTERNAL_SHORT_ELEMENTS:
4189  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4190  __ addu(t8, a3, key);
4191  __ sh(t3, MemOperand(t8, 0));
4192  break;
4193  case EXTERNAL_INT_ELEMENTS:
4194  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4195  __ sll(t8, key, 1);
4196  __ addu(t8, a3, t8);
4197  __ sw(t3, MemOperand(t8, 0));
4198  break;
4199  case EXTERNAL_PIXEL_ELEMENTS:
4200  case EXTERNAL_FLOAT_ELEMENTS:
4201  case EXTERNAL_DOUBLE_ELEMENTS:
4202  case FAST_ELEMENTS:
4203  case FAST_SMI_ELEMENTS:
4204  case FAST_DOUBLE_ELEMENTS:
4205  case FAST_HOLEY_ELEMENTS:
4206  case FAST_HOLEY_SMI_ELEMENTS:
4207  case FAST_HOLEY_DOUBLE_ELEMENTS:
4208  case DICTIONARY_ELEMENTS:
4209  case NON_STRICT_ARGUMENTS_ELEMENTS:
4210  UNREACHABLE();
4211  break;
4212  }
4213  }
4214 
4215  // Entry registers are intact, a0 holds the value
4216  // which is the return value.
4217  __ mov(v0, a0);
4218  __ Ret();
4219  } else {
4220  // FPU is not available, do manual conversions.
4221 
4222  __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
4223  __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
4224 
4225  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4226  Label done, nan_or_infinity_or_zero;
4227  static const int kMantissaInHiWordShift =
4228  kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
4229 
4230  static const int kMantissaInLoWordShift =
4231  kBitsPerInt - kMantissaInHiWordShift;
4232 
4233  // Test for all special exponent values: zeros, subnormal numbers, NaNs
4234  // and infinities. All these should be converted to 0.
4235  __ li(t5, HeapNumber::kExponentMask);
4236  __ and_(t6, t3, t5);
4237  __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
4238 
4239  __ xor_(t1, t6, t5);
4240  __ li(t2, kBinary32ExponentMask);
4241  __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
4242  __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
4243 
4244  // Rebias exponent.
4245  __ srl(t6, t6, HeapNumber::kExponentShift);
4246  __ Addu(t6,
4247  t6,
4248  Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
4249 
4250  __ li(t1, Operand(kBinary32MaxExponent));
4251  __ Slt(t1, t1, t6);
4252  __ And(t2, t3, Operand(HeapNumber::kSignMask));
4253  __ Or(t2, t2, Operand(kBinary32ExponentMask));
4254  __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
4255  __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
4256 
4257  __ Slt(t1, t6, Operand(kBinary32MinExponent));
4258  __ And(t2, t3, Operand(HeapNumber::kSignMask));
4259  __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
4260  __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
4261 
4262  __ And(t7, t3, Operand(HeapNumber::kSignMask));
4263  __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
4264  __ sll(t3, t3, kMantissaInHiWordShift);
4265  __ or_(t7, t7, t3);
4266  __ srl(t4, t4, kMantissaInLoWordShift);
4267  __ or_(t7, t7, t4);
4268  __ sll(t6, t6, kBinary32ExponentShift);
4269  __ or_(t3, t7, t6);
4270 
4271  __ bind(&done);
4272  __ sll(t9, key, 1);
4273  __ addu(t9, a3, t9);
4274  __ sw(t3, MemOperand(t9, 0));
4275 
4276  // Entry registers are intact, a0 holds the value which is the return
4277  // value.
4278  __ mov(v0, a0);
4279  __ Ret();
4280 
4281  __ bind(&nan_or_infinity_or_zero);
4282  __ And(t7, t3, Operand(HeapNumber::kSignMask));
4283  __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
4284  __ or_(t6, t6, t7);
4285  __ sll(t3, t3, kMantissaInHiWordShift);
4286  __ or_(t6, t6, t3);
4287  __ srl(t4, t4, kMantissaInLoWordShift);
4288  __ or_(t3, t6, t4);
4289  __ Branch(&done);
4290  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4291  __ sll(t8, key, 2);
4292  __ addu(t8, a3, t8);
4293  // t8: effective address of destination element.
4294  __ sw(t4, MemOperand(t8, 0));
4295  __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
4296  __ mov(v0, a0);
4297  __ Ret();
4298  } else {
4299  bool is_signed_type = IsElementTypeSigned(elements_kind);
4300  int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
4301  int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
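 // Manual ECMA-style truncation: an exponent too large for the element
 // type stores min_value (0x80000000 signed, 0 unsigned); otherwise the
 // mantissa, with its implicit leading 1 restored, is shifted into place
 // and negated if the sign bit was set.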
4302 
4303  Label done, sign;
4304 
4305  // Test for all special exponent values: zeros, subnormal numbers, NaNs
4306  // and infinities. All these should be converted to 0.
4307  __ li(t5, HeapNumber::kExponentMask);
4308  __ and_(t6, t3, t5);
4309  __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
4310  __ Branch(&done, eq, t6, Operand(zero_reg));
4311 
4312  __ xor_(t2, t6, t5);
4313  __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
4314  __ Branch(&done, eq, t6, Operand(t5));
4315 
4316  // Unbias exponent.
4317  __ srl(t6, t6, HeapNumber::kExponentShift);
4318  __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
4319  // If exponent is negative then result is 0.
4320  __ slt(t2, t6, zero_reg);
4321  __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
4322  __ Branch(&done, lt, t6, Operand(zero_reg));
4323 
4324  // If exponent is too big then result is minimal value.
4325  __ slti(t1, t6, meaningfull_bits - 1);
4326  __ li(t2, min_value);
4327  __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
4328  __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
4329 
4330  __ And(t5, t3, Operand(HeapNumber::kSignMask));
4331  __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
4332  __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
4333 
4334  __ li(t9, HeapNumber::kMantissaBitsInTopWord);
4335  __ subu(t6, t9, t6);
4336  __ slt(t1, t6, zero_reg);
4337  __ srlv(t2, t3, t6);
4338  __ Movz(t3, t2, t1); // Only if t6 is positive.
4339  __ Branch(&sign, ge, t6, Operand(zero_reg));
4340 
4341  __ subu(t6, zero_reg, t6);
4342  __ sllv(t3, t3, t6);
4343  __ li(t9, meaningfull_bits);
4344  __ subu(t6, t9, t6);
4345  __ srlv(t4, t4, t6);
4346  __ or_(t3, t3, t4);
4347 
4348  __ bind(&sign);
4349  __ subu(t2, t3, zero_reg);
4350  __ Movz(t3, t2, t5); // Only if t5 is zero.
4351 
4352  __ bind(&done);
4353 
4354  // Result is in t3.
4355  // This switch block should be exactly the same as above (FPU mode).
4356  switch (elements_kind) {
4359  __ srl(t8, key, 1);
4360  __ addu(t8, a3, t8);
4361  __ sb(t3, MemOperand(t8, 0));
4362  break;
4365  __ addu(t8, a3, key);
4366  __ sh(t3, MemOperand(t8, 0));
4367  break;
4368  case EXTERNAL_INT_ELEMENTS:
4370  __ sll(t8, key, 1);
4371  __ addu(t8, a3, t8);
4372  __ sw(t3, MemOperand(t8, 0));
4373  break;
4377  case FAST_ELEMENTS:
4378  case FAST_SMI_ELEMENTS:
4379  case FAST_DOUBLE_ELEMENTS:
4380  case FAST_HOLEY_ELEMENTS:
4383  case DICTIONARY_ELEMENTS:
4385  UNREACHABLE();
4386  break;
4387  }
4388  }
4389  }
4390  }
4391 
4392  // Slow case, key and receiver still in a0 and a1.
4393  __ bind(&slow);
4394  __ IncrementCounter(
4395  masm->isolate()->counters()->keyed_load_external_array_slow(),
4396  1, a2, a3);
4397  // Entry registers are intact.
4398  // ---------- S t a t e --------------
4399  // -- ra : return address
4400  // -- a0 : key
4401  // -- a1 : receiver
4402  // -----------------------------------
4403  Handle<Code> slow_ic =
4404  masm->isolate()->builtins()->KeyedStoreIC_Slow();
4405  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
4406 
4407  // Miss case, call the runtime.
4408  __ bind(&miss_force_generic);
4409 
4410  // ---------- S t a t e --------------
4411  // -- ra : return address
4412  // -- a0 : key
4413  // -- a1 : receiver
4414  // -----------------------------------
4415 
4416  Handle<Code> miss_ic =
4417  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4418  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
4419 }
4420 
4421 
4422 void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
4423  // ----------- S t a t e -------------
4424  // -- ra : return address
4425  // -- a0 : key
4426  // -- a1 : receiver
4427  // -----------------------------------
4428  Label miss_force_generic;
4429 
4430  // This stub is meant to be tail-jumped to, the receiver must already
4431  // have been verified by the caller to not be a smi.
4432 
4433  // Check that the key is a smi or a heap number convertible to a smi.
4434  GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
4435 
4436  // Get the elements array.
4437  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
4438  __ AssertFastElements(a2);
4439 
4440  // Check that the key is within bounds.
4441  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
4442  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
4443 
4444  // Load the result and make sure it's not the hole.
4445  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4446  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4447  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
4448  __ Addu(t0, t0, a3);
4449  __ lw(t0, MemOperand(t0));
4450  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4451  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
4452  __ Ret(USE_DELAY_SLOT);
4453  __ mov(v0, t0);
4454 
4455  __ bind(&miss_force_generic);
4456  Handle<Code> stub =
4457  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4458  __ Jump(stub, RelocInfo::CODE_TARGET);
4459 }
4460 
4461 
4462 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
4463  MacroAssembler* masm) {
4464  // ----------- S t a t e -------------
4465  // -- ra : return address
4466  // -- a0 : key
4467  // -- a1 : receiver
4468  // -----------------------------------
4469  Label miss_force_generic, slow_allocate_heapnumber;
4470 
4471  Register key_reg = a0;
4472  Register receiver_reg = a1;
4473  Register elements_reg = a2;
4474  Register heap_number_reg = a2;
4475  Register indexed_double_offset = a3;
4476  Register scratch = t0;
4477  Register scratch2 = t1;
4478  Register scratch3 = t2;
4479  Register heap_number_map = t3;
4480 
4481  // This stub is meant to be tail-jumped to, the receiver must already
4482  // have been verified by the caller to not be a smi.
4483 
4484  // Check that the key is a smi or a heap number convertible to a smi.
4485  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
4486 
4487  // Get the elements array.
4488  __ lw(elements_reg,
4489  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4490 
4491  // Check that the key is within bounds.
4492  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4493  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
4494 
4495  // Load the upper word of the double in the fixed array and test for NaN.
4496  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
4497  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
4498  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
4499  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
4500  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
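 // The hole is a NaN whose upper word is the reserved kHoleNanUpper32
 // pattern, distinct from any NaN real arithmetic produces, so comparing
 // the upper word alone is enough to detect it.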
4501 
4502  // Non-NaN. Allocate a new heap number and copy the double value into it.
4503  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4504  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
4505  heap_number_map, &slow_allocate_heapnumber);
4506 
4507  // Don't need to reload the upper 32 bits of the double, it's already in
4508  // scratch.
4509  __ sw(scratch, FieldMemOperand(heap_number_reg,
4510  HeapNumber::kExponentOffset));
4511  __ lw(scratch, FieldMemOperand(indexed_double_offset,
4512  FixedArray::kHeaderSize));
4513  __ sw(scratch, FieldMemOperand(heap_number_reg,
4514  HeapNumber::kMantissaOffset));
4515 
4516  __ mov(v0, heap_number_reg);
4517  __ Ret();
4518 
4519  __ bind(&slow_allocate_heapnumber);
4520  Handle<Code> slow_ic =
4521  masm->isolate()->builtins()->KeyedLoadIC_Slow();
4522  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
4523 
4524  __ bind(&miss_force_generic);
4525  Handle<Code> miss_ic =
4526  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4527  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
4528 }
4529 
4530 
4531 void KeyedStoreStubCompiler::GenerateStoreFastElement(
4532  MacroAssembler* masm,
4533  bool is_js_array,
4534  ElementsKind elements_kind,
4535  KeyedAccessGrowMode grow_mode) {
4536  // ----------- S t a t e -------------
4537  // -- a0 : value
4538  // -- a1 : key
4539  // -- a2 : receiver
4540  // -- ra : return address
4541  // -- a3 : scratch
4542  // -- a4 : scratch (elements)
4543  // -----------------------------------
4544  Label miss_force_generic, transition_elements_kind, grow, slow;
4545  Label finish_store, check_capacity;
4546 
4547  Register value_reg = a0;
4548  Register key_reg = a1;
4549  Register receiver_reg = a2;
4550  Register scratch = t0;
4551  Register elements_reg = a3;
4552  Register length_reg = t1;
4553  Register scratch2 = t2;
4554 
4555  // This stub is meant to be tail-jumped to, the receiver must already
4556  // have been verified by the caller to not be a smi.
4557 
4558  // Check that the key is a smi or a heap number convertible to a smi.
4559  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
4560 
4561  if (IsFastSmiElementsKind(elements_kind)) {
4562  __ JumpIfNotSmi(value_reg, &transition_elements_kind);
4563  }
4564 
4565  // Check that the key is within bounds.
4566  __ lw(elements_reg,
4567  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4568  if (is_js_array) {
4569  __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4570  } else {
4571  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4572  }
4573  // Compare smis.
4574  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4575  __ Branch(&grow, hs, key_reg, Operand(scratch));
4576  } else {
4577  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
4578  }
4579 
4580  // Make sure elements is a fast element array, not 'cow'.
4581  __ CheckMap(elements_reg,
4582  scratch,
4583  Heap::kFixedArrayMapRootIndex,
4584  &miss_force_generic,
4585  DONT_DO_SMI_CHECK);
4586 
4587  __ bind(&finish_store);
4588 
4589  if (IsFastSmiElementsKind(elements_kind)) {
4590  __ Addu(scratch,
4591  elements_reg,
4592  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4593  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4594  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
4595  __ Addu(scratch, scratch, scratch2);
4596  __ sw(value_reg, MemOperand(scratch));
4597  } else {
4598  ASSERT(IsFastObjectElementsKind(elements_kind));
4599  __ Addu(scratch,
4600  elements_reg,
4601  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4602  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4603  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
4604  __ Addu(scratch, scratch, scratch2);
4605  __ sw(value_reg, MemOperand(scratch));
4606  __ mov(receiver_reg, value_reg);
4607  __ RecordWrite(elements_reg, // Object.
4608  scratch, // Address.
4609  receiver_reg, // Value.
4610  kRAHasNotBeenSaved,
4611  kDontSaveFPRegs);
4612  }
4613  // value_reg (a0) is preserved.
4614  // Done.
4615  __ Ret();
4616 
4617  __ bind(&miss_force_generic);
4618  Handle<Code> ic =
4619  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4620  __ Jump(ic, RelocInfo::CODE_TARGET);
4621 
4622  __ bind(&transition_elements_kind);
4623  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
4624  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
4625 
4626  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4627  // Grow the array by a single element if possible.
4628  __ bind(&grow);
4629 
4630  // Make sure the array is only growing by a single element, anything else
4631  // must be handled by the runtime.
4632  __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
4633 
4634  // Check for the empty array, and preallocate a small backing store if
4635  // possible.
4636  __ lw(length_reg,
4637  FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4638  __ lw(elements_reg,
4639  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4640  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4641  __ Branch(&check_capacity, ne, elements_reg, Operand(at));
4642 
4643  int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
4644  __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
4645  TAG_OBJECT);
4646 
4647  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
4648  __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
4649  __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
4650  __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4651  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
4652  for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
4653  __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
4654  }
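 // Slot 0 is skipped on purpose: the value being stored is written into
 // it immediately below, so only the remaining preallocated slots need
 // the hole filler.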
4655 
4656  // Store the element at index zero.
4657  __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
4658 
4659  // Install the new backing store in the JSArray.
4660  __ sw(elements_reg,
4661  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4662  __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
4663  scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
4664  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4665 
4666  // Increment the length of the array.
4667  __ li(length_reg, Operand(Smi::FromInt(1)));
4668  __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4669  __ Ret();
4670 
4671  __ bind(&check_capacity);
4672  // Check for COW elements; in general they are not handled by this stub.
4673  __ CheckMap(elements_reg,
4674  scratch,
4675  Heap::kFixedCOWArrayMapRootIndex,
4676  &miss_force_generic,
4677  DONT_DO_SMI_CHECK);
4678 
4679  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4680  __ Branch(&slow, hs, length_reg, Operand(scratch));
4681 
4682  // Grow the array and finish the store.
4683  __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
4684  __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4685  __ jmp(&finish_store);
4686 
4687  __ bind(&slow);
4688  Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
4689  __ Jump(ic_slow, RelocInfo::CODE_TARGET);
4690  }
4691 }
4692 
4693 
4694 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
4695  MacroAssembler* masm,
4696  bool is_js_array,
4697  KeyedAccessGrowMode grow_mode) {
4698  // ----------- S t a t e -------------
4699  // -- a0 : value
4700  // -- a1 : key
4701  // -- a2 : receiver
4702  // -- ra : return address
4703  // -- a3 : scratch
4704  // -- t0 : scratch (elements_reg)
4705  // -- t1 : scratch (mantissa_reg)
4706  // -- t2 : scratch (exponent_reg)
4707  // -- t3 : scratch4
4708  // -----------------------------------
4709  Label miss_force_generic, transition_elements_kind, grow, slow;
4710  Label finish_store, check_capacity;
4711 
4712  Register value_reg = a0;
4713  Register key_reg = a1;
4714  Register receiver_reg = a2;
4715  Register elements_reg = a3;
4716  Register scratch1 = t0;
4717  Register scratch2 = t1;
4718  Register scratch3 = t2;
4719  Register scratch4 = t3;
4720  Register length_reg = t3;
4721 
4722  // This stub is meant to be tail-jumped to, the receiver must already
4723  // have been verified by the caller to not be a smi.
4724 
4725  // Check that the key is a smi or a heap number convertible to a smi.
4726  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
4727 
4728  __ lw(elements_reg,
4729  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4730 
4731  // Check that the key is within bounds.
4732  if (is_js_array) {
4733  __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4734  } else {
4735  __ lw(scratch1,
4736  FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
4737  }
4738  // Compare smis, unsigned compare catches both negative and out-of-bound
4739  // indexes.
4740  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
4741  __ Branch(&grow, hs, key_reg, Operand(scratch1));
4742  } else {
4743  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
4744  }
4745 
4746  __ bind(&finish_store);
4747 
4748  __ StoreNumberToDoubleElements(value_reg,
4749  key_reg,
4750  receiver_reg,
4751  // All registers after this are overwritten.
4752  elements_reg,
4753  scratch1,
4754  scratch2,
4755  scratch3,
4756  scratch4,
4757  &transition_elements_kind);
4758 
4759  __ Ret(USE_DELAY_SLOT);
4760  __ mov(v0, value_reg); // In delay slot.
4761 
4762  // Handle store cache miss, replacing the ic with the generic stub.
4763  __ bind(&miss_force_generic);
4764  Handle<Code> ic =
4765  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4766  __ Jump(ic, RelocInfo::CODE_TARGET);
4767 
4768  __ bind(&transition_elements_kind);
4769  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
4770  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
4771 
4772  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4773  // Grow the array by a single element if possible.
4774  __ bind(&grow);
4775 
4776  // Make sure the array is only growing by a single element, anything else
4777  // must be handled by the runtime.
4778  __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
4779 
4780  // Transition on values that can't be stored in a FixedDoubleArray.
4781  Label value_is_smi;
4782  __ JumpIfSmi(value_reg, &value_is_smi);
4783  __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
4784  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4785  __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
4786  __ bind(&value_is_smi);
4787 
4788  // Check for the empty array, and preallocate a small backing store if
4789  // possible.
4790  __ lw(length_reg,
4791  FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4792  __ lw(elements_reg,
4794  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4795  __ Branch(&check_capacity, ne, elements_reg, Operand(at));
4796 
4797  int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
4798  __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
4799  TAG_OBJECT);
4800 
4801  // Initialize the new FixedDoubleArray. Leave elements uninitialized for
4802  // efficiency, they are guaranteed to be initialized before use.
4803  __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
4804  __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
4805  __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
4806  __ sw(scratch1,
4807  FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
4808 
4809  // Install the new backing store in the JSArray.
4810  __ sw(elements_reg,
4811  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4812  __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
4813  scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
4814  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4815 
4816  // Increment the length of the array.
4817  __ li(length_reg, Operand(Smi::FromInt(1)));
4818  __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4819  __ lw(elements_reg,
4820  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4821  __ jmp(&finish_store);
4822 
4823  __ bind(&check_capacity);
4824  // Make sure that the backing store can hold additional elements.
4825  __ lw(scratch1,
4826  FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
4827  __ Branch(&slow, hs, length_reg, Operand(scratch1));
4828 
4829  // Grow the array and finish the store.
4830  __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
4831  __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4832  __ jmp(&finish_store);
4833 
4834  __ bind(&slow);
4835  Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
4836  __ Jump(ic_slow, RelocInfo::CODE_TARGET);
4837  }
4838 }
4839 
4840 
4841 #undef __
4842 
4843 } } // namespace v8::internal
4844 
4845 #endif // V8_TARGET_ARCH_MIPS
byte * Address
Definition: globals.h:157
static const int kBitFieldOffset
Definition: objects.h:5160
Handle< Code > CompileLoadFunctionPrototype(Handle< String > name)
Handle< Code > CompileLoadCallback(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< AccessorInfo > callback)
const intptr_t kSmiTagMask
Definition: v8.h:4016
static void GenerateStoreViaSetter(MacroAssembler *masm, Handle< JSFunction > setter)
static const int kCodeEntryOffset
Definition: objects.h:6182
Handle< Code > CompileStoreField(Handle< JSObject > object, int index, Handle< Map > transition, Handle< String > name)
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:6183
static int SlotOffset(int index)
Definition: contexts.h:425
static const int kDataOffset
Definition: objects.h:8409
const int kBinary32ExponentShift
Definition: globals.h:250
Handle< Code > CompileLoadNonexistent(Handle< String > name, Handle< JSObject > object, Handle< JSObject > last)
const int kDoubleSizeLog2
Definition: globals.h:222
Handle< Code > CompileStoreElement(Handle< Map > receiver_map)
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
const Register cp
static const uint32_t kExponentMask
Definition: objects.h:1352
static const int kFlagsOffset
Definition: objects.h:4540
const uint32_t kBinary32MantissaMask
Definition: globals.h:245
const int kBinary32MaxExponent
Definition: globals.h:247
const FPURegister f0
static Smi * FromInt(int value)
Definition: objects-inl.h:981
bool IsFastObjectElementsKind(ElementsKind kind)
#define LOG(isolate, Call)
Definition: log.h:81
static void GenerateStoreExternalArray(MacroAssembler *masm, ElementsKind elements_kind)
const FPURegister f22
static const int kGlobalReceiverOffset
Definition: objects.h:6288
static void GenerateLoadFastDoubleElement(MacroAssembler *masm)
const int kBinary32MantissaBits
Definition: globals.h:249
const uint32_t kFCSRExceptionFlagMask
static StubType ExtractTypeFromFlags(Flags flags)
Definition: objects-inl.h:3538
static const int kExponentBias
Definition: objects.h:1356
int int32_t
Definition: unicode.cc:47
static bool IsSupported(CpuFeature f)
static const int kExternalPointerOffset
Definition: objects.h:3741
static const int kHasNamedInterceptor
Definition: objects.h:5169
static const int kIsAccessCheckNeeded
Definition: objects.h:5173
List< Handle< Map > > MapHandleList
Definition: list.h:198
#define ASSERT(condition)
Definition: checks.h:270
const int kPointerSizeLog2
Definition: globals.h:232
static const int kInstanceSizeOffset
Definition: objects.h:5147
static const int kDebugInfoOffset
Definition: objects.h:5805
static const int kContextOffset
Definition: objects.h:6187
Handle< Code > CompileLoadField(Handle< JSObject > object, Handle< JSObject > holder, int index, Handle< String > name)
Handle< Code > CompileStoreInterceptor(Handle< JSObject > object, Handle< String > name)
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
Handle< Code > CompileStoreField(Handle< JSObject > object, int index, Handle< Map > transition, Handle< String > name)
static const int kHashFieldOffset
Definition: objects.h:7319
const Register sp
Handle< Code > CompileLoadInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
#define UNREACHABLE()
Definition: checks.h:50
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
Definition: objects.h:7318
Handle< Code > CompileCallGlobal(Handle< JSObject > object, Handle< GlobalObject > holder, Handle< JSGlobalPropertyCell > cell, Handle< JSFunction > function, Handle< String > name)
static const int kExponentShift
Definition: objects.h:1357
Handle< Code > CompileLoadField(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, int index)
static const int kValueOffset
Definition: objects.h:1342
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< JSFunction > getter)
const uint32_t kHoleNanUpper32
Definition: v8globals.h:469
Handle< Code > CompileStoreGlobal(Handle< GlobalObject > object, Handle< JSGlobalPropertyCell > holder, Handle< String > name)
Handle< Code > CompileLoadViaGetter(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< JSFunction > getter)
Handle< Code > CompileLoadConstant(Handle< JSObject > object, Handle< JSObject > holder, Handle< JSFunction > value, Handle< String > name)
Handle< Code > CompileLoadConstant(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< JSFunction > value)
Handle< Code > CompileCallField(Handle< JSObject > object, Handle< JSObject > holder, int index, Handle< String > name)
const int kPointerSize
Definition: globals.h:220
static void GenerateStoreFastElement(MacroAssembler *masm, bool is_js_array, ElementsKind element_kind, KeyedAccessGrowMode grow_mode)
Handle< Code > CompileLoadStringLength(Handle< String > name)
const int kHeapObjectTag
Definition: v8.h:4009
const uint32_t kHoleNanLower32
Definition: v8globals.h:470
#define __
static bool decode(uint32_t value)
Definition: utils.h:273
static const int kPropertiesOffset
Definition: objects.h:2171
const int kBinary32MinExponent
Definition: globals.h:248
Handle< Code > CompileLoadGlobal(Handle< JSObject > object, Handle< GlobalObject > holder, Handle< JSGlobalPropertyCell > cell, Handle< String > name, bool is_dont_delete)
Handle< Code > CompileStoreCallback(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< AccessorInfo > callback)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
bool IsFastSmiElementsKind(ElementsKind kind)
static void GenerateMapChangeElementsTransition(MacroAssembler *masm)
const int kBinary32ExponentBias
Definition: globals.h:246
static const int kDataOffset
Definition: objects.h:8539
static int SizeFor(int length)
Definition: objects.h:2434
static const int kElementsOffset
Definition: objects.h:2172
const FPURegister f2
const uint32_t kStringTag
Definition: objects.h:456
#define BASE_EMBEDDED
Definition: allocation.h:68
const int kBitsPerInt
Definition: globals.h:240
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
static void GenerateLoadExternalArray(MacroAssembler *masm, ElementsKind elements_kind)
static const int kLengthOffset
Definition: objects.h:8332
static int SizeFor(int length)
Definition: objects.h:2353
static const int kHeaderSize
Definition: objects.h:2296
static const int kMapOffset
Definition: objects.h:1261
static const int kMantissaBitsInTopWord
Definition: objects.h:1358
const uint32_t kIsNotStringMask
Definition: objects.h:455
List< Handle< Code > > CodeHandleList
Definition: list.h:199
static const int kLengthOffset
Definition: objects.h:2295
static void ConvertIntToDouble(MacroAssembler *masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register scratch2, SwVfpRegister single_scratch)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
Handle< Code > CompileCallInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
MemOperand FieldMemOperand(Register object, int offset)
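FieldMemOperand is used for every tagged-object field access in the generated stubs: heap-object pointers carry a low tag bit, so a field at untagged offset N (the k*Offset constants in this index) is addressed at N - kHeapObjectTag. A self-contained sketch with stand-in types (the real Register and MemOperand come from the MIPS assembler headers):

 struct Register { int code; };

 struct MemOperand {
   Register base;
   int offset;
   MemOperand(Register rm, int off) : base(rm), offset(off) {}
 };

 const int kHeapObjectTag = 1;  // low bit set on heap object pointers

 // Subtract the tag so callers can pass the untagged field offsets
 // declared in objects.h (kMapOffset, kElementsOffset, ...).
 inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }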
static const int kDataOffset
Definition: objects.h:8563
const FPUControlRegister FCSR
static void GenerateLoadFastElement(MacroAssembler *masm)
static const uint32_t kSignMask
Definition: objects.h:1351
friend class Isolate
Definition: stub-cache.h:392
const int kSmiTagSize
Definition: v8.h:4015
static void GenerateStoreFastDoubleElement(MacroAssembler *masm, bool is_js_array, KeyedAccessGrowMode grow_mode)
static const int kHeaderSize
Definition: objects.h:4549
static Handle< T > null()
Definition: handles.h:86
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
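ASSERT_EQ, like the other ASSERT macros in checks.h, is active only in debug builds; release builds compile it away so the stub compilers pay no cost for their invariants. A hedged sketch of the usual debug/release split (CHECK_EQ is the always-on variant that aborts and prints both values):

 #ifdef DEBUG
 #define ASSERT_EQ(v1, v2)  CHECK_EQ(v1, v2)  // verified in debug builds
 #else
 #define ASSERT_EQ(v1, v2)  ((void) 0)        // compiled out otherwise
 #endif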
Handle< Code > CompileLoadArrayLength(Handle< String > name)
static const uint32_t kMantissaMask
Definition: objects.h:1353
const int kSmiTag
Definition: v8.h:4014
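kSmiTag, kSmiTagSize, and the heap-object tag constants listed above encode V8's 32-bit pointer tagging: a small integer (smi) is stored shifted left by one with a low bit of 0, while heap-object pointers carry a low bit of 1, which is why the generated stubs can type-check a value with a single mask. A sketch using the 32-bit constant values from v8.h; the helper names (IsSmiWord, SmiFromInt, IntFromSmi) are illustrative, not V8's:

 #include <stdint.h>

 const int kSmiTag = 0;       // tag value of a smi
 const int kSmiTagSize = 1;   // one low tag bit
 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

 // A word is a smi when its low tag bit is clear.
 inline bool IsSmiWord(intptr_t value) {
   return (value & kSmiTagMask) == kSmiTag;
 }

 // Conversions: a smi is the integer shifted into the tagged position.
 inline intptr_t SmiFromInt(int value) {
   return static_cast<intptr_t>(value) << kSmiTagSize;
 }
 inline int IntFromSmi(intptr_t smi) {
   return static_cast<int>(smi >> kSmiTagSize);
 }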
Handle< Code > CompileCallConstant(Handle< Object > object, Handle< JSObject > holder, Handle< JSFunction > function, Handle< String > name, CheckType check)
static AccessorInfo * cast(Object *obj)
const uint32_t kBinary32ExponentMask
Definition: globals.h:244
const uint32_t kBinary32SignMask
Definition: globals.h:243
const int kHeapObjectTagSize
Definition: v8.h:4010
static const int kSizeInBytes
Definition: assembler-arm.h:75
static Handle< JSGlobalPropertyCell > EnsurePropertyCell(Handle< GlobalObject > global, Handle< String > name)
Definition: objects.cc:12019
static bool HasCustomCallGenerator(Handle< JSFunction > function)
Definition: stub-cache.cc:1444
NULL
Definition: flags.cc:301
static const int kPreallocatedArrayElements
Definition: objects.h:8329
static const int kPrototypeOffset
Definition: objects.h:5126
static const int kFlagsNotUsedInLookup
Definition: objects.h:4649
const int kInvalidProtoDepth
const Register no_reg
DEFINE_bool(code_comments, ...)
static const int kValueOffset
Definition: objects.h:6385
const Register fp
static const int kNativeContextOffset
Definition: objects.h:6286
Handle< Code > CompileLoadCallback(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< AccessorInfo > callback)
Handle< Code > CompileLoadPolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_ics)
Handle< Code > CompileLoadInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static const int kSharedFunctionInfoOffset
Definition: objects.h:6185
KeyedAccessGrowMode
Definition: objects.h:142
Print usage including flags
Definition: flags.cc:495
static const int kMantissaBits
Definition: objects.h:1354
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1348
Handle< Code > CompileLoadElement(Handle< Map > receiver_map)
Handle< Code > CompileConstructStub(Handle< JSFunction > function)
Handle< Code > CompileStoreViaSetter(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< JSFunction > setter)
static JSObject * cast(Object *obj)
static const int kInstanceTypeOffset
Definition: objects.h:5158
static const int kMantissaOffset
Definition: objects.h:1347
static JSFunction * cast(Object *obj)