v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
stub-cache-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


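// In the generators below, the __ macro expands to ACCESS_MASM(masm), the
// usual V8 shorthand for emitting instructions through the macro assembler.
//
// The stub cache is a pair of hash tables (a primary and a secondary table)
// keyed on property name and receiver map. Each entry is three words wide:
// the name, the cached code object, and the map (hence the
// ASSERT(sizeof(Entry) == 12) in GenerateProbe below). ProbeTable checks a
// single entry of one table and jumps to the cached code on a hit, falling
// through on a miss.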
static void ProbeTable(Isolate* isolate,
                       MacroAssembler* masm,
                       Code::Flags flags,
                       StubCache::Table table,
                       Register receiver,
                       Register name,
                       // Number of the cache entry, not scaled.
                       Register offset,
                       Register scratch,
                       Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  ASSERT(value_off_addr > key_off_addr);
  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
  ASSERT(map_off_addr > key_off_addr);
  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
  ASSERT((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ add(offset_scratch, offset, Operand(offset, LSL, 1));

  // Calculate the base address of the entry.
  __ mov(base_addr, Operand(key_offset));
  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));

  // Check that the key in the entry matches the name.
  __ ldr(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ b(ne, &miss);

  // Check the map matches.
  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ b(ne, &miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  // It's a nice optimization if this constant is encodable in the bic insn.

  uint32_t mask = Code::kFlagsNotUsedInLookup;
  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
  __ bic(flags_reg, flags_reg, Operand(mask));
  // Using cmn and the negative instead of cmp means we can use movw.
  if (flags < 0) {
    __ cmn(flags_reg, Operand(-flags));
  } else {
    __ cmp(flags_reg, Operand(flags));
  }
  __ b(ne, &miss);

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Miss: fall through.
  __ bind(&miss);
}

// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
                                             Label* miss_label,
                                             Register receiver,
                                             Handle<String> name,
                                             Register scratch0,
                                             Register scratch1) {
  ASSERT(name->IsSymbol());
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);

  Label done;

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // Bail out if the receiver has a named interceptor or requires access checks.
  Register map = scratch1;
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
  __ b(ne, miss_label);

  // Check that receiver is a JSObject.
  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, miss_label);

  // Load properties array.
  Register properties = scratch0;
  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  // Check that the properties array is a dictionary.
  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
  Register tmp = properties;
  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
  __ cmp(map, tmp);
  __ b(ne, miss_label);

  // Restore the temporarily used register.
  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));


  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
                                                     miss_label,
                                                     &done,
                                                     receiver,
                                                     properties,
                                                     name,
                                                     scratch1);
  __ bind(&done);
  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}

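// GenerateProbe hashes the property name's hash field with the receiver's
// map to pick a primary-table entry, mixing in the code flags and masking
// with the table size. On a primary miss it derives the secondary hash by
// subtracting the name and adding the flags again before probing the
// secondary table; a miss in both tables falls through so the caller can
// enter the runtime.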
void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra,
                              Register extra2,
                              Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  ASSERT(sizeof(Entry) == 12);

  // Make sure the flags do not name a specific type.
  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  ASSERT(!scratch.is(receiver));
  ASSERT(!scratch.is(name));
  ASSERT(!extra.is(receiver));
  ASSERT(!extra.is(name));
  ASSERT(!extra.is(scratch));
  ASSERT(!extra2.is(receiver));
  ASSERT(!extra2.is(name));
  ASSERT(!extra2.is(scratch));
  ASSERT(!extra2.is(extra));

  // Check scratch, extra and extra2 registers are valid.
  ASSERT(!scratch.is(no_reg));
  ASSERT(!extra.is(no_reg));
  ASSERT(!extra2.is(no_reg));
  ASSERT(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
                      extra2, extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, Operand(ip));
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
  // Mask down the eor argument to the minimum to keep the immediate
  // ARM-encodable.
  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
  __ and_(scratch, scratch, Operand(mask));

  // Probe the primary table.
  ProbeTable(isolate,
             masm,
             flags,
             kPrimary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
  __ and_(scratch, scratch, Operand(mask2));

  // Probe the secondary table.
  ProbeTable(isolate,
             masm,
             flags,
             kSecondary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
                      extra2, extra3);
}

void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  // Load the global or builtins object from the current context.
  __ ldr(prototype,
         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  __ ldr(prototype,
         FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
  // Load the initial map. The global functions all have initial maps.
  __ ldr(prototype,
         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the prototype from the initial map.
  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}

void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
    MacroAssembler* masm,
    int index,
    Register prototype,
    Label* miss) {
  Isolate* isolate = masm->isolate();
  // Check we're still in the same context.
  __ ldr(prototype,
         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ Move(ip, isolate->global_object());
  __ cmp(prototype, ip);
  __ b(ne, miss);
  // Get the global function with the given index.
  Handle<JSFunction> function(
      JSFunction::cast(isolate->native_context()->get(index)));
  // Load its initial map. The global functions all have initial maps.
  __ Move(prototype, Handle<Map>(function->initial_map()));
  // Load the prototype from the initial map.
  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}

// Load a fast property out of a holder object (src). In-object properties
// are loaded directly, otherwise the property is loaded from the properties
// fixed array.
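// The index counts from the start of the property backing store: after
// subtracting the number of in-object properties it is negative for
// in-object fields (addressed relative to the instance size) and
// non-negative for slots in the external properties FixedArray.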
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst,
                                            Register src,
                                            Handle<JSObject> holder,
                                            int index) {
  // Adjust for the number of properties stored in the holder.
  index -= holder->map()->inobject_properties();
  if (index < 0) {
    // Get the property straight out of the holder.
    int offset = holder->map()->instance_size() + (index * kPointerSize);
    __ ldr(dst, FieldMemOperand(src, offset));
  } else {
    // Calculate the offset into the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
    __ ldr(dst, FieldMemOperand(dst, offset));
  }
}

void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss_label);

  // Check that the object is a JS array.
  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
  __ b(ne, miss_label);

  // Load length directly from the JS array.
  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Ret();
}

// Generate code to check if an object is a string. If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
                                Register receiver,
                                Register scratch1,
                                Register scratch2,
                                Label* smi,
                                Label* non_string_object) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, smi);

  // Check that the object is a string.
  __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
  // The cast is to resolve the overload for the argument of 0x0.
  __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
  __ b(ne, non_string_object);
}

// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                            Register receiver,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss,
                                            bool support_wrappers) {
  Label check_wrapper;

  // Check if the object is a string leaving the instance type in the
  // scratch1 register.
  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
                      support_wrappers ? &check_wrapper : miss);

  // Load length directly from the string.
  __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
  __ Ret();

  if (support_wrappers) {
    // Check if the object is a JSValue wrapper.
    __ bind(&check_wrapper);
    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
    __ b(ne, miss);

    // Unwrap the value and check if the wrapped value is a string.
    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
    __ Ret();
  }
}

void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                 Register receiver,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* miss_label) {
  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
  __ mov(r0, scratch1);
  __ Ret();
}

// Generate StoreField code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                      Handle<JSObject> object,
                                      int index,
                                      Handle<Map> transition,
                                      Handle<String> name,
                                      Register receiver_reg,
                                      Register name_reg,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* miss_label) {
  // r0 : value
  Label exit;

  LookupResult lookup(masm->isolate());
  object->Lookup(*name, &lookup);
  if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
    // In sloppy mode, we could just return the value and be done. However, we
    // might be in strict mode, where we have to throw. Since we cannot tell,
    // go into slow case unconditionally.
    __ jmp(miss_label);
    return;
  }

  // Check that the map of the object hasn't changed.
  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                             : REQUIRE_EXACT_MAP;
  __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
              DO_SMI_CHECK, mode);

  // Perform global security token check if needed.
  if (object->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
  }

  // Check that we are allowed to write this.
  if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
    JSObject* holder;
    if (lookup.IsFound()) {
      holder = lookup.holder();
    } else {
      // Find the top object.
      holder = *object;
      do {
        holder = JSObject::cast(holder->GetPrototype());
      } while (holder->GetPrototype()->IsJSObject());
    }
    // We need an extra register, push.
    __ push(name_reg);
    Label miss_pop, done_check;
    CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
                    scratch1, scratch2, name, &miss_pop);
    __ jmp(&done_check);
    __ bind(&miss_pop);
    __ pop(name_reg);
    __ jmp(miss_label);
    __ bind(&done_check);
    __ pop(name_reg);
  }

  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  // Perform map transition for the receiver if necessary.
  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
    // The properties must be extended before we can store the value.
    // We jump to a runtime call that extends the properties array.
    __ push(receiver_reg);
    __ mov(r2, Operand(transition));
    __ Push(r2, r0);
    __ TailCallExternalReference(
        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                          masm->isolate()),
        3,
        1);
    return;
  }

  if (!transition.is_null()) {
    // Update the map of the object.
    __ mov(scratch1, Operand(transition));
    __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));

    // Update the write barrier for the map field and pass the now unused
    // name_reg as scratch register.
    __ RecordWriteField(receiver_reg,
                        HeapObject::kMapOffset,
                        scratch1,
                        name_reg,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs,
                        OMIT_REMEMBERED_SET,
                        OMIT_SMI_CHECK);
  }

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    __ str(r0, FieldMemOperand(receiver_reg, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(r0, &exit);

    // Update the write barrier for the array address.
    // Pass the now unused name_reg as a scratch register.
    __ mov(name_reg, r0);
    __ RecordWriteField(receiver_reg,
                        offset,
                        name_reg,
                        scratch1,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs);
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array.
    __ ldr(scratch1,
           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ str(r0, FieldMemOperand(scratch1, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(r0, &exit);

    // Update the write barrier for the array address.
    // Ok to clobber receiver_reg and name_reg, since we return.
    __ mov(name_reg, r0);
    __ RecordWriteField(scratch1,
                        offset,
                        name_reg,
                        receiver_reg,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs);
  }

  // Return the value (register r0).
  __ bind(&exit);
  __ Ret();
}

void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
  Handle<Code> code = (kind == Code::LOAD_IC)
      ? masm->isolate()->builtins()->LoadIC_Miss()
      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(code, RelocInfo::CODE_TARGET);
}

static void GenerateCallFunction(MacroAssembler* masm,
                                 Handle<Object> object,
                                 const ParameterCount& arguments,
                                 Label* miss,
                                 Code::ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- r0: receiver
  //  -- r1: function to call
  // -----------------------------------

  // Check that the function really is a function.
  __ JumpIfSmi(r1, miss);
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ b(ne, miss);

  // Patch the receiver on the stack with the global proxy if
  // necessary.
  if (object->IsGlobalObject()) {
    __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
    __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
  }

  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}

static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register receiver,
                                     Register holder,
                                     Register name,
                                     Handle<JSObject> holder_obj) {
  __ push(name);
  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
  Register scratch = name;
  __ mov(scratch, Operand(interceptor));
  __ push(scratch);
  __ push(receiver);
  __ push(holder);
  __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address()));
  __ push(scratch);
}

static void CompileCallLoadPropertyWithInterceptor(
    MacroAssembler* masm,
    Register receiver,
    Register holder,
    Register name,
    Handle<JSObject> holder_obj) {
  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                        masm->isolate());
  __ mov(r0, Operand(6));
  __ mov(r1, Operand(ref));

  CEntryStub stub(1);
  __ CallStub(&stub);
}

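// A fast API call reserves four extra stack slots below the JS arguments:
// the holder, the callee JSFunction, the call data, and the isolate (see
// the state comment in GenerateFastApiDirectCall below).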
static const int kFastApiCallArguments = 4;

// Reserves space for the extra arguments to API function in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                       Register scratch) {
  __ mov(scratch, Operand(Smi::FromInt(0)));
  for (int i = 0; i < kFastApiCallArguments; i++) {
    __ push(scratch);
  }
}

// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
  __ Drop(kFastApiCallArguments);
}

static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                      const CallOptimization& optimization,
                                      int argc) {
  // ----------- S t a t e -------------
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee JS function
  //  -- sp[8]              : call data
  //  -- sp[12]             : isolate
  //  -- sp[16]             : last JS argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first JS argument
  //  -- sp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and setup the context.
  Handle<JSFunction> function = optimization.constant_function();
  __ LoadHeapObject(r5, function);
  __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));

  // Pass the additional arguments.
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data(api_call_info->data());
  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
    __ Move(r0, api_call_info);
    __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
  } else {
    __ Move(r6, call_data);
  }
  __ mov(r7, Operand(ExternalReference::isolate_address()));
  // Store JS function, call data and isolate.
  __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());

  // Prepare arguments.
  __ add(r2, sp, Operand(3 * kPointerSize));

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // r0 = v8::Arguments&
  // Arguments is after the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // v8::Arguments::implicit_args_
  __ str(r2, MemOperand(r0, 0 * kPointerSize));
  // v8::Arguments::values_
  __ add(ip, r2, Operand(argc * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // v8::Arguments::length_ = argc
  __ mov(ip, Operand(argc));
  __ str(ip, MemOperand(r0, 2 * kPointerSize));
  // v8::Arguments::is_construct_call = 0
  __ mov(ip, Operand(0));
  __ str(ip, MemOperand(r0, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  ApiFunction fun(function_address);
  ExternalReference ref = ExternalReference(&fun,
                                            ExternalReference::DIRECT_API_CALL,
                                            masm->isolate());
  AllowExternalCallThatCantCauseGC scope(masm);

  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}

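// Compiles call stubs for properties found behind a named interceptor. When
// the lookup behind the interceptor resolves to a constant function,
// CompileCacheable inlines the interceptor probe and falls back to the
// cached function (using a fast API call when the prototype depth allows
// it); otherwise CompileRegular calls into the runtime to load and invoke
// the interceptor property.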
class CallInterceptorCompiler BASE_EMBEDDED {
 public:
  CallInterceptorCompiler(StubCompiler* stub_compiler,
                          const ParameterCount& arguments,
                          Register name,
                          Code::ExtraICState extra_ic_state)
      : stub_compiler_(stub_compiler),
        arguments_(arguments),
        name_(name),
        extra_ic_state_(extra_ic_state) {}

  void Compile(MacroAssembler* masm,
               Handle<JSObject> object,
               Handle<JSObject> holder,
               Handle<String> name,
               LookupResult* lookup,
               Register receiver,
               Register scratch1,
               Register scratch2,
               Register scratch3,
               Label* miss) {
    ASSERT(holder->HasNamedInterceptor());
    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

    // Check that the receiver isn't a smi.
    __ JumpIfSmi(receiver, miss);
    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
                       holder, lookup, name, optimization, miss);
    } else {
      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
                     name, holder, miss);
    }
  }

 private:
  void CompileCacheable(MacroAssembler* masm,
                        Handle<JSObject> object,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Handle<JSObject> interceptor_holder,
                        LookupResult* lookup,
                        Handle<String> name,
                        const CallOptimization& optimization,
                        Label* miss_label) {
    ASSERT(optimization.is_constant_call());
    ASSERT(!lookup->holder()->IsGlobalObject());
    Counters* counters = masm->isolate()->counters();
    int depth1 = kInvalidProtoDepth;
    int depth2 = kInvalidProtoDepth;
    bool can_do_fast_api_call = false;
    if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
      depth1 = optimization.GetPrototypeDepthOfExpectedType(
          object, interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
        depth2 = optimization.GetPrototypeDepthOfExpectedType(
            interceptor_holder, Handle<JSObject>(lookup->holder()));
      }
      can_do_fast_api_call =
          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
    }

    __ IncrementCounter(counters->call_const_interceptor(), 1,
                        scratch1, scratch2);

    if (can_do_fast_api_call) {
      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
                          scratch1, scratch2);
      ReserveSpaceForFastApiCall(masm, scratch1);
    }

    // Check that the maps from receiver to interceptor's holder
    // haven't changed and thus we can invoke interceptor.
    Label miss_cleanup;
    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, depth1, miss);

    // Invoke an interceptor and if it provides a value,
    // branch to |regular_invoke|.
    Label regular_invoke;
    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                        &regular_invoke);

    // Interceptor returned nothing for this property. Try to use cached
    // constant function.

    // Check that the maps from interceptor's holder to constant function's
    // holder haven't changed and thus we can use cached constant function.
    if (*interceptor_holder != lookup->holder()) {
      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                      Handle<JSObject>(lookup->holder()),
                                      scratch1, scratch2, scratch3,
                                      name, depth2, miss);
    } else {
      // CheckPrototypes has a side effect of fetching a 'holder'
      // for API (object which is instanceof for the signature). It's
      // safe to omit it here, as if present, it should be fetched
      // by the previous CheckPrototypes.
      ASSERT(depth2 == kInvalidProtoDepth);
    }

    // Invoke function.
    if (can_do_fast_api_call) {
      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
    } else {
      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
          ? CALL_AS_FUNCTION
          : CALL_AS_METHOD;
      __ InvokeFunction(optimization.constant_function(), arguments_,
                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
    }

    // Deferred code for fast API call case---clean preallocated space.
    if (can_do_fast_api_call) {
      __ bind(&miss_cleanup);
      FreeSpaceForFastApiCall(masm);
      __ b(miss_label);
    }

    // Invoke a regular function.
    __ bind(&regular_invoke);
    if (can_do_fast_api_call) {
      FreeSpaceForFastApiCall(masm);
    }
  }

  void CompileRegular(MacroAssembler* masm,
                      Handle<JSObject> object,
                      Register receiver,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Handle<String> name,
                      Handle<JSObject> interceptor_holder,
                      Label* miss_label) {
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, miss_label);

    // Call a runtime function to load the interceptor property.
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Save the name_ register across the call.
    __ push(name_);
    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
    __ CallExternalReference(
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                          masm->isolate()),
        6);
    // Restore the name_ register.
    __ pop(name_);
    // Leave the internal frame.
  }

  void LoadWithInterceptor(MacroAssembler* masm,
                           Register receiver,
                           Register holder,
                           Handle<JSObject> holder_obj,
                           Register scratch,
                           Label* interceptor_succeeded) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(holder, name_);
      CompileCallLoadPropertyWithInterceptor(masm,
                                             receiver,
                                             holder,
                                             name_,
                                             holder_obj);
      __ pop(name_);  // Restore the name.
      __ pop(receiver);  // Restore the holder.
    }
    // If interceptor returns no-result sentinel, call the constant function.
    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
    __ cmp(r0, scratch);
    __ b(ne, interceptor_succeeded);
  }

  StubCompiler* stub_compiler_;
  const ParameterCount& arguments_;
  Register name_;
  Code::ExtraICState extra_ic_state_;
};

// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
                                      Handle<GlobalObject> global,
                                      Handle<String> name,
                                      Register scratch,
                                      Label* miss) {
  Handle<JSGlobalPropertyCell> cell =
      GlobalObject::EnsurePropertyCell(global, name);
  ASSERT(cell->value()->IsTheHole());
  __ mov(scratch, Operand(cell));
  __ ldr(scratch,
         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch, ip);
  __ b(ne, miss);
}

// Calls GenerateCheckPropertyCell for each global object in the prototype
// chain from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
                                       Handle<JSObject> object,
                                       Handle<JSObject> holder,
                                       Handle<String> name,
                                       Register scratch,
                                       Label* miss) {
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    if (current->IsGlobalObject()) {
      GenerateCheckPropertyCell(masm,
                                Handle<GlobalObject>::cast(current),
                                name,
                                scratch,
                                miss);
    }
    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
  }
}

// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset).
// If VFP2 is available use it for conversion.
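// Without VFP the binary32 value is assembled by hand: the sign bit is
// copied over, an 8-bit exponent biased by kBinary32ExponentBias is or'ed
// in, and the top bits of the magnitude (minus the implicit leading 1)
// become the 23-bit mantissa.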
static void StoreIntAsFloat(MacroAssembler* masm,
                            Register dst,
                            Register wordoffset,
                            Register ival,
                            Register fval,
                            Register scratch1,
                            Register scratch2) {
  if (CpuFeatures::IsSupported(VFP2)) {
    CpuFeatures::Scope scope(VFP2);
    __ vmov(s0, ival);
    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
    __ vcvt_f32_s32(s0, s0);
    __ vstr(s0, scratch1, 0);
  } else {
    Label not_special, done;
    // Move sign bit from source to destination. This works because the sign
    // bit in the exponent word of the double has the same position and polarity
    // as the 2's complement sign bit in a Smi.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate value if it is negative.
    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the 0 exponent (biased).
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
    __ b(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ CountLeadingZeros(zeros, ival, scratch1);

    // Compute exponent and or it into the exponent register.
    __ rsb(scratch1,
           zeros,
           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));

    __ orr(fval,
           fval,
           Operand(scratch1, LSL, kBinary32ExponentShift));

    // Shift up the source chopping the top bit off.
    __ add(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
    __ mov(ival, Operand(ival, LSL, zeros));
    // And the top (top 20 bits).
    __ orr(fval,
           fval,
           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

    __ bind(&done);
    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
  }
}

// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
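// The double is assembled in place: the biased exponent (kExponentBias plus
// the number of significant bits below the leading 1) is or'ed into hiword,
// and the mantissa bits are split across hiword:loword with the implicit
// leading 1 shifted out.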
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
    __ mov(loword, Operand(0, RelocInfo::NONE));
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // If least significant bit of biased exponent was not 1 it was corrupted
  // by most significant bit of mantissa so we should fix that.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}

#undef __
#define __ ACCESS_MASM(masm())

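// CheckPrototypes walks the prototype chain from |object| to |holder|,
// emitting a map check (or a dictionary negative lookup for slow-mode
// objects) for every object on the way. It returns the register holding the
// holder at the end of the walk and jumps to |miss| if any map has changed
// since compile time.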
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                       Register object_reg,
                                       Handle<JSObject> holder,
                                       Register holder_reg,
                                       Register scratch1,
                                       Register scratch2,
                                       Handle<String> name,
                                       int save_at_depth,
                                       Label* miss) {
  // Make sure there's no overlap between holder and object registers.
  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
         && !scratch2.is(scratch1));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 0;

  if (save_at_depth == depth) {
    __ str(reg, MemOperand(sp));
  }

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    ++depth;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());

    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
    if (!current->HasFastProperties() &&
        !current->IsJSGlobalObject() &&
        !current->IsJSGlobalProxy()) {
      if (!name->IsSymbol()) {
        name = factory()->LookupSymbol(name);
      }
      ASSERT(current->property_dictionary()->FindEntry(*name) ==
             StringDictionary::kNotFound);

      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
                                       scratch1, scratch2);

      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
      reg = holder_reg;  // From now on the object will be in holder_reg.
      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
    } else {
      Handle<Map> current_map(current->map());
      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
                  ALLOW_ELEMENT_TRANSITION_MAPS);

      // Check access rights to the global object. This has to happen after
      // the map check so that we know that the object is actually a global
      // object.
      if (current->IsJSGlobalProxy()) {
        __ CheckAccessGlobalProxy(reg, scratch2, miss);
      }
      reg = holder_reg;  // From now on the object will be in holder_reg.

      if (heap()->InNewSpace(*prototype)) {
        // The prototype is in new space; we cannot store a reference to it
        // in the code. Load it from the map.
        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
      } else {
        // The prototype is in old space; load it directly.
        __ mov(reg, Operand(prototype));
      }
    }

    if (save_at_depth == depth) {
      __ str(reg, MemOperand(sp));
    }

    // Go to the next object in the prototype chain.
    current = prototype;
  }

  // Log the check depth.
  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));

  // Check the holder map.
  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);

  // Perform security check for access to the global object.
  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
  if (holder->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(reg, scratch1, miss);
  }

  // If we've skipped any global objects, it's not enough to verify that
  // their maps haven't changed. We also need to check that the property
  // cell for the property is still empty.
  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);

  // Return the register containing the holder.
  return reg;
}

void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                     Handle<JSObject> holder,
                                     Register receiver,
                                     Register scratch1,
                                     Register scratch2,
                                     Register scratch3,
                                     int index,
                                     Handle<String> name,
                                     Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
  GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
  __ Ret();
}

void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<JSFunction> value,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);

  // Return the constant value.
  __ LoadHeapObject(r0, value);
  __ Ret();
}

void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
                                                  Register name_reg,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Register scratch3,
                                                  Handle<AccessorInfo> callback,
                                                  Handle<String> name,
                                                  Label* miss) {
  ASSERT(!receiver.is(scratch1));
  ASSERT(!receiver.is(scratch2));
  ASSERT(!receiver.is(scratch3));

  // Load the properties dictionary.
  Register dictionary = scratch1;
  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  // Probe the dictionary.
  Label probe_done;
  StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
                                                     miss,
                                                     &probe_done,
                                                     dictionary,
                                                     name_reg,
                                                     scratch2,
                                                     scratch3);
  __ bind(&probe_done);

  // If probing finds an entry in the dictionary, scratch3 contains the
  // pointer into the dictionary. Check that the value is the callback.
  Register pointer = scratch3;
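  // Dictionary entries are (key, value, details) triples, so the value of
  // the entry found by the probe sits one pointer past its key.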
  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
  __ cmp(scratch2, Operand(callback));
  __ b(ne, miss);
}

void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register name_reg,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Handle<AccessorInfo> callback,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
                                 scratch2, scratch3, name, miss);

  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
    GenerateDictionaryLoadCallback(
        reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
  }

  // Build AccessorInfo::args_ list on the stack and push property name below
  // the exit frame to make GC aware of them and store pointers to them.
  __ push(receiver);
  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
  if (heap()->InNewSpace(callback->data())) {
    __ Move(scratch3, callback);
    __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
  } else {
    __ Move(scratch3, Handle<Object>(callback->data()));
  }
  __ Push(reg, scratch3);
  __ mov(scratch3, Operand(ExternalReference::isolate_address()));
  __ Push(scratch3, name_reg);
  __ mov(r0, sp);  // r0 = Handle<String>

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm(), StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object** args_) as the data.
  __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&

  const int kStackUnwindSpace = 5;
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_GETTER_CALL,
                        masm()->isolate());
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}

void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           Handle<JSObject> interceptor_holder,
                                           LookupResult* lookup,
                                           Register receiver,
                                           Register name_reg,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Handle<String> name,
                                           Label* miss) {
  ASSERT(interceptor_holder->HasNamedInterceptor());
  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // So far the most popular follow ups for interceptor loads are FIELD
  // and CALLBACKS, so inline only them, other cases may be added
  // later.
  bool compile_followup_inline = false;
  if (lookup->IsFound() && lookup->IsCacheable()) {
    if (lookup->IsField()) {
      compile_followup_inline = true;
    } else if (lookup->type() == CALLBACKS &&
               lookup->GetCallbackObject()->IsAccessorInfo()) {
      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
      compile_followup_inline = callback->getter() != NULL &&
          callback->IsCompatibleReceiver(*object);
    }
  }

  if (compile_followup_inline) {
    // Compile the interceptor call, followed by inline code to load the
    // property from further up the prototype chain if the call fails.
    // Check that the maps haven't changed.
    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                          scratch1, scratch2, scratch3,
                                          name, miss);
    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));

    // Preserve the receiver register explicitly whenever it is different from
    // the holder and it is needed should the interceptor return without any
    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
    // the FIELD case might cause a miss during the prototype check.
    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
        (lookup->type() == CALLBACKS || must_perform_prototype_check);

    // Save necessary data before invoking an interceptor.
    // Requires a frame to make GC aware of pushed pointers.
    {
      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
      if (must_preserve_receiver_reg) {
        __ Push(receiver, holder_reg, name_reg);
      } else {
        __ Push(holder_reg, name_reg);
      }
      // Invoke an interceptor. Note: map checks from receiver to
      // interceptor's holder have been compiled before (see a caller
      // of this method.)
      CompileCallLoadPropertyWithInterceptor(masm(),
                                             receiver,
                                             holder_reg,
                                             name_reg,
                                             interceptor_holder);
      // Check if interceptor provided a value for property. If it's
      // the case, return immediately.
      Label interceptor_failed;
      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
      __ cmp(r0, scratch1);
      __ b(eq, &interceptor_failed);
      frame_scope.GenerateLeaveFrame();
      __ Ret();

      __ bind(&interceptor_failed);
      __ pop(name_reg);
      __ pop(holder_reg);
      if (must_preserve_receiver_reg) {
        __ pop(receiver);
      }
      // Leave the internal frame.
    }
    // Check that the maps from interceptor's holder to lookup's holder
    // haven't changed. And load lookup's holder into |holder| register.
    if (must_perform_prototype_check) {
      holder_reg = CheckPrototypes(interceptor_holder,
                                   holder_reg,
                                   Handle<JSObject>(lookup->holder()),
                                   scratch1,
                                   scratch2,
                                   scratch3,
                                   name,
                                   miss);
    }

    if (lookup->IsField()) {
      // We found FIELD property in prototype chain of interceptor's holder.
      // Retrieve a field from field's holder.
      GenerateFastPropertyLoad(masm(), r0, holder_reg,
                               Handle<JSObject>(lookup->holder()),
                               lookup->GetFieldIndex());
      __ Ret();
    } else {
      // We found CALLBACKS property in prototype chain of interceptor's
      // holder.
      ASSERT(lookup->type() == CALLBACKS);
      Handle<AccessorInfo> callback(
          AccessorInfo::cast(lookup->GetCallbackObject()));
      ASSERT(callback->getter() != NULL);

      // Tail call to runtime.
      // Important invariant in CALLBACKS case: the code above must be
      // structured to never clobber |receiver| register.
      __ Move(scratch2, callback);
      // holder_reg is either receiver or scratch1.
      if (!receiver.is(holder_reg)) {
        ASSERT(scratch1.is(holder_reg));
        __ Push(receiver, holder_reg);
      } else {
        __ push(receiver);
        __ push(holder_reg);
      }
      __ ldr(scratch3,
             FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
      __ mov(scratch1, Operand(ExternalReference::isolate_address()));
      __ Push(scratch3, scratch1, scratch2, name_reg);

      ExternalReference ref =
          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                            masm()->isolate());
      __ TailCallExternalReference(ref, 6, 1);
    }
  } else {  // !compile_followup_inline
    // Call the runtime system to load the interceptor.
    // Check that the maps haven't changed.
    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                          scratch1, scratch2, scratch3,
                                          name, miss);
    PushInterceptorArguments(masm(), receiver, holder_reg,
                             name_reg, interceptor_holder);

    ExternalReference ref =
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                          masm()->isolate());
    __ TailCallExternalReference(ref, 6, 1);
  }
}

void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
  if (kind_ == Code::KEYED_CALL_IC) {
    __ cmp(r2, Operand(name));
    __ b(ne, miss);
  }
}

void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
                                                   Handle<JSObject> holder,
                                                   Handle<String> name,
                                                   Label* miss) {
  ASSERT(holder->IsGlobalObject());

  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ ldr(r0, MemOperand(sp, argc * kPointerSize));

  // Check that the maps haven't changed.
  __ JumpIfSmi(r0, miss);
  CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}

void CallStubCompiler::GenerateLoadFunctionFromCell(
    Handle<JSGlobalPropertyCell> cell,
    Handle<JSFunction> function,
    Label* miss) {
  // Get the value from the cell.
  __ mov(r3, Operand(cell));
  __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (heap()->InNewSpace(*function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ JumpIfSmi(r1, miss);
    __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
    __ b(ne, miss);

    // Check the shared function info. Make sure it hasn't changed.
    __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    __ cmp(r4, r3);
  } else {
    __ cmp(r1, Operand(function));
  }
  __ b(ne, miss);
}

void CallStubCompiler::GenerateMissBranch() {
  Handle<Code> code =
      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                               kind_,
                                               extra_state_);
  __ Jump(code, RelocInfo::CODE_TARGET);
}

Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
                                                Handle<JSObject> holder,
                                                int index,
                                                Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;

  GenerateNameCheck(name, &miss);

  const int argc = arguments().immediate();

  // Get the receiver of the function from the stack into r0.
  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(r0, &miss);

  // Do the right check and compute the holder register.
  Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
  GenerateFastPropertyLoad(masm(), r1, reg, holder, index);

  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);

  // Handle call cache miss.
  __ bind(&miss);
  GenerateMissBranch();

  // Return the generated code.
  return GetCode(Code::FIELD, name);
}

1580 Handle<Code> CallStubCompiler::CompileArrayPushCall(
1581  Handle<Object> object,
1582  Handle<JSObject> holder,
1583  Handle<JSGlobalPropertyCell> cell,
1584  Handle<JSFunction> function,
1585  Handle<String> name) {
1586  // ----------- S t a t e -------------
1587  // -- r2 : name
1588  // -- lr : return address
1589  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1590  // -- ...
1591  // -- sp[argc * 4] : receiver
1592  // -----------------------------------
1593 
1594  // If object is not an array, bail out to regular call.
1595  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1596 
1597  Label miss;
1598  GenerateNameCheck(name, &miss);
1599 
1600  Register receiver = r1;
1601  // Get the receiver from the stack
1602  const int argc = arguments().immediate();
1603  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1604 
1605  // Check that the receiver isn't a smi.
1606  __ JumpIfSmi(receiver, &miss);
1607 
1608  // Check that the maps haven't changed.
1609  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
1610  name, &miss);
1611 
1612  if (argc == 0) {
1613  // Nothing to do, just return the length.
1614  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1615  __ Drop(argc + 1);
1616  __ Ret();
1617  } else {
1618  Label call_builtin;
1619 
1620  if (argc == 1) { // Otherwise fall through to call the builtin.
1621  Label attempt_to_grow_elements;
1622 
1623  Register elements = r6;
1624  Register end_elements = r5;
1625  // Get the elements array of the object.
1626  __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1627 
1628  // Check that the elements are in fast mode and writable.
1629  __ CheckMap(elements,
1630  r0,
1631  Heap::kFixedArrayMapRootIndex,
1632  &call_builtin,
1633  DONT_DO_SMI_CHECK);
1634 
1635 
1636  // Get the array's length into r0 and calculate new length.
1637  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1638  STATIC_ASSERT(kSmiTagSize == 1);
1639  STATIC_ASSERT(kSmiTag == 0);
1640  __ add(r0, r0, Operand(Smi::FromInt(argc)));
1641 
1642  // Get the elements' length.
1643  __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
1644 
1645  // Check if we could survive without allocation.
1646  __ cmp(r0, r4);
1647  __ b(gt, &attempt_to_grow_elements);
1648 
1649  // Check if value is a smi.
1650  Label with_write_barrier;
1651  __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
1652  __ JumpIfNotSmi(r4, &with_write_barrier);
1653 
1654  // Save new length.
1655  __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1656 
1657  // Store the value.
1658  // We may need a register containing the address end_elements below,
1659  // so write back the value in end_elements.
1660  __ add(end_elements, elements,
1661  Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1662  const int kEndElementsOffset =
1663  FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
1664  __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
1665 
1666  // The value was a smi, so no write barrier is needed.
1667  __ Drop(argc + 1);
1668  __ Ret();
1669 
1670  __ bind(&with_write_barrier);
1671 
1672  __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1673 
1674  if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
1675  Label fast_object, not_fast_object;
1676  __ CheckFastObjectElements(r3, r7, &not_fast_object);
1677  __ jmp(&fast_object);
1678  // In case of fast smi-only, convert to fast object, otherwise bail out.
1679  __ bind(&not_fast_object);
1680  __ CheckFastSmiElements(r3, r7, &call_builtin);
1681  // r1: receiver
1682  // r3: map
1683  Label try_holey_map;
1684  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1685  FAST_ELEMENTS,
1686  r3,
1687  r7,
1688  &try_holey_map);
1689  __ mov(r2, receiver);
1690  ElementsTransitionGenerator::
1691  GenerateMapChangeElementsTransition(masm());
1692  __ jmp(&fast_object);
1693 
1694  __ bind(&try_holey_map);
1695  __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
1696  FAST_HOLEY_ELEMENTS,
1697  r3,
1698  r7,
1699  &call_builtin);
1700  __ mov(r2, receiver);
1701  ElementsTransitionGenerator::
1702  GenerateMapChangeElementsTransition(masm());
1703  __ bind(&fast_object);
1704  } else {
1705  __ CheckFastObjectElements(r3, r3, &call_builtin);
1706  }
1707 
1708  // Save new length.
1709  __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1710 
1711  // Store the value.
1712  // We may need a register containing the address end_elements below,
1713  // so write back the value in end_elements.
1714  __ add(end_elements, elements,
1715  Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1716  __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
1717 
1718  __ RecordWrite(elements,
1719  end_elements,
1720  r4,
1721  kLRHasNotBeenSaved,
1722  kDontSaveFPRegs,
1723  EMIT_REMEMBERED_SET,
1724  OMIT_SMI_CHECK);
1725  __ Drop(argc + 1);
1726  __ Ret();
1727 
1728  __ bind(&attempt_to_grow_elements);
1729  // r0: array's length + 1.
1730  // r4: elements' length.
1731 
1732  if (!FLAG_inline_new) {
1733  __ b(&call_builtin);
1734  }
1735 
1736  __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
1737  // Growing elements that are SMI-only requires special handling in case
1738  // the new element is non-Smi. For now, delegate to the builtin.
1739  Label no_fast_elements_check;
1740  __ JumpIfSmi(r2, &no_fast_elements_check);
1741  __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
1742  __ CheckFastObjectElements(r7, r7, &call_builtin);
1743  __ bind(&no_fast_elements_check);
1744 
1745  Isolate* isolate = masm()->isolate();
1746  ExternalReference new_space_allocation_top =
1747  ExternalReference::new_space_allocation_top_address(isolate);
1748  ExternalReference new_space_allocation_limit =
1749  ExternalReference::new_space_allocation_limit_address(isolate);
1750 
1751  const int kAllocationDelta = 4;
1752  // Load top and check if it is the end of elements.
1753  __ add(end_elements, elements,
1754  Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1755  __ add(end_elements, end_elements, Operand(kEndElementsOffset));
1756  __ mov(r7, Operand(new_space_allocation_top));
1757  __ ldr(r3, MemOperand(r7));
1758  __ cmp(end_elements, r3);
1759  __ b(ne, &call_builtin);
1760 
1761  __ mov(r9, Operand(new_space_allocation_limit));
1762  __ ldr(r9, MemOperand(r9));
1763  __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
1764  __ cmp(r3, r9);
1765  __ b(hi, &call_builtin);
1766 
1767  // We fit and could grow elements.
1768  // Update new_space_allocation_top.
1769  __ str(r3, MemOperand(r7));
1770  // Push the argument.
1771  __ str(r2, MemOperand(end_elements));
1772  // Fill the rest with holes.
1773  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1774  for (int i = 1; i < kAllocationDelta; i++) {
1775  __ str(r3, MemOperand(end_elements, i * kPointerSize));
1776  }
1777 
1778  // Update elements' and array's sizes.
1779  __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1780  __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
1781  __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
1782 
1783  // Elements are in new space, so write barrier is not required.
1784  __ Drop(argc + 1);
1785  __ Ret();
1786  }
1787  __ bind(&call_builtin);
1788  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
1789  masm()->isolate()),
1790  argc + 1,
1791  1);
1792  }
1793 
1794  // Handle call cache miss.
1795  __ bind(&miss);
1796  GenerateMissBranch();
1797 
1798  // Return the generated code.
1799  return GetCode(function);
1800 }
1801 
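// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The attempt_to_grow_elements path above can extend the elements store in
// place only when it was the most recent new-space allocation, i.e. its end
// equals new_space_allocation_top; growing then amounts to bumping the top
// pointer by kAllocationDelta slots and filling the extra slots with holes.
// A sketch of that bump-pointer check under those assumptions (hypothetical
// names, not V8's API):
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BumpSpace {
  uint8_t* top;    // next free byte (new_space_allocation_top)
  uint8_t* limit;  // end of the space (new_space_allocation_limit)
};

// Try to grow `block` (currently `size` bytes) by `extra` bytes in place.
bool TryGrowInPlace(BumpSpace* space, uint8_t* block, size_t size,
                    size_t extra) {
  if (block + size != space->top) return false;         // not the last allocation
  if (space->top + extra > space->limit) return false;  // no room left
  space->top += extra;                                  // just bump the top
  return true;
}

int main() {
  static uint8_t arena[64];
  BumpSpace space{arena + 16, arena + sizeof(arena)};
  std::printf("%d\n", TryGrowInPlace(&space, arena, 16, 16));  // prints "1"
}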
1802 
1803 Handle<Code> CallStubCompiler::CompileArrayPopCall(
1804  Handle<Object> object,
1805  Handle<JSObject> holder,
1806  Handle<JSGlobalPropertyCell> cell,
1807  Handle<JSFunction> function,
1808  Handle<String> name) {
1809  // ----------- S t a t e -------------
1810  // -- r2 : name
1811  // -- lr : return address
1812  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1813  // -- ...
1814  // -- sp[argc * 4] : receiver
1815  // -----------------------------------
1816 
1817  // If object is not an array, bail out to regular call.
1818  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1819 
1820  Label miss, return_undefined, call_builtin;
1821  Register receiver = r1;
1822  Register elements = r3;
1823  GenerateNameCheck(name, &miss);
1824 
1825  // Get the receiver from the stack
1826  const int argc = arguments().immediate();
1827  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1828  // Check that the receiver isn't a smi.
1829  __ JumpIfSmi(receiver, &miss);
1830 
1831  // Check that the maps haven't changed.
1832  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
1833  r4, r0, name, &miss);
1834 
1835  // Get the elements array of the object.
1836  __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1837 
1838  // Check that the elements are in fast mode and writable.
1839  __ CheckMap(elements,
1840  r0,
1841  Heap::kFixedArrayMapRootIndex,
1842  &call_builtin,
1843  DONT_DO_SMI_CHECK);
1844 
1845  // Get the array's length into r4 and calculate new length.
1846  __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1847  __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
1848  __ b(lt, &return_undefined);
1849 
1850  // Get the last element.
1851  __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
1852  STATIC_ASSERT(kSmiTagSize == 1);
1853  STATIC_ASSERT(kSmiTag == 0);
1854  // We can't address the last element in one operation. Compute the more
1855  // expensive shift first, and use an offset later on.
1856  __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
1857  __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
1858  __ cmp(r0, r6);
1859  __ b(eq, &call_builtin);
1860 
1861  // Set the array's length.
1862  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1863 
1864  // Fill with the hole.
1865  __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
1866  __ Drop(argc + 1);
1867  __ Ret();
1868 
1869  __ bind(&return_undefined);
1870  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1871  __ Drop(argc + 1);
1872  __ Ret();
1873 
1874  __ bind(&call_builtin);
1875  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
1876  masm()->isolate()),
1877  argc + 1,
1878  1);
1879 
1880  // Handle call cache miss.
1881  __ bind(&miss);
1882  GenerateMissBranch();
1883 
1884  // Return the generated code.
1885  return GetCode(function);
1886 }
1887 
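// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The pop fast path above decrements the length, reads the last slot, and
// bails out to the builtin if that slot holds the hole sentinel; otherwise
// it writes the hole back and returns the value. A sketch of that control
// flow (kTheHole is a stand-in for V8's hole sentinel):
#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

constexpr int kTheHole = -1;  // hypothetical sentinel value

std::optional<int> FastPop(std::vector<int>& elements, size_t& length) {
  if (length == 0) return std::nullopt;        // return_undefined path
  int value = elements[length - 1];
  if (value == kTheHole) return std::nullopt;  // call_builtin path
  elements[length - 1] = kTheHole;             // fill the slot with the hole
  --length;                                    // store the new length
  return value;
}

int main() {
  std::vector<int> elems = {1, 2, 3};
  size_t len = 3;
  std::printf("%d\n", *FastPop(elems, len));  // prints "3"
}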
1888 
1889 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
1890  Handle<Object> object,
1891  Handle<JSObject> holder,
1892  Handle<JSGlobalPropertyCell> cell,
1893  Handle<JSFunction> function,
1894  Handle<String> name) {
1895  // ----------- S t a t e -------------
1896  // -- r2 : function name
1897  // -- lr : return address
1898  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1899  // -- ...
1900  // -- sp[argc * 4] : receiver
1901  // -----------------------------------
1902 
1903  // If object is not a string, bail out to regular call.
1904  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1905 
1906  const int argc = arguments().immediate();
1907  Label miss;
1908  Label name_miss;
1909  Label index_out_of_range;
1910  Label* index_out_of_range_label = &index_out_of_range;
1911 
1912  if (kind_ == Code::CALL_IC &&
1913  (CallICBase::StringStubState::decode(extra_state_) ==
1914  DEFAULT_STRING_STUB)) {
1915  index_out_of_range_label = &miss;
1916  }
1917  GenerateNameCheck(name, &name_miss);
1918 
1919  // Check that the maps starting from the prototype haven't changed.
1920  GenerateDirectLoadGlobalFunctionPrototype(masm(),
1921  Context::STRING_FUNCTION_INDEX,
1922  r0,
1923  &miss);
1924  ASSERT(!object.is_identical_to(holder));
1925  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1926  r0, holder, r1, r3, r4, name, &miss);
1927 
1928  Register receiver = r1;
1929  Register index = r4;
1930  Register result = r0;
1931  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1932  if (argc > 0) {
1933  __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
1934  } else {
1935  __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1936  }
1937 
1938  StringCharCodeAtGenerator generator(receiver,
1939  index,
1940  result,
1941  &miss, // When not a string.
1942  &miss, // When not a number.
1943  index_out_of_range_label,
1944  STRING_INDEX_IS_NUMBER);
1945  generator.GenerateFast(masm());
1946  __ Drop(argc + 1);
1947  __ Ret();
1948 
1949  StubRuntimeCallHelper call_helper;
1950  generator.GenerateSlow(masm(), call_helper);
1951 
1952  if (index_out_of_range.is_linked()) {
1953  __ bind(&index_out_of_range);
1954  __ LoadRoot(r0, Heap::kNanValueRootIndex);
1955  __ Drop(argc + 1);
1956  __ Ret();
1957  }
1958 
1959  __ bind(&miss);
1960  // Restore function name in r2.
1961  __ Move(r2, name);
1962  __ bind(&name_miss);
1963  GenerateMissBranch();
1964 
1965  // Return the generated code.
1966  return GetCode(function);
1967 }
1968 
1969 
1970 Handle<Code> CallStubCompiler::CompileStringCharAtCall(
1971  Handle<Object> object,
1972  Handle<JSObject> holder,
1973  Handle<JSGlobalPropertyCell> cell,
1974  Handle<JSFunction> function,
1975  Handle<String> name) {
1976  // ----------- S t a t e -------------
1977  // -- r2 : function name
1978  // -- lr : return address
1979  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1980  // -- ...
1981  // -- sp[argc * 4] : receiver
1982  // -----------------------------------
1983 
1984  // If object is not a string, bail out to regular call.
1985  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1986 
1987  const int argc = arguments().immediate();
1988  Label miss;
1989  Label name_miss;
1990  Label index_out_of_range;
1991  Label* index_out_of_range_label = &index_out_of_range;
1992  if (kind_ == Code::CALL_IC &&
1993  (CallICBase::StringStubState::decode(extra_state_) ==
1994  DEFAULT_STRING_STUB)) {
1995  index_out_of_range_label = &miss;
1996  }
1997  GenerateNameCheck(name, &name_miss);
1998 
1999  // Check that the maps starting from the prototype haven't changed.
2000  GenerateDirectLoadGlobalFunctionPrototype(masm(),
2001  Context::STRING_FUNCTION_INDEX,
2002  r0,
2003  &miss);
2004  ASSERT(!object.is_identical_to(holder));
2005  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2006  r0, holder, r1, r3, r4, name, &miss);
2007 
2008  Register receiver = r0;
2009  Register index = r4;
2010  Register scratch = r3;
2011  Register result = r0;
2012  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
2013  if (argc > 0) {
2014  __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
2015  } else {
2016  __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
2017  }
2018 
2019  StringCharAtGenerator generator(receiver,
2020  index,
2021  scratch,
2022  result,
2023  &miss, // When not a string.
2024  &miss, // When not a number.
2025  index_out_of_range_label,
2026  STRING_INDEX_IS_NUMBER);
2027  generator.GenerateFast(masm());
2028  __ Drop(argc + 1);
2029  __ Ret();
2030 
2031  StubRuntimeCallHelper call_helper;
2032  generator.GenerateSlow(masm(), call_helper);
2033 
2034  if (index_out_of_range.is_linked()) {
2035  __ bind(&index_out_of_range);
2036  __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
2037  __ Drop(argc + 1);
2038  __ Ret();
2039  }
2040 
2041  __ bind(&miss);
2042  // Restore function name in r2.
2043  __ Move(r2, name);
2044  __ bind(&name_miss);
2045  GenerateMissBranch();
2046 
2047  // Return the generated code.
2048  return GetCode(function);
2049 }
2050 
2051 
2052 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
2053  Handle<Object> object,
2054  Handle<JSObject> holder,
2055  Handle<JSGlobalPropertyCell> cell,
2056  Handle<JSFunction> function,
2057  Handle<String> name) {
2058  // ----------- S t a t e -------------
2059  // -- r2 : function name
2060  // -- lr : return address
2061  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2062  // -- ...
2063  // -- sp[argc * 4] : receiver
2064  // -----------------------------------
2065 
2066  const int argc = arguments().immediate();
2067 
2068  // If the object is not a JSObject or we got an unexpected number of
2069  // arguments, bail out to the regular call.
2070  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2071 
2072  Label miss;
2073  GenerateNameCheck(name, &miss);
2074 
2075  if (cell.is_null()) {
2076  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
2077 
2078  STATIC_ASSERT(kSmiTag == 0);
2079  __ JumpIfSmi(r1, &miss);
2080 
2081  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2082  name, &miss);
2083  } else {
2084  ASSERT(cell->value() == *function);
2085  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2086  &miss);
2087  GenerateLoadFunctionFromCell(cell, function, &miss);
2088  }
2089 
2090  // Load the char code argument.
2091  Register code = r1;
2092  __ ldr(code, MemOperand(sp, 0 * kPointerSize));
2093 
2094  // Check the code is a smi.
2095  Label slow;
2096  STATIC_ASSERT(kSmiTag == 0);
2097  __ JumpIfNotSmi(code, &slow);
2098 
2099  // Convert the smi code to uint16.
2100  __ and_(code, code, Operand(Smi::FromInt(0xffff)));
2101 
2102  StringCharFromCodeGenerator generator(code, r0);
2103  generator.GenerateFast(masm());
2104  __ Drop(argc + 1);
2105  __ Ret();
2106 
2107  StubRuntimeCallHelper call_helper;
2108  generator.GenerateSlow(masm(), call_helper);
2109 
2110  // Tail call the full function. We do not have to patch the receiver
2111  // because the function makes no use of it.
2112  __ bind(&slow);
2113  __ InvokeFunction(
2114  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2115 
2116  __ bind(&miss);
2117  // r2: function name.
2118  GenerateMissBranch();
2119 
2120  // Return the generated code.
2121  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2122 }
2123 
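// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The `and` with Smi::FromInt(0xffff) above works directly on the tagged
// value because smis are stored shifted left by one with a zero tag bit:
// (x << 1) & (0xffff << 1) == (x & 0xffff) << 1. A standalone check of that
// identity (kSmiTagSize == 1, kSmiTag == 0, as asserted in the stub):
#include <cassert>
#include <cstdint>

constexpr int32_t SmiTag(int32_t x) { return x << 1; }
constexpr int32_t SmiUntag(int32_t x) { return x >> 1; }

int main() {
  for (int32_t x : {0, 1, 0xffff, 0x10000, 0x12345}) {
    int32_t masked_tagged = SmiTag(x) & SmiTag(0xffff);
    assert(SmiUntag(masked_tagged) == (x & 0xffff));
  }
}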
2124 
2125 Handle<Code> CallStubCompiler::CompileMathFloorCall(
2126  Handle<Object> object,
2127  Handle<JSObject> holder,
2128  Handle<JSGlobalPropertyCell> cell,
2129  Handle<JSFunction> function,
2130  Handle<String> name) {
2131  // ----------- S t a t e -------------
2132  // -- r2 : function name
2133  // -- lr : return address
2134  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2135  // -- ...
2136  // -- sp[argc * 4] : receiver
2137  // -----------------------------------
2138 
2139  if (!CpuFeatures::IsSupported(VFP2)) {
2140  return Handle<Code>::null();
2141  }
2142 
2143  CpuFeatures::Scope scope_vfp2(VFP2);
2144  const int argc = arguments().immediate();
2145  // If the object is not a JSObject or we got an unexpected number of
2146  // arguments, bail out to the regular call.
2147  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2148 
2149  Label miss, slow;
2150  GenerateNameCheck(name, &miss);
2151 
2152  if (cell.is_null()) {
2153  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
2154  STATIC_ASSERT(kSmiTag == 0);
2155  __ JumpIfSmi(r1, &miss);
2156  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2157  name, &miss);
2158  } else {
2159  ASSERT(cell->value() == *function);
2160  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2161  &miss);
2162  GenerateLoadFunctionFromCell(cell, function, &miss);
2163  }
2164 
2165  // Load the (only) argument into r0.
2166  __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
2167 
2168  // If the argument is a smi, just return.
2169  STATIC_ASSERT(kSmiTag == 0);
2170  __ tst(r0, Operand(kSmiTagMask));
2171  __ Drop(argc + 1, eq);
2172  __ Ret(eq);
2173 
2174  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2175 
2176  Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
2177 
2178  // If VFP2 is enabled, we use the FPU rounding with the RM (round towards
2179  // minus infinity) mode.
2180 
2181  // Load the HeapNumber value.
2182  // We will need access to the value in the core registers, so we load it
2183  // with ldrd and move it to the fpu. It also spares a sub instruction for
2184  // updating the HeapNumber value address, as vldr expects a multiple
2185  // of 4 offset.
2186  __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
2187  __ vmov(d1, r4, r5);
2188 
2189  // Backup FPSCR.
2190  __ vmrs(r3);
2191  // Set custom FPSCR:
2192  // - Set rounding mode to "Round towards Minus Infinity"
2193  // (i.e. bits [23:22] = 0b10).
2194  // - Clear vfp cumulative exception flags (bits [3:0]).
2195  // - Make sure Flush-to-zero mode control bit is unset (bit 24).
2196  __ bic(r9, r3,
2197  Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
2198  __ orr(r9, r9, Operand(kRoundToMinusInf));
2199  __ vmsr(r9);
2200 
2201  // Convert the argument to an integer.
2202  __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
2203 
2204  // Use vcvt latency to start checking for special cases.
2205  // Get the argument exponent and clear the sign bit.
2206  __ bic(r6, r5, Operand(HeapNumber::kSignMask));
2207  __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
2208 
2209  // Retrieve FPSCR and check for vfp exceptions.
2210  __ vmrs(r9);
2211  __ tst(r9, Operand(kVFPExceptionMask));
2212  __ b(&no_vfp_exception, eq);
2213 
2214  // Check for NaN, Infinity, and -Infinity.
2215  // They are invariant under a Math.floor call, so just
2216  // return the original argument.
2217  __ sub(r7, r6, Operand(HeapNumber::kExponentMask
2218  >> HeapNumber::kMantissaBitsInTopWord), SetCC);
2219  __ b(&restore_fpscr_and_return, eq);
2220  // We had an overflow or underflow in the conversion. Check if we
2221  // have a big exponent.
2222  __ cmp(r7, Operand(HeapNumber::kMantissaBits));
2223  // If greater or equal, the argument is already rounded and in r0.
2224  __ b(&restore_fpscr_and_return, ge);
2225  __ b(&wont_fit_smi);
2226 
2227  __ bind(&no_vfp_exception);
2228  // Move the result back to general purpose register r0.
2229  __ vmov(r0, s0);
2230  // Check if the result fits into a smi.
2231  __ add(r1, r0, Operand(0x40000000), SetCC);
2232  __ b(&wont_fit_smi, mi);
2233  // Tag the result.
2234  STATIC_ASSERT(kSmiTag == 0);
2235  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2236 
2237  // Check for -0.
2238  __ cmp(r0, Operand(0, RelocInfo::NONE));
2239  __ b(&restore_fpscr_and_return, ne);
2240  // r5 already holds the HeapNumber exponent.
2241  __ tst(r5, Operand(HeapNumber::kSignMask));
2242  // If our HeapNumber is negative it was -0, so load its address and return.
2243  // Else r0 is loaded with 0, so we can also just return.
2244  __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
2245 
2246  __ bind(&restore_fpscr_and_return);
2247  // Restore FPSCR and return.
2248  __ vmsr(r3);
2249  __ Drop(argc + 1);
2250  __ Ret();
2251 
2252  __ bind(&wont_fit_smi);
2253  // Restore FPSCR and fall through to the slow case.
2254  __ vmsr(r3);
2255 
2256  __ bind(&slow);
2257  // Tail call the full function. We do not have to patch the receiver
2258  // because the function makes no use of it.
2259  __ InvokeFunction(
2260  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2261 
2262  __ bind(&miss);
2263  // r2: function name.
2264  GenerateMissBranch();
2265 
2266  // Return the generated code.
2267  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2268 }
2269 
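// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The stub above implements Math.floor by saving the FPSCR, switching the
// rounding mode to round-towards-minus-infinity, converting to integer, and
// restoring the FPSCR. The same save/round/restore pattern in portable C++
// (std::lrint honours the current rounding mode):
#include <cfenv>
#include <cmath>
#include <cstdio>

long FloorViaRoundingMode(double x) {
  int saved = std::fegetround();  // backup, like `vmrs r3` above
  std::fesetround(FE_DOWNWARD);   // round towards minus infinity
  long result = std::lrint(x);    // convert in the current mode
  std::fesetround(saved);         // restore, like `vmsr r3` above
  return result;
}

int main() {
  std::printf("%ld %ld\n", FloorViaRoundingMode(2.7),
              FloorViaRoundingMode(-2.1));  // prints "2 -3"
}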
2270 
2271 Handle<Code> CallStubCompiler::CompileMathAbsCall(
2272  Handle<Object> object,
2273  Handle<JSObject> holder,
2274  Handle<JSGlobalPropertyCell> cell,
2275  Handle<JSFunction> function,
2276  Handle<String> name) {
2277  // ----------- S t a t e -------------
2278  // -- r2 : function name
2279  // -- lr : return address
2280  // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2281  // -- ...
2282  // -- sp[argc * 4] : receiver
2283  // -----------------------------------
2284 
2285  const int argc = arguments().immediate();
2286  // If the object is not a JSObject or we got an unexpected number of
2287  // arguments, bail out to the regular call.
2288  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2289 
2290  Label miss;
2291  GenerateNameCheck(name, &miss);
2292  if (cell.is_null()) {
2293  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
2294  STATIC_ASSERT(kSmiTag == 0);
2295  __ JumpIfSmi(r1, &miss);
2296  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2297  name, &miss);
2298  } else {
2299  ASSERT(cell->value() == *function);
2300  GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2301  &miss);
2302  GenerateLoadFunctionFromCell(cell, function, &miss);
2303  }
2304 
2305  // Load the (only) argument into r0.
2306  __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
2307 
2308  // Check if the argument is a smi.
2309  Label not_smi;
2310  STATIC_ASSERT(kSmiTag == 0);
2311  __ JumpIfNotSmi(r0, &not_smi);
2312 
2313  // Do bitwise not or do nothing depending on the sign of the
2314  // argument.
2315  __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
2316 
2317  // Add 1 or do nothing depending on the sign of the argument.
2318  __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
2319 
2320  // If the result is still negative, go to the slow case.
2321  // This only happens for the most negative smi.
2322  Label slow;
2323  __ b(mi, &slow);
2324 
2325  // Smi case done.
2326  __ Drop(argc + 1);
2327  __ Ret();
2328 
2329  // Check if the argument is a heap number and load its exponent and
2330  // sign.
2331  __ bind(&not_smi);
2332  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2333  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2334 
2335  // Check the sign of the argument. If the argument is positive,
2336  // just return it.
2337  Label negative_sign;
2338  __ tst(r1, Operand(HeapNumber::kSignMask));
2339  __ b(ne, &negative_sign);
2340  __ Drop(argc + 1);
2341  __ Ret();
2342 
2343  // If the argument is negative, clear the sign, and return a new
2344  // number.
2345  __ bind(&negative_sign);
2346  __ eor(r1, r1, Operand(HeapNumber::kSignMask));
2347  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2348  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
2349  __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
2350  __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2351  __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2352  __ Drop(argc + 1);
2353  __ Ret();
2354 
2355  // Tail call the full function. We do not have to patch the receiver
2356  // because the function makes no use of it.
2357  __ bind(&slow);
2358  __ InvokeFunction(
2359  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2360 
2361  __ bind(&miss);
2362  // r2: function name.
2363  GenerateMissBranch();
2364 
2365  // Return the generated code.
2366  return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
2367 }
2368 
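// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The smi fast path above computes a branchless absolute value: XOR with
// the sign word (x ASR 31 is 0 or -1), then subtract the sign word. The
// result is still negative only for the most negative value, which is why
// that single case falls through to the slow path. The same trick in C++
// (assumes arithmetic right shift for negative values, as on ARM):
#include <cstdint>
#include <cstdio>

int32_t BranchlessAbs(int32_t x) {
  int32_t sign = x >> 31;    // 0 for x >= 0, -1 for x < 0 (like ASR)
  return (x ^ sign) - sign;  // eor then sub, as in the stub
  // Note: overflows for INT32_MIN, mirroring the stub's slow-case bailout.
}

int main() {
  std::printf("%d %d\n", BranchlessAbs(-5), BranchlessAbs(42));  // "5 42"
}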
2369 
2370 Handle<Code> CallStubCompiler::CompileFastApiCall(
2371  const CallOptimization& optimization,
2372  Handle<Object> object,
2373  Handle<JSObject> holder,
2374  Handle<JSGlobalPropertyCell> cell,
2375  Handle<JSFunction> function,
2376  Handle<String> name) {
2377  Counters* counters = isolate()->counters();
2378 
2379  ASSERT(optimization.is_simple_api_call());
2380  // Bail out if object is a global object as we don't want to
2381  // repatch it to global receiver.
2382  if (object->IsGlobalObject()) return Handle<Code>::null();
2383  if (!cell.is_null()) return Handle<Code>::null();
2384  if (!object->IsJSObject()) return Handle<Code>::null();
2385  int depth = optimization.GetPrototypeDepthOfExpectedType(
2386  Handle<JSObject>::cast(object), holder);
2387  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
2388 
2389  Label miss, miss_before_stack_reserved;
2390  GenerateNameCheck(name, &miss_before_stack_reserved);
2391 
2392  // Get the receiver from the stack.
2393  const int argc = arguments().immediate();
2394  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2395 
2396  // Check that the receiver isn't a smi.
2397  __ JumpIfSmi(r1, &miss_before_stack_reserved);
2398 
2399  __ IncrementCounter(counters->call_const(), 1, r0, r3);
2400  __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
2401 
2402  ReserveSpaceForFastApiCall(masm(), r0);
2403 
2404  // Check that the maps haven't changed and find a Holder as a side effect.
2405  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
2406  depth, &miss);
2407 
2408  GenerateFastApiDirectCall(masm(), optimization, argc);
2409 
2410  __ bind(&miss);
2411  FreeSpaceForFastApiCall(masm());
2412 
2413  __ bind(&miss_before_stack_reserved);
2414  GenerateMissBranch();
2415 
2416  // Return the generated code.
2417  return GetCode(function);
2418 }
2419 
2420 
2421 Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
2422  Handle<JSObject> holder,
2423  Handle<JSFunction> function,
2424  Handle<String> name,
2425  CheckType check) {
2426  // ----------- S t a t e -------------
2427  // -- r2 : name
2428  // -- lr : return address
2429  // -----------------------------------
2430  if (HasCustomCallGenerator(function)) {
2431  Handle<Code> code = CompileCustomCall(object, holder,
2432  Handle<JSGlobalPropertyCell>::null(),
2433  function, name);
2434  // A null handle means bail out to the regular compiler code below.
2435  if (!code.is_null()) return code;
2436  }
2437 
2438  Label miss;
2439  GenerateNameCheck(name, &miss);
2440 
2441  // Get the receiver from the stack
2442  const int argc = arguments().immediate();
2443  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2444 
2445  // Check that the receiver isn't a smi.
2446  if (check != NUMBER_CHECK) {
2447  __ JumpIfSmi(r1, &miss);
2448  }
2449 
2450  // Make sure that it's okay not to patch the on stack receiver
2451  // unless we're doing a receiver map check.
2452  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
2453  switch (check) {
2454  case RECEIVER_MAP_CHECK:
2455  __ IncrementCounter(masm()->isolate()->counters()->call_const(),
2456  1, r0, r3);
2457 
2458  // Check that the maps haven't changed.
2459  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2460  name, &miss);
2461 
2462  // Patch the receiver on the stack with the global proxy if
2463  // necessary.
2464  if (object->IsGlobalObject()) {
2465  __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
2466  __ str(r3, MemOperand(sp, argc * kPointerSize));
2467  }
2468  break;
2469 
2470  case STRING_CHECK:
2471  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2472  // Check that the object is a string or a symbol.
2473  __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
2474  __ b(ge, &miss);
2475  // Check that the maps starting from the prototype haven't changed.
2476  GenerateDirectLoadGlobalFunctionPrototype(
2477  masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
2478  CheckPrototypes(
2479  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2480  r0, holder, r3, r1, r4, name, &miss);
2481  } else {
2482  // Calling non-strict non-builtins with a value as the receiver
2483  // requires boxing.
2484  __ jmp(&miss);
2485  }
2486  break;
2487 
2488  case NUMBER_CHECK:
2489  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2490  Label fast;
2491  // Check that the object is a smi or a heap number.
2492  __ JumpIfSmi(r1, &fast);
2493  __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
2494  __ b(ne, &miss);
2495  __ bind(&fast);
2496  // Check that the maps starting from the prototype haven't changed.
2497  GenerateDirectLoadGlobalFunctionPrototype(
2498  masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
2499  CheckPrototypes(
2500  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2501  r0, holder, r3, r1, r4, name, &miss);
2502  } else {
2503  // Calling non-strict non-builtins with a value as the receiver
2504  // requires boxing.
2505  __ jmp(&miss);
2506  }
2507  break;
2508 
2509  case BOOLEAN_CHECK:
2510  if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2511  Label fast;
2512  // Check that the object is a boolean.
2513  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
2514  __ cmp(r1, ip);
2515  __ b(eq, &fast);
2516  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
2517  __ cmp(r1, ip);
2518  __ b(ne, &miss);
2519  __ bind(&fast);
2520  // Check that the maps starting from the prototype haven't changed.
2521  GenerateDirectLoadGlobalFunctionPrototype(
2522  masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
2523  CheckPrototypes(
2524  Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2525  r0, holder, r3, r1, r4, name, &miss);
2526  } else {
2527  // Calling non-strict non-builtins with a value as the receiver
2528  // requires boxing.
2529  __ jmp(&miss);
2530  }
2531  break;
2532  }
2533 
2534  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2535  ? CALL_AS_FUNCTION
2536  : CALL_AS_METHOD;
2537  __ InvokeFunction(
2538  function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
2539 
2540  // Handle call cache miss.
2541  __ bind(&miss);
2542  GenerateMissBranch();
2543 
2544  // Return the generated code.
2545  return GetCode(function);
2546 }
2547 
2548 
2549 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
2550  Handle<JSObject> holder,
2551  Handle<String> name) {
2552  // ----------- S t a t e -------------
2553  // -- r2 : name
2554  // -- lr : return address
2555  // -----------------------------------
2556  Label miss;
2557  GenerateNameCheck(name, &miss);
2558 
2559  // Get the number of arguments.
2560  const int argc = arguments().immediate();
2561  LookupResult lookup(isolate());
2562  LookupPostInterceptor(holder, name, &lookup);
2563 
2564  // Get the receiver from the stack.
2565  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2566 
2567  CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
2568  compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
2569  &miss);
2570 
2571  // Move returned value, the function to call, to r1.
2572  __ mov(r1, r0);
2573  // Restore receiver.
2574  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
2575 
2576  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
2577 
2578  // Handle call cache miss.
2579  __ bind(&miss);
2580  GenerateMissBranch();
2581 
2582  // Return the generated code.
2583  return GetCode(Code::INTERCEPTOR, name);
2584 }
2585 
2586 
2587  Handle<Code> CallStubCompiler::CompileCallGlobal(
2588  Handle<JSObject> object,
2589  Handle<GlobalObject> holder,
2590  Handle<JSGlobalPropertyCell> cell,
2591  Handle<JSFunction> function,
2592  Handle<String> name) {
2593  // ----------- S t a t e -------------
2594  // -- r2 : name
2595  // -- lr : return address
2596  // -----------------------------------
2597  if (HasCustomCallGenerator(function)) {
2598  Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
2599  // A null handle means bail out to the regular compiler code below.
2600  if (!code.is_null()) return code;
2601  }
2602 
2603  Label miss;
2604  GenerateNameCheck(name, &miss);
2605 
2606  // Get the number of arguments.
2607  const int argc = arguments().immediate();
2608  GenerateGlobalReceiverCheck(object, holder, name, &miss);
2609  GenerateLoadFunctionFromCell(cell, function, &miss);
2610 
2611  // Patch the receiver on the stack with the global proxy if
2612  // necessary.
2613  if (object->IsGlobalObject()) {
2614  __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
2615  __ str(r3, MemOperand(sp, argc * kPointerSize));
2616  }
2617 
2618  // Set up the context (function already in r1).
2619  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2620 
2621  // Jump to the cached code (tail call).
2622  Counters* counters = masm()->isolate()->counters();
2623  __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
2624  ParameterCount expected(function->shared()->formal_parameter_count());
2625  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2627  : CALL_AS_METHOD;
2628  // We call indirectly through the code field in the function to
2629  // allow recompilation to take effect without changing any of the
2630  // call sites.
2631  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2632  __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
2633  NullCallWrapper(), call_kind);
2634 
2635  // Handle call cache miss.
2636  __ bind(&miss);
2637  __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
2638  GenerateMissBranch();
2639 
2640  // Return the generated code.
2641  return GetCode(Code::NORMAL, name);
2642 }
2643 
2644 
2645 Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
2646  int index,
2647  Handle<Map> transition,
2648  Handle<String> name) {
2649  // ----------- S t a t e -------------
2650  // -- r0 : value
2651  // -- r1 : receiver
2652  // -- r2 : name
2653  // -- lr : return address
2654  // -----------------------------------
2655  Label miss;
2656 
2657  GenerateStoreField(masm(),
2658  object,
2659  index,
2660  transition,
2661  name,
2662  r1, r2, r3, r4,
2663  &miss);
2664  __ bind(&miss);
2665  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2666  __ Jump(ic, RelocInfo::CODE_TARGET);
2667 
2668  // Return the generated code.
2669  return GetCode(transition.is_null()
2670  ? Code::FIELD
2671  : Code::MAP_TRANSITION, name);
2672 }
2673 
2674 
2675  Handle<Code> StoreStubCompiler::CompileStoreCallback(
2676  Handle<String> name,
2677  Handle<JSObject> receiver,
2678  Handle<JSObject> holder,
2679  Handle<AccessorInfo> callback) {
2680  // ----------- S t a t e -------------
2681  // -- r0 : value
2682  // -- r1 : receiver
2683  // -- r2 : name
2684  // -- lr : return address
2685  // -----------------------------------
2686  Label miss;
2687  // Check that the maps haven't changed.
2688  __ JumpIfSmi(r1, &miss);
2689  CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
2690 
2691  // Stub never generated for non-global objects that require access checks.
2692  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
2693 
2694  __ push(r1); // receiver
2695  __ mov(ip, Operand(callback)); // callback info
2696  __ Push(ip, r2, r0);
2697 
2698  // Do tail-call to the runtime system.
2699  ExternalReference store_callback_property =
2700  ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
2701  masm()->isolate());
2702  __ TailCallExternalReference(store_callback_property, 4, 1);
2703 
2704  // Handle store cache miss.
2705  __ bind(&miss);
2706  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2707  __ Jump(ic, RelocInfo::CODE_TARGET);
2708 
2709  // Return the generated code.
2710  return GetCode(Code::CALLBACKS, name);
2711 }
2712 
2713 
2714 #undef __
2715 #define __ ACCESS_MASM(masm)
2716 
2717 
2718  void StoreStubCompiler::GenerateStoreViaSetter(
2719  MacroAssembler* masm,
2720  Handle<JSFunction> setter) {
2721  // ----------- S t a t e -------------
2722  // -- r0 : value
2723  // -- r1 : receiver
2724  // -- r2 : name
2725  // -- lr : return address
2726  // -----------------------------------
2727  {
2728  FrameScope scope(masm, StackFrame::INTERNAL);
2729 
2730  // Save value register, so we can restore it later.
2731  __ push(r0);
2732 
2733  if (!setter.is_null()) {
2734  // Call the JavaScript setter with receiver and value on the stack.
2735  __ Push(r1, r0);
2736  ParameterCount actual(1);
2737  __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
2738  CALL_AS_METHOD);
2739  } else {
2740  // If we generate a global code snippet for deoptimization only, remember
2741  // the place to continue after deoptimization.
2742  masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
2743  }
2744 
2745  // We have to return the passed value, not the return value of the setter.
2746  __ pop(r0);
2747 
2748  // Restore context register.
2749  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2750  }
2751  __ Ret();
2752 }
2753 
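// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The push/pop of r0 around the setter call above exists because a JS
// assignment expression evaluates to the assigned value, not to whatever
// the setter returns. The same contract in plain C++:
#include <cstdio>

int g_store = 0;
int Setter(int v) { g_store = v; return 999; }  // setter's return is ignored

// A store through a setter must yield the value that was assigned.
int StoreViaSetter(int value) {
  Setter(value);  // call the setter (the __ InvokeFunction above)
  return value;   // hand back the original value (the __ pop(r0) above)
}

int main() {
  std::printf("%d\n", StoreViaSetter(7));  // prints "7", not "999"
}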
2754 
2755 #undef __
2756 #define __ ACCESS_MASM(masm())
2757 
2758 
2759  Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
2760  Handle<String> name,
2761  Handle<JSObject> receiver,
2762  Handle<JSObject> holder,
2763  Handle<JSFunction> setter) {
2764  // ----------- S t a t e -------------
2765  // -- r0 : value
2766  // -- r1 : receiver
2767  // -- r2 : name
2768  // -- lr : return address
2769  // -----------------------------------
2770  Label miss;
2771 
2772  // Check that the maps haven't changed.
2773  __ JumpIfSmi(r1, &miss);
2774  CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
2775 
2776  GenerateStoreViaSetter(masm(), setter);
2777 
2778  __ bind(&miss);
2779  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2780  __ Jump(ic, RelocInfo::CODE_TARGET);
2781 
2782  // Return the generated code.
2783  return GetCode(Code::CALLBACKS, name);
2784 }
2785 
2786 
2787  Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
2788  Handle<JSObject> receiver,
2789  Handle<String> name) {
2790  // ----------- S t a t e -------------
2791  // -- r0 : value
2792  // -- r1 : receiver
2793  // -- r2 : name
2794  // -- lr : return address
2795  // -----------------------------------
2796  Label miss;
2797 
2798  // Check that the map of the object hasn't changed.
2799  __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
2800  DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
2801 
2802  // Perform global security token check if needed.
2803  if (receiver->IsJSGlobalProxy()) {
2804  __ CheckAccessGlobalProxy(r1, r3, &miss);
2805  }
2806 
2807  // Stub is never generated for non-global objects that require access
2808  // checks.
2809  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
2810 
2811  __ Push(r1, r2, r0); // Receiver, name, value.
2812 
2813  __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
2814  __ push(r0); // strict mode
2815 
2816  // Do tail-call to the runtime system.
2817  ExternalReference store_ic_property =
2818  ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
2819  masm()->isolate());
2820  __ TailCallExternalReference(store_ic_property, 4, 1);
2821 
2822  // Handle store cache miss.
2823  __ bind(&miss);
2824  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2825  __ Jump(ic, RelocInfo::CODE_TARGET);
2826 
2827  // Return the generated code.
2828  return GetCode(Code::INTERCEPTOR, name);
2829 }
2830 
2831 
2832  Handle<Code> StoreStubCompiler::CompileStoreGlobal(
2833  Handle<GlobalObject> object,
2834  Handle<JSGlobalPropertyCell> cell,
2835  Handle<String> name) {
2836  // ----------- S t a t e -------------
2837  // -- r0 : value
2838  // -- r1 : receiver
2839  // -- r2 : name
2840  // -- lr : return address
2841  // -----------------------------------
2842  Label miss;
2843 
2844  // Check that the map of the global has not changed.
2845  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2846  __ cmp(r3, Operand(Handle<Map>(object->map())));
2847  __ b(ne, &miss);
2848 
2849  // Check that the value in the cell is not the hole. If it is, this
2850  // cell could have been deleted and reintroducing the global needs
2851  // to update the property details in the property dictionary of the
2852  // global object. We bail out to the runtime system to do that.
2853  __ mov(r4, Operand(cell));
2854  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2855  __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
2856  __ cmp(r5, r6);
2857  __ b(eq, &miss);
2858 
2859  // Store the value in the cell.
2860  __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
2861  // Cells are always rescanned, so no write barrier here.
2862 
2863  Counters* counters = masm()->isolate()->counters();
2864  __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
2865  __ Ret();
2866 
2867  // Handle store cache miss.
2868  __ bind(&miss);
2869  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
2870  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2871  __ Jump(ic, RelocInfo::CODE_TARGET);
2872 
2873  // Return the generated code.
2874  return GetCode(Code::NORMAL, name);
2875 }
2876 
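// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// Globals are stored through a property cell, and the stub above misses
// whenever the cell holds the hole: a deleted-and-recreated global needs
// its property details rewritten by the runtime, not a raw cell store.
// A sketch of that fast-path rule (hypothetical types):
#include <cstdio>

constexpr int kTheHole = -1;  // stand-in for the hole sentinel

struct PropertyCell { int value; };

// Fast-path store: only allowed while the cell is live.
bool FastStoreGlobal(PropertyCell* cell, int value) {
  if (cell->value == kTheHole) return false;  // miss: defer to the runtime
  cell->value = value;  // cells are always rescanned, so no write barrier
  return true;
}

int main() {
  PropertyCell cell{0};
  std::printf("%d\n", FastStoreGlobal(&cell, 42));  // prints "1"
}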
2877 
2878 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
2879  Handle<JSObject> object,
2880  Handle<JSObject> last) {
2881  // ----------- S t a t e -------------
2882  // -- r0 : receiver
2883  // -- lr : return address
2884  // -----------------------------------
2885  Label miss;
2886 
2887  // Check that receiver is not a smi.
2888  __ JumpIfSmi(r0, &miss);
2889 
2890  // Check the maps of the full prototype chain.
2891  CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
2892 
2893  // If the last object in the prototype chain is a global object,
2894  // check that the global property cell is empty.
2895  if (last->IsGlobalObject()) {
2896  GenerateCheckPropertyCell(
2897  masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
2898  }
2899 
2900  // Return undefined if maps of the full prototype chain are still the
2901  // same and no global property with this name contains a value.
2902  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2903  __ Ret();
2904 
2905  __ bind(&miss);
2906  GenerateLoadMiss(masm(), Code::LOAD_IC);
2907 
2908  // Return the generated code.
2909  return GetCode(Code::NONEXISTENT, factory()->empty_string());
2910 }
2911 
2912 
2913 Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
2914  Handle<JSObject> holder,
2915  int index,
2916  Handle<String> name) {
2917  // ----------- S t a t e -------------
2918  // -- r0 : receiver
2919  // -- r2 : name
2920  // -- lr : return address
2921  // -----------------------------------
2922  Label miss;
2923 
2924  GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
2925  __ bind(&miss);
2926  GenerateLoadMiss(masm(), Code::LOAD_IC);
2927 
2928  // Return the generated code.
2929  return GetCode(Code::FIELD, name);
2930 }
2931 
2932 
2933  Handle<Code> LoadStubCompiler::CompileLoadCallback(
2934  Handle<String> name,
2935  Handle<JSObject> object,
2936  Handle<JSObject> holder,
2937  Handle<AccessorInfo> callback) {
2938  // ----------- S t a t e -------------
2939  // -- r0 : receiver
2940  // -- r2 : name
2941  // -- lr : return address
2942  // -----------------------------------
2943  Label miss;
2944  GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
2945  &miss);
2946  __ bind(&miss);
2947  GenerateLoadMiss(masm(), Code::LOAD_IC);
2948 
2949  // Return the generated code.
2950  return GetCode(Code::CALLBACKS, name);
2951 }
2952 
2953 
2954 #undef __
2955 #define __ ACCESS_MASM(masm)
2956 
2957 
2958 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
2959  Handle<JSFunction> getter) {
2960  // ----------- S t a t e -------------
2961  // -- r0 : receiver
2962  // -- r2 : name
2963  // -- lr : return address
2964  // -----------------------------------
2965  {
2966  FrameScope scope(masm, StackFrame::INTERNAL);
2967 
2968  if (!getter.is_null()) {
2969  // Call the JavaScript getter with the receiver on the stack.
2970  __ push(r0);
2971  ParameterCount actual(0);
2972  __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
2973  CALL_AS_METHOD);
2974  } else {
2975  // If we generate a global code snippet for deoptimization only, remember
2976  // the place to continue after deoptimization.
2977  masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
2978  }
2979 
2980  // Restore context register.
2981  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2982  }
2983  __ Ret();
2984 }
2985 
2986 
2987 #undef __
2988 #define __ ACCESS_MASM(masm())
2989 
2990 
2991  Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
2992  Handle<String> name,
2993  Handle<JSObject> receiver,
2994  Handle<JSObject> holder,
2995  Handle<JSFunction> getter) {
2996  // ----------- S t a t e -------------
2997  // -- r0 : receiver
2998  // -- r2 : name
2999  // -- lr : return address
3000  // -----------------------------------
3001  Label miss;
3002 
3003  // Check that the maps haven't changed.
3004  __ JumpIfSmi(r0, &miss);
3005  CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
3006 
3007  GenerateLoadViaGetter(masm(), getter);
3008 
3009  __ bind(&miss);
3010  GenerateLoadMiss(masm(), Code::LOAD_IC);
3011 
3012  // Return the generated code.
3013  return GetCode(Code::CALLBACKS, name);
3014 }
3015 
3016 
3017 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
3018  Handle<JSObject> holder,
3019  Handle<JSFunction> value,
3020  Handle<String> name) {
3021  // ----------- S t a t e -------------
3022  // -- r0 : receiver
3023  // -- r2 : name
3024  // -- lr : return address
3025  // -----------------------------------
3026  Label miss;
3027 
3028  GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
3029  __ bind(&miss);
3030  GenerateLoadMiss(masm(), Code::LOAD_IC);
3031 
3032  // Return the generated code.
3033  return GetCode(Code::CONSTANT_FUNCTION, name);
3034 }
3035 
3036 
3037 Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
3038  Handle<JSObject> holder,
3039  Handle<String> name) {
3040  // ----------- S t a t e -------------
3041  // -- r0 : receiver
3042  // -- r2 : name
3043  // -- lr : return address
3044  // -----------------------------------
3045  Label miss;
3046 
3047  LookupResult lookup(isolate());
3048  LookupPostInterceptor(holder, name, &lookup);
3049  GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
3050  &miss);
3051  __ bind(&miss);
3052  GenerateLoadMiss(masm(), Code::LOAD_IC);
3053 
3054  // Return the generated code.
3055  return GetCode(Code::INTERCEPTOR, name);
3056 }
3057 
3058 
3059  Handle<Code> LoadStubCompiler::CompileLoadGlobal(
3060  Handle<JSObject> object,
3061  Handle<GlobalObject> holder,
3062  Handle<JSGlobalPropertyCell> cell,
3063  Handle<String> name,
3064  bool is_dont_delete) {
3065  // ----------- S t a t e -------------
3066  // -- r0 : receiver
3067  // -- r2 : name
3068  // -- lr : return address
3069  // -----------------------------------
3070  Label miss;
3071 
3072  // Check that the map of the global has not changed.
3073  __ JumpIfSmi(r0, &miss);
3074  CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
3075 
3076  // Get the value from the cell.
3077  __ mov(r3, Operand(cell));
3078  __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
3079 
3080  // Check for deleted property if property can actually be deleted.
3081  if (!is_dont_delete) {
3082  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3083  __ cmp(r4, ip);
3084  __ b(eq, &miss);
3085  }
3086 
3087  __ mov(r0, r4);
3088  Counters* counters = masm()->isolate()->counters();
3089  __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
3090  __ Ret();
3091 
3092  __ bind(&miss);
3093  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
3094  GenerateLoadMiss(masm(), Code::LOAD_IC);
3095 
3096  // Return the generated code.
3097  return GetCode(Code::NORMAL, name);
3098 }
3099 
3100 
3101 Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
3102  Handle<JSObject> receiver,
3103  Handle<JSObject> holder,
3104  int index) {
3105  // ----------- S t a t e -------------
3106  // -- lr : return address
3107  // -- r0 : key
3108  // -- r1 : receiver
3109  // -----------------------------------
3110  Label miss;
3111 
3112  // Check the key is the cached one.
3113  __ cmp(r0, Operand(name));
3114  __ b(ne, &miss);
3115 
3116  GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
3117  __ bind(&miss);
3118  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3119 
3120  return GetCode(Code::FIELD, name);
3121 }
3122 
3123 
3124  Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
3125  Handle<String> name,
3126  Handle<JSObject> receiver,
3127  Handle<JSObject> holder,
3128  Handle<AccessorInfo> callback) {
3129  // ----------- S t a t e -------------
3130  // -- lr : return address
3131  // -- r0 : key
3132  // -- r1 : receiver
3133  // -----------------------------------
3134  Label miss;
3135 
3136  // Check the key is the cached one.
3137  __ cmp(r0, Operand(name));
3138  __ b(ne, &miss);
3139 
3140  GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
3141  &miss);
3142  __ bind(&miss);
3143  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3144 
3145  return GetCode(Code::CALLBACKS, name);
3146 }
3147 
3148 
3149  Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
3150  Handle<String> name,
3151  Handle<JSObject> receiver,
3152  Handle<JSObject> holder,
3153  Handle<JSFunction> value) {
3154  // ----------- S t a t e -------------
3155  // -- lr : return address
3156  // -- r0 : key
3157  // -- r1 : receiver
3158  // -----------------------------------
3159  Label miss;
3160 
3161  // Check the key is the cached one.
3162  __ cmp(r0, Operand(name));
3163  __ b(ne, &miss);
3164 
3165  GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
3166  __ bind(&miss);
3167  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3168 
3169  // Return the generated code.
3170  return GetCode(Code::CONSTANT_FUNCTION, name);
3171 }
3172 
3173 
3174  Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
3175  Handle<JSObject> receiver,
3176  Handle<JSObject> holder,
3177  Handle<String> name) {
3178  // ----------- S t a t e -------------
3179  // -- lr : return address
3180  // -- r0 : key
3181  // -- r1 : receiver
3182  // -----------------------------------
3183  Label miss;
3184 
3185  // Check the key is the cached one.
3186  __ cmp(r0, Operand(name));
3187  __ b(ne, &miss);
3188 
3189  LookupResult lookup(isolate());
3190  LookupPostInterceptor(holder, name, &lookup);
3191  GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
3192  &miss);
3193  __ bind(&miss);
3194  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3195 
3196  return GetCode(Code::INTERCEPTOR, name);
3197 }
3198 
3199 
3200  Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
3201  Handle<String> name) {
3202  // ----------- S t a t e -------------
3203  // -- lr : return address
3204  // -- r0 : key
3205  // -- r1 : receiver
3206  // -----------------------------------
3207  Label miss;
3208 
3209  // Check the key is the cached one.
3210  __ cmp(r0, Operand(name));
3211  __ b(ne, &miss);
3212 
3213  GenerateLoadArrayLength(masm(), r1, r2, &miss);
3214  __ bind(&miss);
3215  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3216 
3217  return GetCode(Code::CALLBACKS, name);
3218 }
3219 
3220 
3221  Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
3222  Handle<String> name) {
3223  // ----------- S t a t e -------------
3224  // -- lr : return address
3225  // -- r0 : key
3226  // -- r1 : receiver
3227  // -----------------------------------
3228  Label miss;
3229 
3230  Counters* counters = masm()->isolate()->counters();
3231  __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
3232 
3233  // Check the key is the cached one.
3234  __ cmp(r0, Operand(name));
3235  __ b(ne, &miss);
3236 
3237  GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
3238  __ bind(&miss);
3239  __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
3240 
3241  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3242 
3243  return GetCode(Code::CALLBACKS, name);
3244 }
3245 
3246 
3247  Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
3248  Handle<String> name) {
3249  // ----------- S t a t e -------------
3250  // -- lr : return address
3251  // -- r0 : key
3252  // -- r1 : receiver
3253  // -----------------------------------
3254  Label miss;
3255 
3256  Counters* counters = masm()->isolate()->counters();
3257  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
3258 
3259  // Check the name hasn't changed.
3260  __ cmp(r0, Operand(name));
3261  __ b(ne, &miss);
3262 
3263  GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
3264  __ bind(&miss);
3265  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
3266  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3267 
3268  return GetCode(Code::CALLBACKS, name);
3269 }
3270 
3271 
3272  Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
3273  Handle<Map> receiver_map) {
3274  // ----------- S t a t e -------------
3275  // -- lr : return address
3276  // -- r0 : key
3277  // -- r1 : receiver
3278  // -----------------------------------
3279  ElementsKind elements_kind = receiver_map->elements_kind();
3280  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
3281 
3282  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
3283 
3284  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
3285  __ Jump(ic, RelocInfo::CODE_TARGET);
3286 
3287  // Return the generated code.
3288  return GetCode(Code::NORMAL, factory()->empty_string());
3289 }
3290 
3291 
3292  Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
3293  MapHandleList* receiver_maps,
3294  CodeHandleList* handler_ics) {
3295  // ----------- S t a t e -------------
3296  // -- lr : return address
3297  // -- r0 : key
3298  // -- r1 : receiver
3299  // -----------------------------------
3300  Label miss;
3301  __ JumpIfSmi(r1, &miss);
3302 
3303  int receiver_count = receiver_maps->length();
3304  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
3305  for (int current = 0; current < receiver_count; ++current) {
3306  __ mov(ip, Operand(receiver_maps->at(current)));
3307  __ cmp(r2, ip);
3308  __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
3309  }
3310 
3311  __ bind(&miss);
3312  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
3313  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
3314 
3315  // Return the generated code.
3316  return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
3317 }
3318 
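// [Editor's aside — illustrative sketch, not part of stub-cache-arm.cc.]
// The polymorphic stub above is a linear dispatch: load the receiver's map,
// compare it against each cached map, and tail-jump to the matching handler,
// falling through to the miss handler when nothing matches. A sketch of
// that dispatch shape (hypothetical types, not V8's):
#include <cstdio>

struct Map {};                      // stands in for v8::internal::Map
using Handler = const char* (*)();  // stands in for a handler code object

const char* Miss() { return "miss"; }

const char* Dispatch(const Map* receiver_map, const Map* const maps[],
                     const Handler handlers[], int count) {
  for (int i = 0; i < count; ++i) {
    if (receiver_map == maps[i]) return handlers[i]();  // map matched
  }
  return Miss();  // no cached map matched: go to the miss handler
}

int main() {
  Map a, b;
  const Map* maps[] = {&a, &b};
  Handler handlers[] = {+[] { return "fast elements"; },
                        +[] { return "dictionary"; }};
  std::printf("%s\n", Dispatch(&b, maps, handlers, 2));  // "dictionary"
}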
3319 
3320 Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
3321  int index,
3322  Handle<Map> transition,
3323  Handle<String> name) {
3324  // ----------- S t a t e -------------
3325  // -- r0 : value
3326  // -- r1 : name
3327  // -- r2 : receiver
3328  // -- lr : return address
3329  // -----------------------------------
3330  Label miss;
3331 
3332  Counters* counters = masm()->isolate()->counters();
3333  __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
3334 
3335  // Check that the name has not changed.
3336  __ cmp(r1, Operand(name));
3337  __ b(ne, &miss);
3338 
3339  // r3 is used as scratch register. r1 and r2 keep their values if a jump to
3340  // the miss label is generated.
3341  GenerateStoreField(masm(),
3342  object,
3343  index,
3344  transition,
3345  name,
3346  r2, r1, r3, r4,
3347  &miss);
3348  __ bind(&miss);
3349 
3350  __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
3351  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
3352  __ Jump(ic, RelocInfo::CODE_TARGET);
3353 
3354  // Return the generated code.
3355  return GetCode(transition.is_null()
3356  ? Code::FIELD
3357  : Code::MAP_TRANSITION, name);
3358 }
3359 
3360 
3361  Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
3362  Handle<Map> receiver_map) {
3363  // ----------- S t a t e -------------
3364  // -- r0 : value
3365  // -- r1 : key
3366  // -- r2 : receiver
3367  // -- lr : return address
3368  // -- r3 : scratch
3369  // -----------------------------------
3370  ElementsKind elements_kind = receiver_map->elements_kind();
3371  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
3372  Handle<Code> stub =
3373  KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
3374 
3375  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
3376 
3377  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
3378  __ Jump(ic, RelocInfo::CODE_TARGET);
3379 
3380  // Return the generated code.
3381  return GetCode(Code::NORMAL, factory()->empty_string());
3382 }
3383 
3384 
3385  Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
3386  MapHandleList* receiver_maps,
3387  CodeHandleList* handler_stubs,
3388  MapHandleList* transitioned_maps) {
3389  // ----------- S t a t e -------------
3390  // -- r0 : value
3391  // -- r1 : key
3392  // -- r2 : receiver
3393  // -- lr : return address
3394  // -- r3 : scratch
3395  // -----------------------------------
3396  Label miss;
3397  __ JumpIfSmi(r2, &miss);
3398 
3399  int receiver_count = receiver_maps->length();
3400  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
3401  for (int i = 0; i < receiver_count; ++i) {
3402  __ mov(ip, Operand(receiver_maps->at(i)));
3403  __ cmp(r3, ip);
3404  if (transitioned_maps->at(i).is_null()) {
3405  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
3406  } else {
3407  Label next_map;
3408  __ b(ne, &next_map);
3409  __ mov(r3, Operand(transitioned_maps->at(i)));
3410  __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
3411  __ bind(&next_map);
3412  }
3413  }
3414 
3415  __ bind(&miss);
3416  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
3417  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
3418 
3419  // Return the generated code.
3420  return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
3421 }
3422 
3423 
3424 Handle<Code> ConstructStubCompiler::CompileConstructStub(
3425  Handle<JSFunction> function) {
3426  // ----------- S t a t e -------------
3427  // -- r0 : argc
3428  // -- r1 : constructor
3429  // -- lr : return address
3430  // -- [sp] : last argument
3431  // -----------------------------------
3432  Label generic_stub_call;
3433 
3434  // Use r7 to hold undefined, which is needed in several places below.
3435  __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
3436 
3437 #ifdef ENABLE_DEBUGGER_SUPPORT
3438  // Check to see whether there are any break points in the function code. If
3439  // there are, jump to the generic constructor stub, which calls the actual
3440  // code for the function and thereby hits the break points.
3441  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
3442  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
3443  __ cmp(r2, r7);
3444  __ b(ne, &generic_stub_call);
3445 #endif
3446 
3447  // Load the initial map and verify that it is in fact a map.
3448  // r1: constructor function
3449  // r7: undefined
3450  __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
3451  __ JumpIfSmi(r2, &generic_stub_call);
3452  __ CompareObjectType(r2, r3, r4, MAP_TYPE);
3453  __ b(ne, &generic_stub_call);
3454 
3455 #ifdef DEBUG
3456  // Cannot construct functions this way.
3457  // r0: argc
3458  // r1: constructor function
3459  // r2: initial map
3460  // r7: undefined
3461  __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
3462  __ Check(ne, "Function constructed by construct stub.");
3463 #endif
3464 
3465  // Now allocate the JSObject in new space.
3466  // r0: argc
3467  // r1: constructor function
3468  // r2: initial map
3469  // r7: undefined
3470  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
3471  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
3472 
3473  // Allocated the JSObject, now initialize the fields. The map is set to the
3474  // initial map, and the properties and elements are set to the empty fixed array.
3475  // r0: argc
3476  // r1: constructor function
3477  // r2: initial map
3478  // r3: object size (in words)
3479  // r4: JSObject (not tagged)
3480  // r7: undefined
3481  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
3482  __ mov(r5, r4);
3483  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
3484  __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
3485  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
3486  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
3487  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
3488  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
3489 
3490  // Calculate the location of the first argument. The stack contains only the
3491  // argc arguments.
3492  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
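 // r1 now points just above the first (deepest) argument, so argument n is
 // found at r1 - (n + 1) * kPointerSize; the negative MemOperand offsets
 // below rely on this.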
3493 
3494  // Fill all the in-object properties with undefined.
3495  // r0: argc
3496  // r1: first argument
3497  // r3: object size (in words)
3498  // r4: JSObject (not tagged)
3499  // r5: First in-object property of JSObject (not tagged)
3500  // r7: undefined
3501  // Fill the initialized properties with a constant value or a passed argument
3502  // depending on the this.x = ...; assignment in the function.
3503  Handle<SharedFunctionInfo> shared(function->shared());
3504  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
3505  if (shared->IsThisPropertyAssignmentArgument(i)) {
3506  Label not_passed, next;
3507  // Check if the argument assigned to the property is actually passed.
3508  int arg_number = shared->GetThisPropertyAssignmentArgument(i);
3509  __ cmp(r0, Operand(arg_number));
3510  __ b(le, &not_passed);
3511  // Argument passed - find it on the stack.
3512  __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
3513  __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
3514  __ b(&next);
3515  __ bind(&not_passed);
3516  // Set the property to undefined.
3517  __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
3518  __ bind(&next);
3519  } else {
3520  // Set the property to the constant value.
3521  Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
3522  __ mov(r2, Operand(constant));
3523  __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
3524  }
3525  }
3526 
3527  // Fill the unused in-object property fields with undefined.
3528  ASSERT(function->has_initial_map());
3529  for (int i = shared->this_property_assignments_count();
3530  i < function->initial_map()->inobject_properties();
3531  i++) {
3532  __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
3533  }
3534 
3535  // r0: argc
3536  // r4: JSObject (not tagged)
3537  // Move argc to r1 and the JSObject to return to r0 and tag it.
3538  __ mov(r1, r0);
3539  __ mov(r0, r4);
3540  __ orr(r0, r0, Operand(kHeapObjectTag));
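 // Heap object pointers are distinguished from smis by the low bit, so
 // or-ing in kHeapObjectTag turns the raw allocation address into a tagged
 // JSObject pointer.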
3541 
3542  // r0: JSObject
3543  // r1: argc
3544  // Remove caller arguments and receiver from the stack and return.
3545  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
3546  __ add(sp, sp, Operand(kPointerSize));
3547  Counters* counters = masm()->isolate()->counters();
3548  __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
3549  __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
3550  __ Jump(lr);
3551 
3552  // Jump to the generic stub in case the specialized code cannot handle the
3553  // construction.
3554  __ bind(&generic_stub_call);
3555  Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
3556  __ Jump(code, RelocInfo::CODE_TARGET);
3557 
3558  // Return the generated code.
3559  return GetCode();
3560 }
3561 
3562 
3563 #undef __
3564 #define __ ACCESS_MASM(masm)
3565 
3566 
3567 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
3568  MacroAssembler* masm) {
3569  // ---------- S t a t e --------------
3570  // -- lr : return address
3571  // -- r0 : key
3572  // -- r1 : receiver
3573  // -----------------------------------
3574  Label slow, miss_force_generic;
3575 
3576  Register key = r0;
3577  Register receiver = r1;
3578 
3579  __ JumpIfNotSmi(key, &miss_force_generic);
3580  __ mov(r2, Operand(key, ASR, kSmiTagSize));
3581  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
3582  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
3583  __ Ret();
3584 
3585  __ bind(&slow);
3586  __ IncrementCounter(
3587  masm->isolate()->counters()->keyed_load_external_array_slow(),
3588  1, r2, r3);
3589 
3590  // ---------- S t a t e --------------
3591  // -- lr : return address
3592  // -- r0 : key
3593  // -- r1 : receiver
3594  // -----------------------------------
3595  Handle<Code> slow_ic =
3596  masm->isolate()->builtins()->KeyedLoadIC_Slow();
3597  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
3598 
3599  // Miss case: call the runtime.
3600  __ bind(&miss_force_generic);
3601 
3602  // ---------- S t a t e --------------
3603  // -- lr : return address
3604  // -- r0 : key
3605  // -- r1 : receiver
3606  // -----------------------------------
3607 
3608  Handle<Code> miss_ic =
3609  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
3610  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
3611 }
3612 
3613 
3614 static bool IsElementTypeSigned(ElementsKind elements_kind) {
3615  switch (elements_kind) {
3616  case EXTERNAL_BYTE_ELEMENTS:
3617  case EXTERNAL_SHORT_ELEMENTS:
3618  case EXTERNAL_INT_ELEMENTS:
3619  return true;
3620 
3621  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3622  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3623  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3624  case EXTERNAL_PIXEL_ELEMENTS:
3625  return false;
3626 
3627  case EXTERNAL_FLOAT_ELEMENTS:
3628  case EXTERNAL_DOUBLE_ELEMENTS:
3629  case FAST_ELEMENTS:
3630  case FAST_SMI_ELEMENTS:
3631  case FAST_DOUBLE_ELEMENTS:
3632  case FAST_HOLEY_ELEMENTS:
3633  case FAST_HOLEY_SMI_ELEMENTS:
3634  case FAST_HOLEY_DOUBLE_ELEMENTS:
3635  case DICTIONARY_ELEMENTS:
3636  case NON_STRICT_ARGUMENTS_ELEMENTS:
3637  UNREACHABLE();
3638  return false;
3639  }
3640  return false;
3641 }
3642 
3643 
3644 static void GenerateSmiKeyCheck(MacroAssembler* masm,
3645  Register key,
3646  Register scratch0,
3647  Register scratch1,
3648  DwVfpRegister double_scratch0,
3649  DwVfpRegister double_scratch1,
3650  Label* fail) {
3651  if (CpuFeatures::IsSupported(VFP2)) {
3652  CpuFeatures::Scope scope(VFP2);
3653  Label key_ok;
3654  // Check for a smi or a smi value stored inside a heap number. We convert
3655  // the heap number and check that the conversion is exact and that the
3656  // result fits into the smi range.
3657  __ JumpIfSmi(key, &key_ok);
3658  __ CheckMap(key,
3659  scratch0,
3660  Heap::kHeapNumberMapRootIndex,
3661  fail,
3662  DONT_DO_SMI_CHECK);
3663  __ sub(ip, key, Operand(kHeapObjectTag));
3664  __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
3665  __ EmitVFPTruncate(kRoundToZero,
3666  scratch0,
3667  double_scratch0,
3668  scratch1,
3669  double_scratch1,
3670  kCheckForInexactConversion);
3671  __ b(ne, fail);
3672  __ TrySmiTag(scratch0, fail, scratch1);
3673  __ mov(key, scratch0);
3674  __ bind(&key_ok);
3675  } else {
3676  // Check that the key is a smi.
3677  __ JumpIfNotSmi(key, fail);
3678  }
3679 }
3680 
3681 
3682 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
3683  MacroAssembler* masm,
3684  ElementsKind elements_kind) {
3685  // ---------- S t a t e --------------
3686  // -- lr : return address
3687  // -- r0 : key
3688  // -- r1 : receiver
3689  // -----------------------------------
3690  Label miss_force_generic, slow, failed_allocation;
3691 
3692  Register key = r0;
3693  Register receiver = r1;
3694 
3695  // This stub is meant to be tail-jumped to; the receiver must already
3696  // have been verified by the caller not to be a smi.
3697 
3698  // Check that the key is a smi or a heap number convertible to a smi.
3699  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
3700 
3701  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3702  // r3: elements array
3703 
3704  // Check that the index is in range.
3705  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
3706  __ cmp(key, ip);
3707  // Unsigned comparison catches both negative and too-large values.
3708  __ b(hs, &miss_force_generic);
3709 
3710  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3711  // r3: base pointer of external storage
3712 
3713  // We do not untag the smi key; instead we work with it as if it were
3714  // premultiplied by 2.
3715  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
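 // For example, with kSmiTagSize == 1 the smi 7 is stored as 14: LSR #1
 // recovers the element index for byte-sized loads, while LSL #1 scales it
 // to the byte offset of a 4-byte element.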
3716 
3717  Register value = r2;
3718  switch (elements_kind) {
3719  case EXTERNAL_BYTE_ELEMENTS:
3720  __ ldrsb(value, MemOperand(r3, key, LSR, 1));
3721  break;
3722  case EXTERNAL_PIXEL_ELEMENTS:
3723  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3724  __ ldrb(value, MemOperand(r3, key, LSR, 1));
3725  break;
3726  case EXTERNAL_SHORT_ELEMENTS:
3727  __ ldrsh(value, MemOperand(r3, key, LSL, 0));
3728  break;
3729  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3730  __ ldrh(value, MemOperand(r3, key, LSL, 0));
3731  break;
3732  case EXTERNAL_INT_ELEMENTS:
3733  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3734  __ ldr(value, MemOperand(r3, key, LSL, 1));
3735  break;
3736  case EXTERNAL_FLOAT_ELEMENTS:
3737  if (CpuFeatures::IsSupported(VFP2)) {
3738  CpuFeatures::Scope scope(VFP2);
3739  __ add(r2, r3, Operand(key, LSL, 1));
3740  __ vldr(s0, r2, 0);
3741  } else {
3742  __ ldr(value, MemOperand(r3, key, LSL, 1));
3743  }
3744  break;
3745  case EXTERNAL_DOUBLE_ELEMENTS:
3746  if (CpuFeatures::IsSupported(VFP2)) {
3747  CpuFeatures::Scope scope(VFP2);
3748  __ add(r2, r3, Operand(key, LSL, 2));
3749  __ vldr(d0, r2, 0);
3750  } else {
3751  __ add(r4, r3, Operand(key, LSL, 2));
3752  // r4: pointer to the beginning of the double we want to load.
3753  __ ldr(r2, MemOperand(r4, 0));
3754  __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
3755  }
3756  break;
3757  case FAST_ELEMENTS:
3758  case FAST_SMI_ELEMENTS:
3759  case FAST_DOUBLE_ELEMENTS:
3760  case FAST_HOLEY_ELEMENTS:
3761  case FAST_HOLEY_SMI_ELEMENTS:
3762  case FAST_HOLEY_DOUBLE_ELEMENTS:
3763  case DICTIONARY_ELEMENTS:
3764  case NON_STRICT_ARGUMENTS_ELEMENTS:
3765  UNREACHABLE();
3766  break;
3767  }
3768 
3769  // For integer array types:
3770  // r2: value
3771  // For float array type:
3772  // s0: value (if VFP2 is supported)
3773  // r2: value (if VFP2 is not supported)
3774  // For double array type:
3775  // d0: value (if VFP2 is supported)
3776  // r2/r3: value (if VFP2 is not supported)
3777 
3778  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
3779  // For the Int and UnsignedInt array types, we need to see whether
3780  // the value can be represented in a Smi. If not, we need to convert
3781  // it to a HeapNumber.
3782  Label box_int;
3783  __ cmp(value, Operand(0xC0000000));
3784  __ b(mi, &box_int);
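 // The cmp computes value - 0xC0000000, which equals value + 2^30 in two's
 // complement; the result is negative (mi) exactly when value lies outside
 // the smi range [-2^30, 2^30).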
3785  // Tag integer as smi and return it.
3786  __ mov(r0, Operand(value, LSL, kSmiTagSize));
3787  __ Ret();
3788 
3789  __ bind(&box_int);
3790  if (CpuFeatures::IsSupported(VFP2)) {
3791  CpuFeatures::Scope scope(VFP2);
3792  // Allocate a HeapNumber for the result and perform int-to-double
3793  // conversion. Don't touch r0 or r1 as they are needed if allocation
3794  // fails.
3795  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3796 
3797  __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
3798  // Now we can use r0 for the result as key is not needed any more.
3799  __ add(r0, r5, Operand(kHeapObjectTag));
3800  __ vmov(s0, value);
3801  __ vcvt_f64_s32(d0, s0);
3802  __ vstr(d0, r5, HeapNumber::kValueOffset);
3803  __ Ret();
3804  } else {
3805  // Allocate a HeapNumber for the result and perform int-to-double
3806  // conversion. Don't touch r0 or r1 as they are needed if allocation
3807  // fails.
3808  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3809  __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
3810  // Now we can use r0 for the result as key is not needed any more.
3811  __ mov(r0, r5);
3812  Register dst1 = r1;
3813  Register dst2 = r3;
3814  FloatingPointHelper::Destination dest =
3815  FloatingPointHelper::kCoreRegisters;
3816  FloatingPointHelper::ConvertIntToDouble(masm,
3817  value,
3818  dest,
3819  d0,
3820  dst1,
3821  dst2,
3822  r9,
3823  s0);
3824  __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
3825  __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
3826  __ Ret();
3827  }
3828  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
3829  // The test is different for unsigned int values. Since we need
3830  // the value to be in the range of a positive smi, we cannot allow
3831  // either of the top two bits of the value to be set.
3832  if (CpuFeatures::IsSupported(VFP2)) {
3833  CpuFeatures::Scope scope(VFP2);
3834  Label box_int, done;
3835  __ tst(value, Operand(0xC0000000));
3836  __ b(ne, &box_int);
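 // A positive smi has only 30 payload bits, so the unsigned value fits only
 // if neither of the top two bits is set, i.e. value < 2^30.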
3837  // Tag integer as smi and return it.
3838  __ mov(r0, Operand(value, LSL, kSmiTagSize));
3839  __ Ret();
3840 
3841  __ bind(&box_int);
3842  __ vmov(s0, value);
3843  // Allocate a HeapNumber for the result and perform int-to-double
3844  // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
3845  // registers - also when jumping due to exhausted young space.
3846  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3847  __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
3848 
3849  __ vcvt_f64_u32(d0, s0);
3850  __ vstr(d0, r2, HeapNumber::kValueOffset);
3851 
3852  __ add(r0, r2, Operand(kHeapObjectTag));
3853  __ Ret();
3854  } else {
3855  // Check whether the unsigned integer fits into a smi.
3856  Label box_int_0, box_int_1, done;
3857  __ tst(value, Operand(0x80000000));
3858  __ b(ne, &box_int_0);
3859  __ tst(value, Operand(0x40000000));
3860  __ b(ne, &box_int_1);
3861  // Tag integer as smi and return it.
3862  __ mov(r0, Operand(value, LSL, kSmiTagSize));
3863  __ Ret();
3864 
3865  Register hiword = value; // r2.
3866  Register loword = r3;
3867 
3868  __ bind(&box_int_0);
3869  // Integer does not have leading zeros.
3870  GenerateUInt2Double(masm, hiword, loword, r4, 0);
3871  __ b(&done);
3872 
3873  __ bind(&box_int_1);
3874  // Integer has one leading zero.
3875  GenerateUInt2Double(masm, hiword, loword, r4, 1);
3876 
3877 
3878  __ bind(&done);
3879  // Integer was converted to double in registers hiword:loword.
3880  // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
3881  // clobbers all registers - also when jumping due to exhausted young
3882  // space.
3883  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3884  __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
3885 
3886  __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
3887  __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
3888 
3889  __ mov(r0, r4);
3890  __ Ret();
3891  }
3892  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3893  // For the floating-point array type, we always need to allocate a
3894  // HeapNumber.
3895  if (CpuFeatures::IsSupported(VFP2)) {
3896  CpuFeatures::Scope scope(VFP2);
3897  // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3898  // AllocateHeapNumber clobbers all registers - also when jumping due to
3899  // exhausted young space.
3900  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3901  __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
3902  __ vcvt_f64_f32(d0, s0);
3903  __ vstr(d0, r2, HeapNumber::kValueOffset);
3904 
3905  __ add(r0, r2, Operand(kHeapObjectTag));
3906  __ Ret();
3907  } else {
3908  // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3909  // AllocateHeapNumber clobbers all registers - also when jumping due to
3910  // exhausted young space.
3911  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3912  __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
3913  // VFP is not available, do manual single to double conversion.
3914 
3915  // r2: floating point value (binary32)
3916  // r3: heap number for result
3917 
3918  // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
3919  // the slow case from here.
3920  __ and_(r0, value, Operand(kBinary32MantissaMask));
3921 
3922  // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
3923  // the slow case from here.
3924  __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
3925  __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3926 
3927  Label exponent_rebiased;
3928  __ teq(r1, Operand(0x00));
3929  __ b(eq, &exponent_rebiased);
3930 
3931  __ teq(r1, Operand(0xff));
3932  __ mov(r1, Operand(0x7ff), LeaveCC, eq);
3933  __ b(eq, &exponent_rebiased);
3934 
3935  // Rebias exponent.
3936  __ add(r1,
3937  r1,
3938  Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3939 
3940  __ bind(&exponent_rebiased);
3941  __ and_(r2, value, Operand(kBinary32SignMask));
3942  value = no_reg;
3943  __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
3944 
3945  // Shift mantissa.
3946  static const int kMantissaShiftForHiWord =
3947  kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3948 
3949  static const int kMantissaShiftForLoWord =
3950  kBitsPerInt - kMantissaShiftForHiWord;
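 // The 23-bit binary32 mantissa is split across the double's two words: the
 // top kMantissaBitsInTopWord (20) bits land in the high word and the
 // remaining 3 bits are shifted to the top of the low word.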
3951 
3952  __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
3953  __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
3954 
3955  __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
3956  __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
3957 
3958  __ mov(r0, r3);
3959  __ Ret();
3960  }
3961  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3962  if (CpuFeatures::IsSupported(VFP2)) {
3963  CpuFeatures::Scope scope(VFP2);
3964  // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3965  // AllocateHeapNumber clobbers all registers - also when jumping due to
3966  // exhausted young space.
3967  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3968  __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
3969  __ vstr(d0, r2, HeapNumber::kValueOffset);
3970 
3971  __ add(r0, r2, Operand(kHeapObjectTag));
3972  __ Ret();
3973  } else {
3974  // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3975  // AllocateHeapNumber clobbers all registers - also when jumping due to
3976  // exhausted young space.
3977  __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
3978  __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
3979 
3982  __ mov(r0, r4);
3983  __ Ret();
3984  }
3985 
3986  } else {
3987  // Tag integer as smi and return it.
3988  __ mov(r0, Operand(value, LSL, kSmiTagSize));
3989  __ Ret();
3990  }
3991 
3992  // Slow case, key and receiver still in r0 and r1.
3993  __ bind(&slow);
3994  __ IncrementCounter(
3995  masm->isolate()->counters()->keyed_load_external_array_slow(),
3996  1, r2, r3);
3997 
3998  // ---------- S t a t e --------------
3999  // -- lr : return address
4000  // -- r0 : key
4001  // -- r1 : receiver
4002  // -----------------------------------
4003 
4004  __ Push(r1, r0);
4005 
4006  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
4007 
4008  __ bind(&miss_force_generic);
4009  Handle<Code> stub =
4010  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4011  __ Jump(stub, RelocInfo::CODE_TARGET);
4012 }
4013 
4014 
4015 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
4016  MacroAssembler* masm,
4017  ElementsKind elements_kind) {
4018  // ---------- S t a t e --------------
4019  // -- r0 : value
4020  // -- r1 : key
4021  // -- r2 : receiver
4022  // -- lr : return address
4023  // -----------------------------------
4024  Label slow, check_heap_number, miss_force_generic;
4025 
4026  // Register usage.
4027  Register value = r0;
4028  Register key = r1;
4029  Register receiver = r2;
4030  // r3 mostly holds the elements array or the destination external array.
4031 
4032  // This stub is meant to be tail-jumped to; the receiver must already
4033  // have been verified by the caller not to be a smi.
4034 
4035  // Check that the key is a smi or a heap number convertible to a smi.
4036  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
4037 
4038  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
4039 
4040  // Check that the index is in range.
4041  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
4042  __ cmp(key, ip);
4043  // Unsigned comparison catches both negative and too-large values.
4044  __ b(hs, &miss_force_generic);
4045 
4046  // Handle both smis and HeapNumbers in the fast path. Go to the
4047  // runtime for all other kinds of values.
4048  // r3: external array.
4049  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
4050  // Double to pixel conversion is only implemented in the runtime for now.
4051  __ JumpIfNotSmi(value, &slow);
4052  } else {
4053  __ JumpIfNotSmi(value, &check_heap_number);
4054  }
4055  __ SmiUntag(r5, value);
4056  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
4057 
4058  // r3: base pointer of external storage.
4059  // r5: value (integer).
4060  switch (elements_kind) {
4061  case EXTERNAL_PIXEL_ELEMENTS:
4062  // Clamp the value to [0..255].
4063  __ Usat(r5, 8, Operand(r5));
4064  __ strb(r5, MemOperand(r3, key, LSR, 1));
4065  break;
4066  case EXTERNAL_BYTE_ELEMENTS:
4067  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4068  __ strb(r5, MemOperand(r3, key, LSR, 1));
4069  break;
4070  case EXTERNAL_SHORT_ELEMENTS:
4071  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4072  __ strh(r5, MemOperand(r3, key, LSL, 0));
4073  break;
4074  case EXTERNAL_INT_ELEMENTS:
4075  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4076  __ str(r5, MemOperand(r3, key, LSL, 1));
4077  break;
4078  case EXTERNAL_FLOAT_ELEMENTS:
4079  // Perform int-to-float conversion and store to memory.
4080  __ SmiUntag(r4, key);
4081  StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
4082  break;
4083  case EXTERNAL_DOUBLE_ELEMENTS:
4084  __ add(r3, r3, Operand(key, LSL, 2));
4085  // r3: effective address of the double element
4086  FloatingPointHelper::Destination destination;
4087  if (CpuFeatures::IsSupported(VFP2)) {
4088  destination = FloatingPointHelper::kVFPRegisters;
4089  } else {
4090  destination = FloatingPointHelper::kCoreRegisters;
4091  }
4092  FloatingPointHelper::ConvertIntToDouble(
4093  masm, r5, destination,
4094  d0, r6, r7, // These are: double_dst, dst1, dst2.
4095  r4, s2); // These are: scratch2, single_scratch.
4096  if (destination == FloatingPointHelper::kVFPRegisters) {
4097  CpuFeatures::Scope scope(VFP2);
4098  __ vstr(d0, r3, 0);
4099  } else {
4100  __ str(r6, MemOperand(r3, 0));
4101  __ str(r7, MemOperand(r3, Register::kSizeInBytes));
4102  }
4103  break;
4104  case FAST_ELEMENTS:
4105  case FAST_SMI_ELEMENTS:
4106  case FAST_DOUBLE_ELEMENTS:
4107  case FAST_HOLEY_ELEMENTS:
4108  case FAST_HOLEY_SMI_ELEMENTS:
4109  case FAST_HOLEY_DOUBLE_ELEMENTS:
4110  case DICTIONARY_ELEMENTS:
4111  case NON_STRICT_ARGUMENTS_ELEMENTS:
4112  UNREACHABLE();
4113  break;
4114  }
4115 
4116  // Entry registers are intact; r0 holds the value, which is the return value.
4117  __ Ret();
4118 
4119  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
4120  // r3: external array.
4121  __ bind(&check_heap_number);
4122  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
4123  __ b(ne, &slow);
4124 
4125  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
4126 
4127  // r3: base pointer of external storage.
4128 
4129  // The WebGL specification leaves the behavior of storing NaN and
4130  // +/-Infinity into integer arrays basically undefined. For more
4131  // reproducible behavior, convert these to zero.
4132  if (CpuFeatures::IsSupported(VFP2)) {
4133  CpuFeatures::Scope scope(VFP2);
4134 
4135  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4136  // vldr requires the offset to be a multiple of 4, so we cannot
4137  // fold -kHeapObjectTag into it.
4138  __ sub(r5, r0, Operand(kHeapObjectTag));
4139  __ vldr(d0, r5, HeapNumber::kValueOffset);
4140  __ add(r5, r3, Operand(key, LSL, 1));
4141  __ vcvt_f32_f64(s0, d0);
4142  __ vstr(s0, r5, 0);
4143  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4144  __ sub(r5, r0, Operand(kHeapObjectTag));
4145  __ vldr(d0, r5, HeapNumber::kValueOffset);
4146  __ add(r5, r3, Operand(key, LSL, 2));
4147  __ vstr(d0, r5, 0);
4148  } else {
4149  // Hoisted load. vldr requires the offset to be a multiple of 4, so we
4150  // cannot fold -kHeapObjectTag into it.
4151  __ sub(r5, value, Operand(kHeapObjectTag));
4152  __ vldr(d0, r5, HeapNumber::kValueOffset);
4153  __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
4154 
4155  switch (elements_kind) {
4156  case EXTERNAL_BYTE_ELEMENTS:
4157  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4158  __ strb(r5, MemOperand(r3, key, LSR, 1));
4159  break;
4160  case EXTERNAL_SHORT_ELEMENTS:
4161  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4162  __ strh(r5, MemOperand(r3, key, LSL, 0));
4163  break;
4164  case EXTERNAL_INT_ELEMENTS:
4165  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4166  __ str(r5, MemOperand(r3, key, LSL, 1));
4167  break;
4168  case EXTERNAL_PIXEL_ELEMENTS:
4169  case EXTERNAL_FLOAT_ELEMENTS:
4170  case EXTERNAL_DOUBLE_ELEMENTS:
4171  case FAST_ELEMENTS:
4172  case FAST_SMI_ELEMENTS:
4173  case FAST_DOUBLE_ELEMENTS:
4174  case FAST_HOLEY_ELEMENTS:
4175  case FAST_HOLEY_SMI_ELEMENTS:
4176  case FAST_HOLEY_DOUBLE_ELEMENTS:
4177  case DICTIONARY_ELEMENTS:
4178  case NON_STRICT_ARGUMENTS_ELEMENTS:
4179  UNREACHABLE();
4180  break;
4181  }
4182  }
4183 
4184  // Entry registers are intact; r0 holds the value, which is the return
4185  // value.
4186  __ Ret();
4187  } else {
4188  // VFP3 is not available, do manual conversions.
4189  __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
4190  __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
4191 
4192  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4193  Label done, nan_or_infinity_or_zero;
4194  static const int kMantissaInHiWordShift =
4195  kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
4196 
4197  static const int kMantissaInLoWordShift =
4198  kBitsPerInt - kMantissaInHiWordShift;
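 // When narrowing binary64 to binary32, the 23-bit result mantissa is
 // assembled from the 20 mantissa bits of the double's high word plus the
 // top 3 bits of its low word, using the shifts defined above.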
4199 
4200  // Test for all special exponent values: zeros, subnormal numbers, NaNs
4201  // and infinities. All these should be converted to 0.
4202  __ mov(r7, Operand(HeapNumber::kExponentMask));
4203  __ and_(r9, r5, Operand(r7), SetCC);
4204  __ b(eq, &nan_or_infinity_or_zero);
4205 
4206  __ teq(r9, Operand(r7));
4207  __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
4208  __ b(eq, &nan_or_infinity_or_zero);
4209 
4210  // Rebias exponent.
4211  __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
4212  __ add(r9,
4213  r9,
4214  Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
4215 
4216  __ cmp(r9, Operand(kBinary32MaxExponent));
4217  __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
4218  __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
4219  __ b(gt, &done);
4220 
4221  __ cmp(r9, Operand(kBinary32MinExponent));
4222  __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
4223  __ b(lt, &done);
4224 
4225  __ and_(r7, r5, Operand(HeapNumber::kSignMask));
4226  __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
4227  __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
4228  __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
4229  __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
4230 
4231  __ bind(&done);
4232  __ str(r5, MemOperand(r3, key, LSL, 1));
4233  // Entry registers are intact; r0 holds the value, which is the return
4234  // value.
4235  __ Ret();
4236 
4237  __ bind(&nan_or_infinity_or_zero);
4238  __ and_(r7, r5, Operand(HeapNumber::kSignMask));
4239  __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
4240  __ orr(r9, r9, r7);
4241  __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
4242  __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
4243  __ b(&done);
4244  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4245  __ add(r7, r3, Operand(key, LSL, 2));
4246  // r7: effective address of destination element.
4247  __ str(r6, MemOperand(r7, 0));
4248  __ str(r5, MemOperand(r7, Register::kSizeInBytes));
4249  __ Ret();
4250  } else {
4251  bool is_signed_type = IsElementTypeSigned(elements_kind);
4252  int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
4253  int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
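 // Out-of-range doubles are stored as the element type's minimal value:
 // 0x80000000 for signed arrays and 0 for unsigned ones, as the exponent
 // check against meaningfull_bits below implements.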
4254 
4255  Label done, sign;
4256 
4257  // Test for all special exponent values: zeros, subnormal numbers, NaNs
4258  // and infinities. All these should be converted to 0.
4259  __ mov(r7, Operand(HeapNumber::kExponentMask));
4260  __ and_(r9, r5, Operand(r7), SetCC);
4261  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
4262  __ b(eq, &done);
4263 
4264  __ teq(r9, Operand(r7));
4265  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
4266  __ b(eq, &done);
4267 
4268  // Unbias exponent.
4269  __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
4270  __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
4271  // If exponent is negative then result is 0.
4272  __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
4273  __ b(mi, &done);
4274 
4275  // If the exponent is too big, the result is the minimal value.
4276  __ cmp(r9, Operand(meaningfull_bits - 1));
4277  __ mov(r5, Operand(min_value), LeaveCC, ge);
4278  __ b(ge, &done);
4279 
4280  __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
4281  __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
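 // Restore the mantissa's implicit leading 1 before shifting it into
 // integer position.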
4282  __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
4283 
4284  __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
4285  __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
4286  __ b(pl, &sign);
4287 
4288  __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
4289  __ mov(r5, Operand(r5, LSL, r9));
4290  __ rsb(r9, r9, Operand(meaningfull_bits));
4291  __ orr(r5, r5, Operand(r6, LSR, r9));
4292 
4293  __ bind(&sign);
4294  __ teq(r7, Operand(0, RelocInfo::NONE));
4295  __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
4296 
4297  __ bind(&done);
4298  switch (elements_kind) {
4299  case EXTERNAL_BYTE_ELEMENTS:
4300  case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4301  __ strb(r5, MemOperand(r3, key, LSR, 1));
4302  break;
4303  case EXTERNAL_SHORT_ELEMENTS:
4304  case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4305  __ strh(r5, MemOperand(r3, key, LSL, 0));
4306  break;
4307  case EXTERNAL_INT_ELEMENTS:
4308  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4309  __ str(r5, MemOperand(r3, key, LSL, 1));
4310  break;
4311  case EXTERNAL_PIXEL_ELEMENTS:
4312  case EXTERNAL_FLOAT_ELEMENTS:
4313  case EXTERNAL_DOUBLE_ELEMENTS:
4314  case FAST_ELEMENTS:
4315  case FAST_SMI_ELEMENTS:
4316  case FAST_DOUBLE_ELEMENTS:
4317  case FAST_HOLEY_ELEMENTS:
4318  case FAST_HOLEY_SMI_ELEMENTS:
4319  case FAST_HOLEY_DOUBLE_ELEMENTS:
4320  case DICTIONARY_ELEMENTS:
4321  case NON_STRICT_ARGUMENTS_ELEMENTS:
4322  UNREACHABLE();
4323  break;
4324  }
4325  }
4326  }
4327  }
4328 
4329  // Slow case; value, key and receiver are still in r0, r1 and r2.
4330  __ bind(&slow);
4331  __ IncrementCounter(
4332  masm->isolate()->counters()->keyed_store_external_array_slow(),
4333  1, r3, r4);
4334 
4335  // ---------- S t a t e --------------
4336  // -- r0 : value
4337  // -- r1 : key
4338  // -- r2 : receiver
4339  // -- lr : return address
4340  // -----------------------------------
4340  Handle<Code> slow_ic =
4341  masm->isolate()->builtins()->KeyedStoreIC_Slow();
4342  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
4343 
4344  // Miss case: call the runtime.
4345  __ bind(&miss_force_generic);
4346 
4347  // ---------- S t a t e --------------
4348  // -- r0 : value
4349  // -- r1 : key
4350  // -- r2 : receiver
4351  // -- lr : return address
4352  // -----------------------------------
4352 
4353  Handle<Code> miss_ic =
4354  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4355  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
4356 }
4357 
4358 
4359 void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
4360  // ----------- S t a t e -------------
4361  // -- lr : return address
4362  // -- r0 : key
4363  // -- r1 : receiver
4364  // -----------------------------------
4365  Label miss_force_generic;
4366 
4367  // This stub is meant to be tail-jumped to; the receiver must already
4368  // have been verified by the caller not to be a smi.
4369 
4370  // Check that the key is a smi or a heap number convertible to a smi.
4371  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
4372 
4373  // Get the elements array.
4374  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
4375  __ AssertFastElements(r2);
4376 
4377  // Check that the key is within bounds.
4378  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
4379  __ cmp(r0, Operand(r3));
4380  __ b(hs, &miss_force_generic);
4381 
4382  // Load the result and make sure it's not the hole.
4383  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4384  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4385  __ ldr(r4,
4386  MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
4387  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
4388  __ cmp(r4, ip);
4389  __ b(eq, &miss_force_generic);
4390  __ mov(r0, r4);
4391  __ Ret();
4392 
4393  __ bind(&miss_force_generic);
4394  Handle<Code> stub =
4395  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4396  __ Jump(stub, RelocInfo::CODE_TARGET);
4397 }
4398 
4399 
4400 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
4401  MacroAssembler* masm) {
4402  // ----------- S t a t e -------------
4403  // -- lr : return address
4404  // -- r0 : key
4405  // -- r1 : receiver
4406  // -----------------------------------
4407  Label miss_force_generic, slow_allocate_heapnumber;
4408 
4409  Register key_reg = r0;
4410  Register receiver_reg = r1;
4411  Register elements_reg = r2;
4412  Register heap_number_reg = r2;
4413  Register indexed_double_offset = r3;
4414  Register scratch = r4;
4415  Register scratch2 = r5;
4416  Register scratch3 = r6;
4417  Register heap_number_map = r7;
4418 
4419  // This stub is meant to be tail-jumped to; the receiver must already
4420  // have been verified by the caller not to be a smi.
4421 
4422  // Check that the key is a smi or a heap number convertible to a smi.
4423  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
4424 
4425  // Get the elements array.
4426  __ ldr(elements_reg,
4427  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4428 
4429  // Check that the key is within bounds.
4430  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4431  __ cmp(key_reg, Operand(scratch));
4432  __ b(hs, &miss_force_generic);
4433 
4434  // Load the upper word of the double in the fixed array and test for the hole NaN.
4435  __ add(indexed_double_offset, elements_reg,
4436  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
4437  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
4438  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
4439  __ cmp(scratch, Operand(kHoleNanUpper32));
4440  __ b(&miss_force_generic, eq);
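 // The hole in a FixedDoubleArray is encoded as a NaN whose upper word is
 // kHoleNanUpper32, so comparing just the upper 32 bits identifies it.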
4441 
4442  // Non-NaN. Allocate a new heap number and copy the double value into it.
4443  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4444  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
4445  heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
4446 
4447  // Don't need to reload the upper 32 bits of the double, it's already in
4448  // scratch.
4449  __ str(scratch, FieldMemOperand(heap_number_reg,
4450  HeapNumber::kExponentOffset));
4451  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
4452  FixedArray::kHeaderSize));
4453  __ str(scratch, FieldMemOperand(heap_number_reg,
4454  HeapNumber::kMantissaOffset));
4455 
4456  __ mov(r0, heap_number_reg);
4457  __ Ret();
4458 
4459  __ bind(&slow_allocate_heapnumber);
4460  Handle<Code> slow_ic =
4461  masm->isolate()->builtins()->KeyedLoadIC_Slow();
4462  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
4463 
4464  __ bind(&miss_force_generic);
4465  Handle<Code> miss_ic =
4466  masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
4467  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
4468 }
4469 
4470 
4471 void KeyedStoreStubCompiler::GenerateStoreFastElement(
4472  MacroAssembler* masm,
4473  bool is_js_array,
4474  ElementsKind elements_kind,
4475  KeyedAccessGrowMode grow_mode) {
4476  // ----------- S t a t e -------------
4477  // -- r0 : value
4478  // -- r1 : key
4479  // -- r2 : receiver
4480  // -- lr : return address
4481  // -- r3 : scratch
4482  // -- r4 : scratch (elements)
4483  // -----------------------------------
4484  Label miss_force_generic, transition_elements_kind, grow, slow;
4485  Label finish_store, check_capacity;
4486 
4487  Register value_reg = r0;
4488  Register key_reg = r1;
4489  Register receiver_reg = r2;
4490  Register scratch = r4;
4491  Register elements_reg = r3;
4492  Register length_reg = r5;
4493  Register scratch2 = r6;
4494 
4495  // This stub is meant to be tail-jumped to; the receiver must already
4496  // have been verified by the caller not to be a smi.
4497 
4498  // Check that the key is a smi or a heap number convertible to a smi.
4499  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
4500 
4501  if (IsFastSmiElementsKind(elements_kind)) {
4502  __ JumpIfNotSmi(value_reg, &transition_elements_kind);
4503  }
4504 
4505  // Check that the key is within bounds.
4506  __ ldr(elements_reg,
4508  if (is_js_array) {
4509  __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4510  } else {
4511  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4512  }
4513  // Compare smis.
4514  __ cmp(key_reg, scratch);
4515  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4516  __ b(hs, &grow);
4517  } else {
4518  __ b(hs, &miss_force_generic);
4519  }
4520 
4521  // Make sure elements is a fast element array, not 'cow'.
4522  __ CheckMap(elements_reg,
4523  scratch,
4524  Heap::kFixedArrayMapRootIndex,
4525  &miss_force_generic,
4526  DONT_DO_SMI_CHECK);
4527 
4528  __ bind(&finish_store);
4529  if (IsFastSmiElementsKind(elements_kind)) {
4530  __ add(scratch,
4531  elements_reg,
4532  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4533  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4534  __ add(scratch,
4535  scratch,
4536  Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
4537  __ str(value_reg, MemOperand(scratch));
4538  } else {
4539  ASSERT(IsFastObjectElementsKind(elements_kind));
4540  __ add(scratch,
4541  elements_reg,
4542  Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4543  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
4544  __ add(scratch,
4545  scratch,
4546  Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
4547  __ str(value_reg, MemOperand(scratch));
4548  __ mov(receiver_reg, value_reg);
4549  __ RecordWrite(elements_reg, // Object.
4550  scratch, // Address.
4551  receiver_reg, // Value.
4552  kLRHasNotBeenSaved,
4553  kDontSaveFPRegs);
4554  }
4555  // value_reg (r0) is preserved.
4556  // Done.
4557  __ Ret();
4558 
4559  __ bind(&miss_force_generic);
4560  Handle<Code> ic =
4561  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4562  __ Jump(ic, RelocInfo::CODE_TARGET);
4563 
4564  __ bind(&transition_elements_kind);
4565  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
4566  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
4567 
4568  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4569  // Grow the array by a single element if possible.
4570  __ bind(&grow);
4571 
4572  // Make sure the array is growing by only a single element; anything else
4573  // must be handled by the runtime. Flags are already set by the previous compare.
4574  __ b(ne, &miss_force_generic);
4575 
4576  // Check for the empty array, and preallocate a small backing store if
4577  // possible.
4578  __ ldr(length_reg,
4579  FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4580  __ ldr(elements_reg,
4581  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4582  __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
4583  __ b(ne, &check_capacity);
4584 
4585  int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
4586  __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
4587  TAG_OBJECT);
4588 
4589  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
4590  __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
4591  __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
4592  __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4593  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
4594  for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
4595  __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
4596  }
4597 
4598  // Store the element at index zero.
4599  __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
4600 
4601  // Install the new backing store in the JSArray.
4602  __ str(elements_reg,
4603  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4604  __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
4605  scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
4606  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4607 
4608  // Increment the length of the array.
4609  __ mov(length_reg, Operand(Smi::FromInt(1)));
4610  __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4611  __ Ret();
4612 
4613  __ bind(&check_capacity);
4614  // Check for COW elements; in general they are not handled by this stub.
4615  __ CheckMap(elements_reg,
4616  scratch,
4617  Heap::kFixedCOWArrayMapRootIndex,
4618  &miss_force_generic,
4619  DONT_DO_SMI_CHECK);
4620 
4621  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4622  __ cmp(length_reg, scratch);
4623  __ b(hs, &slow);
4624 
4625  // Grow the array and finish the store.
4626  __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
4627  __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4628  __ jmp(&finish_store);
4629 
4630  __ bind(&slow);
4631  Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
4632  __ Jump(ic_slow, RelocInfo::CODE_TARGET);
4633  }
4634 }
4635 
4636 
4637 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
4638  MacroAssembler* masm,
4639  bool is_js_array,
4640  KeyedAccessGrowMode grow_mode) {
4641  // ----------- S t a t e -------------
4642  // -- r0 : value
4643  // -- r1 : key
4644  // -- r2 : receiver
4645  // -- lr : return address
4646  // -- r3 : scratch
4647  // -- r4 : scratch
4648  // -- r5 : scratch
4649  // -----------------------------------
4650  Label miss_force_generic, transition_elements_kind, grow, slow;
4651  Label finish_store, check_capacity;
4652 
4653  Register value_reg = r0;
4654  Register key_reg = r1;
4655  Register receiver_reg = r2;
4656  Register elements_reg = r3;
4657  Register scratch1 = r4;
4658  Register scratch2 = r5;
4659  Register scratch3 = r6;
4660  Register scratch4 = r7;
4661  Register length_reg = r7;
4662 
4663  // This stub is meant to be tail-jumped to; the receiver must already
4664  // have been verified by the caller not to be a smi.
4665 
4666  // Check that the key is a smi or a heap number convertible to a smi.
4667  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
4668 
4669  __ ldr(elements_reg,
4670  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4671 
4672  // Check that the key is within bounds.
4673  if (is_js_array) {
4674  __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4675  } else {
4676  __ ldr(scratch1,
4677  FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
4678  }
4679  // Compare smis, unsigned compare catches both negative and out-of-bound
4680  // indexes.
4681  __ cmp(key_reg, scratch1);
4682  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
4683  __ b(hs, &grow);
4684  } else {
4685  __ b(hs, &miss_force_generic);
4686  }
4687 
4688  __ bind(&finish_store);
4689  __ StoreNumberToDoubleElements(value_reg,
4690  key_reg,
4691  receiver_reg,
4692  // All registers after this are overwritten.
4693  elements_reg,
4694  scratch1,
4695  scratch2,
4696  scratch3,
4697  scratch4,
4698  &transition_elements_kind);
4699  __ Ret();
4700 
4701  // Handle store cache miss, replacing the ic with the generic stub.
4702  __ bind(&miss_force_generic);
4703  Handle<Code> ic =
4704  masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
4705  __ Jump(ic, RelocInfo::CODE_TARGET);
4706 
4707  __ bind(&transition_elements_kind);
4708  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
4709  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
4710 
4711  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
4712  // Grow the array by a single element if possible.
4713  __ bind(&grow);
4714 
4715  // Make sure the array is growing by only a single element; anything else
4716  // must be handled by the runtime. Flags are already set by the previous compare.
4717  __ b(ne, &miss_force_generic);
4718 
4719  // Transition on values that can't be stored in a FixedDoubleArray.
4720  Label value_is_smi;
4721  __ JumpIfSmi(value_reg, &value_is_smi);
4722  __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
4723  __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
4724  __ b(ne, &transition_elements_kind);
4725  __ bind(&value_is_smi);
4726 
4727  // Check for the empty array, and preallocate a small backing store if
4728  // possible.
4729  __ ldr(length_reg,
4730  FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4731  __ ldr(elements_reg,
4732  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4733  __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
4734  __ b(ne, &check_capacity);
4735 
4736  int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
4737  __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
4738  TAG_OBJECT);
4739 
4740  // Initialize the new FixedDoubleArray. Leave the elements uninitialized for
4741  // efficiency; they are guaranteed to be initialized before use.
4742  __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
4743  __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
4744  __ mov(scratch1,
4745  Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
4746  __ str(scratch1,
4747  FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
4748 
4749  // Install the new backing store in the JSArray.
4750  __ str(elements_reg,
4751  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4752  __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
4753  scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
4754  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4755 
4756  // Increment the length of the array.
4757  __ mov(length_reg, Operand(Smi::FromInt(1)));
4758  __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4759  __ ldr(elements_reg,
4760  FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
4761  __ jmp(&finish_store);
4762 
4763  __ bind(&check_capacity);
4764  // Make sure that the backing store can hold additional elements.
4765  __ ldr(scratch1,
4766  FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
4767  __ cmp(length_reg, scratch1);
4768  __ b(hs, &slow);
4769 
4770  // Grow the array and finish the store.
4771  __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
4772  __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
4773  __ jmp(&finish_store);
4774 
4775  __ bind(&slow);
4776  Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
4777  __ Jump(ic_slow, RelocInfo::CODE_TARGET);
4778  }
4779 }
4780 
4781 
4782 #undef __
4783 
4784 } } // namespace v8::internal
4785 
4786 #endif // V8_TARGET_ARCH_ARM
byte * Address
Definition: globals.h:157
const SwVfpRegister s2
static const int kBitFieldOffset
Definition: objects.h:5160
Handle< Code > CompileLoadFunctionPrototype(Handle< String > name)
Handle< Code > CompileLoadCallback(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< AccessorInfo > callback)
const intptr_t kSmiTagMask
Definition: v8.h:4016
static void GenerateStoreViaSetter(MacroAssembler *masm, Handle< JSFunction > setter)
static const int kCodeEntryOffset
Definition: objects.h:6182
Handle< Code > CompileStoreField(Handle< JSObject > object, int index, Handle< Map > transition, Handle< String > name)
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:6183
static int SlotOffset(int index)
Definition: contexts.h:425
static const int kDataOffset
Definition: objects.h:8409
const Register r3
const int kBinary32ExponentShift
Definition: globals.h:250
Handle< Code > CompileLoadNonexistent(Handle< String > name, Handle< JSObject > object, Handle< JSObject > last)
const int kDoubleSizeLog2
Definition: globals.h:222
Handle< Code > CompileStoreElement(Handle< Map > receiver_map)
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
const Register cp
static const uint32_t kExponentMask
Definition: objects.h:1352
static const int kFlagsOffset
Definition: objects.h:4540
const uint32_t kBinary32MantissaMask
Definition: globals.h:245
const int kBinary32MaxExponent
Definition: globals.h:247
static Smi * FromInt(int value)
Definition: objects-inl.h:981
bool IsFastObjectElementsKind(ElementsKind kind)
#define LOG(isolate, Call)
Definition: log.h:81
static void GenerateStoreExternalArray(MacroAssembler *masm, ElementsKind elements_kind)
const DwVfpRegister d0
static const int kGlobalReceiverOffset
Definition: objects.h:6288
static void GenerateLoadFastDoubleElement(MacroAssembler *masm)
const Register r6
const int kBinary32MantissaBits
Definition: globals.h:249
static StubType ExtractTypeFromFlags(Flags flags)
Definition: objects-inl.h:3538
static const int kExponentBias
Definition: objects.h:1356
int int32_t
Definition: unicode.cc:47
static bool IsSupported(CpuFeature f)
static const int kExternalPointerOffset
Definition: objects.h:3741
static const int kHasNamedInterceptor
Definition: objects.h:5169
static const int kIsAccessCheckNeeded
Definition: objects.h:5173
List< Handle< Map > > MapHandleList
Definition: list.h:198
#define ASSERT(condition)
Definition: checks.h:270
const int kPointerSizeLog2
Definition: globals.h:232
static const int kInstanceSizeOffset
Definition: objects.h:5147
static const int kDebugInfoOffset
Definition: objects.h:5805
const Register r2
static const int kContextOffset
Definition: objects.h:6187
Handle< Code > CompileLoadField(Handle< JSObject > object, Handle< JSObject > holder, int index, Handle< String > name)
Handle< Code > CompileStoreInterceptor(Handle< JSObject > object, Handle< String > name)
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
Handle< Code > CompileStoreField(Handle< JSObject > object, int index, Handle< Map > transition, Handle< String > name)
static const int kHashFieldOffset
Definition: objects.h:7319
const uint32_t kVFPFlushToZeroMask
const Register sp
Handle< Code > CompileLoadInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
#define UNREACHABLE()
Definition: checks.h:50
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
Definition: objects.h:7318
Handle< Code > CompileCallGlobal(Handle< JSObject > object, Handle< GlobalObject > holder, Handle< JSGlobalPropertyCell > cell, Handle< JSFunction > function, Handle< String > name)
static const int kExponentShift
Definition: objects.h:1357
Handle< Code > CompileLoadField(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, int index)
static const int kValueOffset
Definition: objects.h:1342
static void GenerateLoadViaGetter(MacroAssembler *masm, Handle< JSFunction > getter)
const uint32_t kHoleNanUpper32
Definition: v8globals.h:469
Handle< Code > CompileStoreGlobal(Handle< GlobalObject > object, Handle< JSGlobalPropertyCell > holder, Handle< String > name)
Handle< Code > CompileLoadViaGetter(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< JSFunction > getter)
Handle< Code > CompileLoadConstant(Handle< JSObject > object, Handle< JSObject > holder, Handle< JSFunction > value, Handle< String > name)
Handle< Code > CompileLoadConstant(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< JSFunction > value)
const Register ip
Handle< Code > CompileCallField(Handle< JSObject > object, Handle< JSObject > holder, int index, Handle< String > name)
const Register r9
const int kPointerSize
Definition: globals.h:220
static void GenerateStoreFastElement(MacroAssembler *masm, bool is_js_array, ElementsKind element_kind, KeyedAccessGrowMode grow_mode)
Handle< Code > CompileLoadStringLength(Handle< String > name)
const int kHeapObjectTag
Definition: v8.h:4009
const uint32_t kHoleNanLower32
Definition: v8globals.h:470
#define __
static bool decode(uint32_t value)
Definition: utils.h:273
const Register pc
static const int kPropertiesOffset
Definition: objects.h:2171
const int kBinary32MinExponent
Definition: globals.h:248
Handle< Code > CompileLoadGlobal(Handle< JSObject > object, Handle< GlobalObject > holder, Handle< JSGlobalPropertyCell > cell, Handle< String > name, bool is_dont_delete)
Handle< Code > CompileStoreCallback(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< AccessorInfo > callback)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
bool IsFastSmiElementsKind(ElementsKind kind)
const uint32_t kVFPExceptionMask
const SwVfpRegister s0
static void GenerateMapChangeElementsTransition(MacroAssembler *masm)
const int kBinary32ExponentBias
Definition: globals.h:246
static const int kDataOffset
Definition: objects.h:8539
static int SizeFor(int length)
Definition: objects.h:2434
const Register r0
static const int kElementsOffset
Definition: objects.h:2172
const uint32_t kStringTag
Definition: objects.h:456
#define BASE_EMBEDDED
Definition: allocation.h:68
const int kBitsPerInt
Definition: globals.h:240
static void GenerateLoadDictionaryElement(MacroAssembler *masm)
static void GenerateLoadExternalArray(MacroAssembler *masm, ElementsKind elements_kind)
static const int kLengthOffset
Definition: objects.h:8332
static int SizeFor(int length)
Definition: objects.h:2353
static const int kHeaderSize
Definition: objects.h:2296
const Register lr
static const int kMapOffset
Definition: objects.h:1261
static const int kMantissaBitsInTopWord
Definition: objects.h:1358
const uint32_t kIsNotStringMask
Definition: objects.h:455
List< Handle< Code > > CodeHandleList
Definition: list.h:199
const Register r1
static const int kLengthOffset
Definition: objects.h:2295
static void ConvertIntToDouble(MacroAssembler *masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register scratch2, SwVfpRegister single_scratch)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
Handle< Code > CompileCallInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
MemOperand FieldMemOperand(Register object, int offset)
static const int kDataOffset
Definition: objects.h:8563
static void GenerateLoadFastElement(MacroAssembler *masm)
static const uint32_t kSignMask
Definition: objects.h:1351
friend class Isolate
Definition: stub-cache.h:392
const int kSmiTagSize
Definition: v8.h:4015
static void GenerateStoreFastDoubleElement(MacroAssembler *masm, bool is_js_array, KeyedAccessGrowMode grow_mode)
static const int kHeaderSize
Definition: objects.h:4549
static Handle< T > null()
Definition: handles.h:86
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
Handle< Code > CompileLoadArrayLength(Handle< String > name)
static const uint32_t kMantissaMask
Definition: objects.h:1353
const DwVfpRegister d2
const int kSmiTag
Definition: v8.h:4014
Handle< Code > CompileCallConstant(Handle< Object > object, Handle< JSObject > holder, Handle< JSFunction > function, Handle< String > name, CheckType check)
static AccessorInfo * cast(Object *obj)
const uint32_t kBinary32ExponentMask
Definition: globals.h:244
const uint32_t kBinary32SignMask
Definition: globals.h:243
const int kHeapObjectTagSize
Definition: v8.h:4010
static const int kSizeInBytes
Definition: assembler-arm.h:75
static Handle< JSGlobalPropertyCell > EnsurePropertyCell(Handle< GlobalObject > global, Handle< String > name)
Definition: objects.cc:12019
static bool HasCustomCallGenerator(Handle< JSFunction > function)
Definition: stub-cache.cc:1444
V8 runtime flag definitions (DEFINE_bool help strings elided)
Definition: flags.cc:301
static const int kPreallocatedArrayElements
Definition: objects.h:8329
static const int kPrototypeOffset
Definition: objects.h:5126
static const int kFlagsNotUsedInLookup
Definition: objects.h:4649
const int kInvalidProtoDepth
const uint32_t kVFPRoundingModeMask
const Register no_reg
DEFINE_bool(code_comments, ...) flag definition (help strings elided)
static const int kValueOffset
Definition: objects.h:6385
const DwVfpRegister d1
const Register fp
static const int kNativeContextOffset
Definition: objects.h:6286
Handle< Code > CompileLoadCallback(Handle< String > name, Handle< JSObject > object, Handle< JSObject > holder, Handle< AccessorInfo > callback)
Handle< Code > CompileLoadPolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_ics)
Handle< Code > CompileLoadInterceptor(Handle< JSObject > object, Handle< JSObject > holder, Handle< String > name)
Handle< Code > CompileStorePolymorphic(MapHandleList *receiver_maps, CodeHandleList *handler_stubs, MapHandleList *transitioned_maps)
static const int kSharedFunctionInfoOffset
Definition: objects.h:6185
KeyedAccessGrowMode
Definition: objects.h:142
Print usage including flags (flag help strings elided)
Definition: flags.cc:495
static const int kMantissaBits
Definition: objects.h:1354
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1348
Handle< Code > CompileLoadElement(Handle< Map > receiver_map)
Handle< Code > CompileConstructStub(Handle< JSFunction > function)
Handle< Code > CompileStoreViaSetter(Handle< String > name, Handle< JSObject > receiver, Handle< JSObject > holder, Handle< JSFunction > setter)
static JSObject * cast(Object *obj)
const Register r5
static const int kInstanceTypeOffset
Definition: objects.h:5158
static const int kMantissaOffset
Definition: objects.h:1347
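The kMantissaOffset/kExponentOffset pair above reflects how the stubs address a HeapNumber's 64-bit payload as two 32-bit words: on a little-endian target the low mantissa word comes first, and the high word holds the sign (kSignMask), the biased exponent, and the top 20 mantissa bits (kMantissaMask, out of kMantissaBits total). A hedged sketch of that split, using memcpy in place of the generated loads:

#include <cstdint>
#include <cstdio>
#include <cstring>

const uint32_t kSignMask = 0x80000000u;      // sign bit of the high word
const uint32_t kMantissaMask = 0x000fffffu;  // top-20 mantissa bits (high word)
const int kMantissaBits = 52;                // total width: 20 in hi + 32 in lo

int main() {
  double value = -2.5;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);        // word at kMantissaOffset
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // word at kExponentOffset
  std::printf("sign=%u exp=%u mantissa=0x%05x%08x (of %d bits)\n",
              (hi & kSignMask) >> 31,
              (hi >> 20) & 0x7ff,
              hi & kMantissaMask, lo, kMantissaBits);
  return 0;  // prints: sign=1 exp=1024 mantissa=0x4000000000000 (of 52 bits)
}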
const Register r4
const Register r7
static JSFunction * cast(Object *obj)