v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
macro-assembler-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "debug.h"
35 #include "runtime.h"
36 #include "serialize.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 // -------------------------------------------------------------------------
42 // MacroAssembler implementation.
43 
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45  : Assembler(arg_isolate, buffer, size),
46  generating_stub_(false),
47  allow_stub_calls_(true),
48  has_frame_(false) {
49  if (isolate() != NULL) {
50  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
51  isolate());
52  }
53 }
54 
55 
56 void MacroAssembler::InNewSpace(
57  Register object,
58  Register scratch,
59  Condition cc,
60  Label* condition_met,
61  Label::Distance condition_met_distance) {
62  ASSERT(cc == equal || cc == not_equal);
63  if (scratch.is(object)) {
64  and_(scratch, Immediate(~Page::kPageAlignmentMask));
65  } else {
66  mov(scratch, Immediate(~Page::kPageAlignmentMask));
67  and_(scratch, object);
68  }
69  // Check that we can use a test_b.
70  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
71  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
72  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
73  | (1 << MemoryChunk::IN_TO_SPACE);
74  // If non-zero, the page belongs to new-space.
75  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
76  static_cast<uint8_t>(mask));
77  j(cc, condition_met, condition_met_distance);
78 }
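
An illustrative aside (not part of the original file): the masking above works because every V8 page (MemoryChunk) starts at a page-aligned address, so clearing the low alignment bits of any object pointer yields the address of its page header, whose flags word records whether the page belongs to new space. A minimal C++ sketch of the same check, with all constants (alignment mask, flags offset, flag bit indices) as stand-in assumptions rather than the real values:

#include <cstdint>

static const uintptr_t kSketchPageAlignmentMask = (1u << 20) - 1;  // assumed 1 MB pages
static const int kSketchFlagsOffset = 2 * sizeof(void*);           // assumed header layout
static const int kSketchInFromSpace = 3;                           // assumed bit index
static const int kSketchInToSpace = 4;                             // assumed bit index

static bool InNewSpaceSketch(uintptr_t object_address) {
  // Clear the low bits to find the start of the page the object lives on.
  uintptr_t chunk = object_address & ~kSketchPageAlignmentMask;
  // Read the page's flags word and test the two new-space bits.
  uintptr_t flags = *reinterpret_cast<const uintptr_t*>(chunk + kSketchFlagsOffset);
  uintptr_t mask = (1u << kSketchInFromSpace) | (1u << kSketchInToSpace);
  return (flags & mask) != 0;
}
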
79 
80 
81 void MacroAssembler::RememberedSetHelper(
82  Register object, // Only used for debug checks.
83  Register addr,
84  Register scratch,
85  SaveFPRegsMode save_fp,
86  MacroAssembler::RememberedSetFinalAction and_then) {
87  Label done;
88  if (emit_debug_code()) {
89  Label ok;
90  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
91  int3();
92  bind(&ok);
93  }
94  // Load store buffer top.
95  ExternalReference store_buffer =
96  ExternalReference::store_buffer_top(isolate());
97  mov(scratch, Operand::StaticVariable(store_buffer));
98  // Store pointer to buffer.
99  mov(Operand(scratch, 0), addr);
100  // Increment buffer top.
101  add(scratch, Immediate(kPointerSize));
102  // Write back new top of buffer.
103  mov(Operand::StaticVariable(store_buffer), scratch);
104  // Call stub on end of buffer.
105  // Check for end of buffer.
106  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
107  if (and_then == kReturnAtEnd) {
108  Label buffer_overflowed;
109  j(not_equal, &buffer_overflowed, Label::kNear);
110  ret(0);
111  bind(&buffer_overflowed);
112  } else {
113  ASSERT(and_then == kFallThroughAtEnd);
114  j(equal, &done, Label::kNear);
115  }
116  StoreBufferOverflowStub store_buffer_overflow =
117  StoreBufferOverflowStub(save_fp);
118  CallStub(&store_buffer_overflow);
119  if (and_then == kReturnAtEnd) {
120  ret(0);
121  } else {
122  ASSERT(and_then == kFallThroughAtEnd);
123  bind(&done);
124  }
125 }
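
For orientation only (an annotation, not original code): RememberedSetHelper appends the slot address to the store buffer with a bump pointer and falls back to the StoreBufferOverflow stub when an overflow bit becomes set in the new top. A rough C++ equivalent, assuming the buffer top lives in a plain pointer cell and the overflow condition is encoded in the top's address bits:

#include <cstdint>

static void* sketch_store_buffer[4096];                       // assumed backing storage
static void** sketch_store_buffer_top = sketch_store_buffer;  // stands in for store_buffer_top
static const uintptr_t kSketchOverflowBit = 1u << 16;         // assumed overflow bit

// Records 'slot_address'; returns true when the overflow stub would be called.
static bool RememberedSetAppendSketch(void* slot_address) {
  void** top = sketch_store_buffer_top;
  *top = slot_address;            // store pointer to buffer
  ++top;                          // increment buffer top
  sketch_store_buffer_top = top;  // write back new top of buffer
  return (reinterpret_cast<uintptr_t>(top) & kSketchOverflowBit) != 0;
}
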
126 
127 
128 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
129  XMMRegister scratch_reg,
130  Register result_reg) {
131  Label done;
132  Label conv_failure;
133  pxor(scratch_reg, scratch_reg);
134  cvtsd2si(result_reg, input_reg);
135  test(result_reg, Immediate(0xFFFFFF00));
136  j(zero, &done, Label::kNear);
137  cmp(result_reg, Immediate(0x80000000));
138  j(equal, &conv_failure, Label::kNear);
139  mov(result_reg, Immediate(0));
140  setcc(above, result_reg);
141  sub(result_reg, Immediate(1));
142  and_(result_reg, Immediate(255));
143  jmp(&done, Label::kNear);
144  bind(&conv_failure);
145  Set(result_reg, Immediate(0));
146  ucomisd(input_reg, scratch_reg);
147  j(below, &done, Label::kNear);
148  Set(result_reg, Immediate(255));
149  bind(&done);
150 }
151 
152 
153 void MacroAssembler::ClampUint8(Register reg) {
154  Label done;
155  test(reg, Immediate(0xFFFFFF00));
156  j(zero, &done, Label::kNear);
157  setcc(negative, reg); // 1 if negative, 0 if positive.
158  dec_b(reg); // 0 if negative, 255 if positive.
159  bind(&done);
160 }
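
A side note rather than original code: ClampUint8 saturates without branching on which side of the range was violated. After the range test, setcc(negative) leaves 1 for negative inputs and 0 for inputs above 255, and the byte decrement turns that into 0x00 or 0xFF, exactly the clamped values. The same trick in C++:

#include <cstdint>

static uint8_t ClampUint8Sketch(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);  // already 0..255
  uint8_t is_negative = value < 0 ? 1 : 0;       // setcc(negative)
  return static_cast<uint8_t>(is_negative - 1);  // dec_b: 0 if negative, 255 if too large
}
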
161 
162 
163 static double kUint32Bias =
164  static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
165 
166 
167 void MacroAssembler::LoadUint32(XMMRegister dst,
168  Register src,
169  XMMRegister scratch) {
170  Label done;
171  cmp(src, Immediate(0));
172  movdbl(scratch,
173  Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE));
174  cvtsi2sd(dst, src);
175  j(not_sign, &done, Label::kNear);
176  addsd(dst, scratch);
177  bind(&done);
178 }
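
For clarity (not part of the original file): cvtsi2sd treats the 32-bit source as signed, so inputs with the top bit set convert to a value that is exactly 2^32 too small; adding kUint32Bias (2^32) whenever the sign was set restores the unsigned interpretation. The same computation in plain C++:

#include <cstdint>

static const double kSketchUint32Bias = 4294967296.0;  // 2^32, mirroring kUint32Bias above

static double LoadUint32Sketch(uint32_t src) {
  double result = static_cast<double>(static_cast<int32_t>(src));  // signed conversion first
  if (static_cast<int32_t>(src) < 0) result += kSketchUint32Bias;  // add the bias back
  return result;
}
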
179 
180 
181 void MacroAssembler::RecordWriteArray(Register object,
182  Register value,
183  Register index,
184  SaveFPRegsMode save_fp,
185  RememberedSetAction remembered_set_action,
186  SmiCheck smi_check) {
187  // First, check if a write barrier is even needed. The tests below
188  // catch stores of Smis.
189  Label done;
190 
191  // Skip barrier if writing a smi.
192  if (smi_check == INLINE_SMI_CHECK) {
193  ASSERT_EQ(0, kSmiTag);
194  test(value, Immediate(kSmiTagMask));
195  j(zero, &done);
196  }
197 
198  // Array access: calculate the destination address in the same manner as
199  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
200  // into an array of words.
201  Register dst = index;
202  lea(dst, Operand(object, index, times_half_pointer_size,
203  FixedArray::kHeaderSize - kHeapObjectTag));
204 
205  RecordWrite(
206  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
207 
208  bind(&done);
209 
210  // Clobber clobbered input registers when running with the debug-code flag
211  // turned on to provoke errors.
212  if (emit_debug_code()) {
213  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
214  mov(index, Immediate(BitCast<int32_t>(kZapValue)));
215  }
216 }
217 
218 
219 void MacroAssembler::RecordWriteField(
220  Register object,
221  int offset,
222  Register value,
223  Register dst,
224  SaveFPRegsMode save_fp,
225  RememberedSetAction remembered_set_action,
226  SmiCheck smi_check) {
227  // First, check if a write barrier is even needed. The tests below
228  // catch stores of Smis.
229  Label done;
230 
231  // Skip barrier if writing a smi.
232  if (smi_check == INLINE_SMI_CHECK) {
233  JumpIfSmi(value, &done, Label::kNear);
234  }
235 
236  // Although the object register is tagged, the offset is relative to the start
237 // of the object, so the offset must be a multiple of kPointerSize.
238  ASSERT(IsAligned(offset, kPointerSize));
239 
240  lea(dst, FieldOperand(object, offset));
241  if (emit_debug_code()) {
242  Label ok;
243  test_b(dst, (1 << kPointerSizeLog2) - 1);
244  j(zero, &ok, Label::kNear);
245  int3();
246  bind(&ok);
247  }
248 
249  RecordWrite(
250  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
251 
252  bind(&done);
253 
254  // Clobber clobbered input registers when running with the debug-code flag
255  // turned on to provoke errors.
256  if (emit_debug_code()) {
257  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
258  mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
259  }
260 }
261 
262 
263 void MacroAssembler::RecordWriteForMap(
264  Register object,
265  Handle<Map> map,
266  Register scratch1,
267  Register scratch2,
268  SaveFPRegsMode save_fp) {
269  Label done;
270 
271  Register address = scratch1;
272  Register value = scratch2;
273  if (emit_debug_code()) {
274  Label ok;
275  lea(address, FieldOperand(object, HeapObject::kMapOffset));
276  test_b(address, (1 << kPointerSizeLog2) - 1);
277  j(zero, &ok, Label::kNear);
278  int3();
279  bind(&ok);
280  }
281 
282  ASSERT(!object.is(value));
283  ASSERT(!object.is(address));
284  ASSERT(!value.is(address));
285  AssertNotSmi(object);
286 
287  if (!FLAG_incremental_marking) {
288  return;
289  }
290 
291  // A single check of the map's pages interesting flag suffices, since it is
292  // only set during incremental collection, and then it's also guaranteed that
293  // the from object's page's interesting flag is also set. This optimization
294  // relies on the fact that maps can never be in new space.
295  ASSERT(!isolate()->heap()->InNewSpace(*map));
296  CheckPageFlagForMap(map,
297  MemoryChunk::kPointersToHereAreInterestingMask,
298  zero,
299  &done,
300  Label::kNear);
301 
302  // Delay the initialization of |address| and |value| for the stub until it's
303 // known that they will be needed. Up until this point their values are not
304  // needed since they are embedded in the operands of instructions that need
305  // them.
306  lea(address, FieldOperand(object, HeapObject::kMapOffset));
307  mov(value, Immediate(map));
308  RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
309  CallStub(&stub);
310 
311  bind(&done);
312 
313  // Clobber clobbered input registers when running with the debug-code flag
314  // turned on to provoke errors.
315  if (emit_debug_code()) {
316  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
317  mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
318  mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
319  }
320 }
321 
322 
323 void MacroAssembler::RecordWrite(Register object,
324  Register address,
325  Register value,
326  SaveFPRegsMode fp_mode,
327  RememberedSetAction remembered_set_action,
328  SmiCheck smi_check) {
329  ASSERT(!object.is(value));
330  ASSERT(!object.is(address));
331  ASSERT(!value.is(address));
332  AssertNotSmi(object);
333 
334  if (remembered_set_action == OMIT_REMEMBERED_SET &&
335  !FLAG_incremental_marking) {
336  return;
337  }
338 
339  if (emit_debug_code()) {
340  Label ok;
341  cmp(value, Operand(address, 0));
342  j(equal, &ok, Label::kNear);
343  int3();
344  bind(&ok);
345  }
346 
347  // First, check if a write barrier is even needed. The tests below
348  // catch stores of Smis and stores into young gen.
349  Label done;
350 
351  if (smi_check == INLINE_SMI_CHECK) {
352  // Skip barrier if writing a smi.
353  JumpIfSmi(value, &done, Label::kNear);
354  }
355 
356  CheckPageFlag(value,
357  value, // Used as scratch.
358  MemoryChunk::kPointersToHereAreInterestingMask,
359  zero,
360  &done,
361  Label::kNear);
362  CheckPageFlag(object,
363  value, // Used as scratch.
364  MemoryChunk::kPointersFromHereAreInterestingMask,
365  zero,
366  &done,
367  Label::kNear);
368 
369  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
370  CallStub(&stub);
371 
372  bind(&done);
373 
374  // Clobber clobbered registers when running with the debug-code flag
375  // turned on to provoke errors.
376  if (emit_debug_code()) {
377  mov(address, Immediate(BitCast<int32_t>(kZapValue)));
378  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
379  }
380 }
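
As a reading aid (not from the original source): the two CheckPageFlag tests are the usual write-barrier filter. The barrier is skipped when the page holding the stored value is not interested in incoming pointers, or when the page holding the object is not interested in outgoing pointers; only if both flags are set is the RecordWrite stub called. Schematically, with the flag bits taken as parameters instead of read from the page headers:

#include <cstdint>

static const uintptr_t kSketchPointersToHereAreInteresting = 1u << 1;    // assumed bit
static const uintptr_t kSketchPointersFromHereAreInteresting = 1u << 2;  // assumed bit

static bool NeedsRecordWriteStubSketch(uintptr_t object_page_flags,
                                       uintptr_t value_page_flags) {
  if ((value_page_flags & kSketchPointersToHereAreInteresting) == 0) return false;
  if ((object_page_flags & kSketchPointersFromHereAreInteresting) == 0) return false;
  return true;  // both pages are interesting: call the RecordWrite stub
}
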
381 
382 
383 #ifdef ENABLE_DEBUGGER_SUPPORT
384 void MacroAssembler::DebugBreak() {
385  Set(eax, Immediate(0));
386  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
387  CEntryStub ces(1);
388  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
389 }
390 #endif
391 
392 
393 void MacroAssembler::Set(Register dst, const Immediate& x) {
394  if (x.is_zero()) {
395  xor_(dst, dst); // Shorter than mov.
396  } else {
397  mov(dst, x);
398  }
399 }
400 
401 
402 void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
403  mov(dst, x);
404 }
405 
406 
407 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
408  static const int kMaxImmediateBits = 17;
409  if (x.rmode_ != RelocInfo::NONE) return false;
410  return !is_intn(x.x_, kMaxImmediateBits);
411 }
412 
413 
414 void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
415  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
416  Set(dst, Immediate(x.x_ ^ jit_cookie()));
417  xor_(dst, jit_cookie());
418  } else {
419  Set(dst, x);
420  }
421 }
422 
423 
424 void MacroAssembler::SafePush(const Immediate& x) {
425  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
426  push(Immediate(x.x_ ^ jit_cookie()));
427  xor_(Operand(esp, 0), Immediate(jit_cookie()));
428  } else {
429  push(x);
430  }
431 }
432 
433 
434 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
435  // see ROOT_ACCESSOR macro in factory.h
436  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
437  cmp(with, value);
438 }
439 
440 
441 void MacroAssembler::CompareRoot(const Operand& with,
442  Heap::RootListIndex index) {
443  // see ROOT_ACCESSOR macro in factory.h
444  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
445  cmp(with, value);
446 }
447 
448 
449 void MacroAssembler::CmpObjectType(Register heap_object,
450  InstanceType type,
451  Register map) {
452  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
453  CmpInstanceType(map, type);
454 }
455 
456 
457 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
458  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
459  static_cast<int8_t>(type));
460 }
461 
462 
463 void MacroAssembler::CheckFastElements(Register map,
464  Label* fail,
465  Label::Distance distance) {
470  cmpb(FieldOperand(map, Map::kBitField2Offset),
471  Map::kMaximumBitField2FastHoleyElementValue);
472  j(above, fail, distance);
473 }
474 
475 
476 void MacroAssembler::CheckFastObjectElements(Register map,
477  Label* fail,
478  Label::Distance distance) {
483  cmpb(FieldOperand(map, Map::kBitField2Offset),
484  Map::kMaximumBitField2FastHoleySmiElementValue);
485  j(below_equal, fail, distance);
486  cmpb(FieldOperand(map, Map::kBitField2Offset),
487  Map::kMaximumBitField2FastHoleyElementValue);
488  j(above, fail, distance);
489 }
490 
491 
492 void MacroAssembler::CheckFastSmiElements(Register map,
493  Label* fail,
494  Label::Distance distance) {
497  cmpb(FieldOperand(map, Map::kBitField2Offset),
498  Map::kMaximumBitField2FastHoleySmiElementValue);
499  j(above, fail, distance);
500 }
501 
502 
503 void MacroAssembler::StoreNumberToDoubleElements(
504  Register maybe_number,
505  Register elements,
506  Register key,
507  Register scratch1,
508  XMMRegister scratch2,
509  Label* fail,
510  bool specialize_for_processor) {
511  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
512  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
513 
514  CheckMap(maybe_number,
515  isolate()->factory()->heap_number_map(),
516  fail,
517  DONT_DO_SMI_CHECK);
518 
519  // Double value, canonicalize NaN.
520  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
521  cmp(FieldOperand(maybe_number, offset),
522  Immediate(kNaNOrInfinityLowerBoundUpper32));
523  j(greater_equal, &maybe_nan, Label::kNear);
524 
525  bind(&not_nan);
526  ExternalReference canonical_nan_reference =
527  ExternalReference::address_of_canonical_non_hole_nan();
528  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
529  CpuFeatures::Scope use_sse2(SSE2);
530  movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
531  bind(&have_double_value);
532  movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
533  scratch2);
534  } else {
535  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
536  bind(&have_double_value);
537  fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
538  }
539  jmp(&done);
540 
541  bind(&maybe_nan);
542  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
543  // it's an Infinity, and the non-NaN code path applies.
544  j(greater, &is_nan, Label::kNear);
545  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
546  j(zero, &not_nan);
547  bind(&is_nan);
548  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
549  CpuFeatures::Scope use_sse2(SSE2);
550  movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
551  } else {
552  fld_d(Operand::StaticVariable(canonical_nan_reference));
553  }
554  jmp(&have_double_value, Label::kNear);
555 
556  bind(&smi_value);
557  // Value is a smi. Convert to a double and store.
558  // Preserve original value.
559  mov(scratch1, maybe_number);
560  SmiUntag(scratch1);
561  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
562  CpuFeatures::Scope fscope(SSE2);
563  cvtsi2sd(scratch2, scratch1);
564  movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
565  scratch2);
566  } else {
567  push(scratch1);
568  fild_s(Operand(esp, 0));
569  pop(scratch1);
570  fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
571  }
572  bind(&done);
573 }
574 
575 
576 void MacroAssembler::CompareMap(Register obj,
577  Handle<Map> map,
578  Label* early_success,
579  CompareMapMode mode) {
580  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
581  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
582  ElementsKind kind = map->elements_kind();
583  if (IsFastElementsKind(kind)) {
584  bool packed = IsFastPackedElementsKind(kind);
585  Map* current_map = *map;
586  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
587  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
588  current_map = current_map->LookupElementsTransitionMap(kind);
589  if (!current_map) break;
590  j(equal, early_success, Label::kNear);
591  cmp(FieldOperand(obj, HeapObject::kMapOffset),
592  Handle<Map>(current_map));
593  }
594  }
595  }
596 }
597 
598 
599 void MacroAssembler::CheckMap(Register obj,
600  Handle<Map> map,
601  Label* fail,
602  SmiCheckType smi_check_type,
603  CompareMapMode mode) {
604  if (smi_check_type == DO_SMI_CHECK) {
605  JumpIfSmi(obj, fail);
606  }
607 
608  Label success;
609  CompareMap(obj, map, &success, mode);
610  j(not_equal, fail);
611  bind(&success);
612 }
613 
614 
615 void MacroAssembler::DispatchMap(Register obj,
616  Handle<Map> map,
617  Handle<Code> success,
618  SmiCheckType smi_check_type) {
619  Label fail;
620  if (smi_check_type == DO_SMI_CHECK) {
621  JumpIfSmi(obj, &fail);
622  }
623  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
624  j(equal, success);
625 
626  bind(&fail);
627 }
628 
629 
630 Condition MacroAssembler::IsObjectStringType(Register heap_object,
631  Register map,
632  Register instance_type) {
633  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
634  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
636  test(instance_type, Immediate(kIsNotStringMask));
637  return zero;
638 }
639 
640 
641 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
642  Register map,
643  Register scratch,
644  Label* fail) {
645  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
646  IsInstanceJSObjectType(map, scratch, fail);
647 }
648 
649 
650 void MacroAssembler::IsInstanceJSObjectType(Register map,
651  Register scratch,
652  Label* fail) {
653  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
654  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
655  cmp(scratch,
656  LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
657  j(above, fail);
658 }
659 
660 
661 void MacroAssembler::FCmp() {
662  if (CpuFeatures::IsSupported(CMOV)) {
663  fucomip();
664  fstp(0);
665  } else {
666  fucompp();
667  push(eax);
668  fnstsw_ax();
669  sahf();
670  pop(eax);
671  }
672 }
673 
674 
675 void MacroAssembler::AssertNumber(Register object) {
676  if (emit_debug_code()) {
677  Label ok;
678  JumpIfSmi(object, &ok);
679  cmp(FieldOperand(object, HeapObject::kMapOffset),
680  isolate()->factory()->heap_number_map());
681  Check(equal, "Operand not a number");
682  bind(&ok);
683  }
684 }
685 
686 
687 void MacroAssembler::AssertSmi(Register object) {
688  if (emit_debug_code()) {
689  test(object, Immediate(kSmiTagMask));
690  Check(equal, "Operand is not a smi");
691  }
692 }
693 
694 
695 void MacroAssembler::AssertString(Register object) {
696  if (emit_debug_code()) {
697  test(object, Immediate(kSmiTagMask));
698  Check(not_equal, "Operand is a smi and not a string");
699  push(object);
700  mov(object, FieldOperand(object, HeapObject::kMapOffset));
701  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
702  pop(object);
703  Check(below, "Operand is not a string");
704  }
705 }
706 
707 
708 void MacroAssembler::AssertNotSmi(Register object) {
709  if (emit_debug_code()) {
710  test(object, Immediate(kSmiTagMask));
711  Check(not_equal, "Operand is a smi");
712  }
713 }
714 
715 
716 void MacroAssembler::EnterFrame(StackFrame::Type type) {
717  push(ebp);
718  mov(ebp, esp);
719  push(esi);
720  push(Immediate(Smi::FromInt(type)));
721  push(Immediate(CodeObject()));
722  if (emit_debug_code()) {
723  cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
724  Check(not_equal, "code object not properly patched");
725  }
726 }
727 
728 
729 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
730  if (emit_debug_code()) {
731  cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
732  Immediate(Smi::FromInt(type)));
733  Check(equal, "stack frame types must match");
734  }
735  leave();
736 }
737 
738 
739 void MacroAssembler::EnterExitFramePrologue() {
740  // Set up the frame structure on the stack.
741  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
742  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
743  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
744  push(ebp);
745  mov(ebp, esp);
746 
747  // Reserve room for entry stack pointer and push the code object.
748  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
749  push(Immediate(0)); // Saved entry sp, patched before call.
750  push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
751 
752  // Save the frame pointer and the context in top.
753  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
754  isolate());
755  ExternalReference context_address(Isolate::kContextAddress,
756  isolate());
757  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
758  mov(Operand::StaticVariable(context_address), esi);
759 }
760 
761 
762 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
763  // Optionally save all XMM registers.
764  if (save_doubles) {
765  CpuFeatures::Scope scope(SSE2);
766  int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
767  sub(esp, Immediate(space));
768  const int offset = -2 * kPointerSize;
769  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
770  XMMRegister reg = XMMRegister::from_code(i);
771  movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
772  }
773  } else {
774  sub(esp, Immediate(argc * kPointerSize));
775  }
776 
777  // Get the required frame alignment for the OS.
778  const int kFrameAlignment = OS::ActivationFrameAlignment();
779  if (kFrameAlignment > 0) {
780  ASSERT(IsPowerOf2(kFrameAlignment));
781  and_(esp, -kFrameAlignment);
782  }
783 
784  // Patch the saved entry sp.
785  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
786 }
787 
788 
789 void MacroAssembler::EnterExitFrame(bool save_doubles) {
790  EnterExitFramePrologue();
791 
792  // Set up argc and argv in callee-saved registers.
793  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
794  mov(edi, eax);
795  lea(esi, Operand(ebp, eax, times_4, offset));
796 
797  // Reserve space for argc, argv and isolate.
798  EnterExitFrameEpilogue(3, save_doubles);
799 }
800 
801 
802 void MacroAssembler::EnterApiExitFrame(int argc) {
803  EnterExitFramePrologue();
804  EnterExitFrameEpilogue(argc, false);
805 }
806 
807 
808 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
809  // Optionally restore all XMM registers.
810  if (save_doubles) {
811  CpuFeatures::Scope scope(SSE2);
812  const int offset = -2 * kPointerSize;
813  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
814  XMMRegister reg = XMMRegister::from_code(i);
815  movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
816  }
817  }
818 
819  // Get the return address from the stack and restore the frame pointer.
820  mov(ecx, Operand(ebp, 1 * kPointerSize));
821  mov(ebp, Operand(ebp, 0 * kPointerSize));
822 
823  // Pop the arguments and the receiver from the caller stack.
824  lea(esp, Operand(esi, 1 * kPointerSize));
825 
826  // Push the return address to get ready to return.
827  push(ecx);
828 
829  LeaveExitFrameEpilogue();
830 }
831 
832 void MacroAssembler::LeaveExitFrameEpilogue() {
833  // Restore current context from top and clear it in debug mode.
834  ExternalReference context_address(Isolate::kContextAddress, isolate());
835  mov(esi, Operand::StaticVariable(context_address));
836 #ifdef DEBUG
837  mov(Operand::StaticVariable(context_address), Immediate(0));
838 #endif
839 
840  // Clear the top frame.
841  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
842  isolate());
843  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
844 }
845 
846 
847 void MacroAssembler::LeaveApiExitFrame() {
848  mov(esp, ebp);
849  pop(ebp);
850 
851  LeaveExitFrameEpilogue();
852 }
853 
854 
855 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
856  int handler_index) {
857  // Adjust this code if not the case.
858  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
859  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
860  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
861  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
862  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
863  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
864 
865  // We will build up the handler from the bottom by pushing on the stack.
866  // First push the frame pointer and context.
867  if (kind == StackHandler::JS_ENTRY) {
868  // The frame pointer does not point to a JS frame so we save NULL for
869  // ebp. We expect the code throwing an exception to check ebp before
870  // dereferencing it to restore the context.
871  push(Immediate(0)); // NULL frame pointer.
872  push(Immediate(Smi::FromInt(0))); // No context.
873  } else {
874  push(ebp);
875  push(esi);
876  }
877  // Push the state and the code object.
878  unsigned state =
879  StackHandler::IndexField::encode(handler_index) |
880  StackHandler::KindField::encode(kind);
881  push(Immediate(state));
882  Push(CodeObject());
883 
884  // Link the current handler as the next handler.
885  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
886  push(Operand::StaticVariable(handler_address));
887  // Set this new handler as the current one.
888  mov(Operand::StaticVariable(handler_address), esp);
889 }
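
An annotation, not original code: the STATIC_ASSERTs above pin down the five-word try-handler record built here, and the pushes emit its fields from highest address to lowest, so after PushTryHandler esp points at the 'next' link. Read back in memory order, the record looks roughly like this:

// Sketch of the stack handler layout (esp points at 'next' after PushTryHandler).
struct StackHandlerSketch {
  void* next;      // kNextOffset:    previously installed handler (old kHandlerAddress)
  void* code;      // kCodeOffset:    CodeObject()
  unsigned state;  // kStateOffset:   IndexField | KindField
  void* context;   // kContextOffset: esi, or Smi zero for a JS_ENTRY handler
  void* fp;        // kFPOffset:      ebp, or NULL for a JS_ENTRY handler
};
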
890 
891 
892 void MacroAssembler::PopTryHandler() {
893  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
894  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
895  pop(Operand::StaticVariable(handler_address));
896  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
897 }
898 
899 
900 void MacroAssembler::JumpToHandlerEntry() {
901  // Compute the handler entry address and jump to it. The handler table is
902  // a fixed array of (smi-tagged) code offsets.
903  // eax = exception, edi = code object, edx = state.
904  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
905  shr(edx, StackHandler::kKindWidth);
906  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
907  SmiUntag(edx);
908  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
909  jmp(edi);
910 }
911 
912 
913 void MacroAssembler::Throw(Register value) {
914  // Adjust this code if not the case.
915  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
916  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
917  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
918  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
919  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
920  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
921 
922  // The exception is expected in eax.
923  if (!value.is(eax)) {
924  mov(eax, value);
925  }
926  // Drop the stack pointer to the top of the top handler.
927  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
928  mov(esp, Operand::StaticVariable(handler_address));
929  // Restore the next handler.
930  pop(Operand::StaticVariable(handler_address));
931 
932  // Remove the code object and state, compute the handler address in edi.
933  pop(edi); // Code object.
934  pop(edx); // Index and state.
935 
936  // Restore the context and frame pointer.
937  pop(esi); // Context.
938  pop(ebp); // Frame pointer.
939 
940  // If the handler is a JS frame, restore the context to the frame.
941  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
942  // ebp or esi.
943  Label skip;
944  test(esi, esi);
945  j(zero, &skip, Label::kNear);
946  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
947  bind(&skip);
948 
949  JumpToHandlerEntry();
950 }
951 
952 
953 void MacroAssembler::ThrowUncatchable(Register value) {
954  // Adjust this code if not the case.
955  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
956  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
957  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
958  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
959  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
960  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
961 
962  // The exception is expected in eax.
963  if (!value.is(eax)) {
964  mov(eax, value);
965  }
966  // Drop the stack pointer to the top of the top stack handler.
967  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
968  mov(esp, Operand::StaticVariable(handler_address));
969 
970  // Unwind the handlers until the top ENTRY handler is found.
971  Label fetch_next, check_kind;
972  jmp(&check_kind, Label::kNear);
973  bind(&fetch_next);
974  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
975 
976  bind(&check_kind);
977  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
978  test(Operand(esp, StackHandlerConstants::kStateOffset),
979  Immediate(StackHandler::KindField::kMask));
980  j(not_zero, &fetch_next);
981 
982  // Set the top handler address to next handler past the top ENTRY handler.
983  pop(Operand::StaticVariable(handler_address));
984 
985  // Remove the code object and state, compute the handler address in edi.
986  pop(edi); // Code object.
987  pop(edx); // Index and state.
988 
989  // Clear the context pointer and frame pointer (0 was saved in the handler).
990  pop(esi);
991  pop(ebp);
992 
993  JumpToHandlerEntry();
994 }
995 
996 
997 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
998  Register scratch,
999  Label* miss) {
1000  Label same_contexts;
1001 
1002  ASSERT(!holder_reg.is(scratch));
1003 
1004  // Load current lexical context from the stack frame.
1005  mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
1006 
1007  // When generating debug code, make sure the lexical context is set.
1008  if (emit_debug_code()) {
1009  cmp(scratch, Immediate(0));
1010  Check(not_equal, "we should not have an empty lexical context");
1011  }
1012  // Load the native context of the current context.
1013  int offset =
1014  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1015  mov(scratch, FieldOperand(scratch, offset));
1016  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
1017 
1018  // Check the context is a native context.
1019  if (emit_debug_code()) {
1020  push(scratch);
1021  // Read the first word and compare to native_context_map.
1022  mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
1023  cmp(scratch, isolate()->factory()->native_context_map());
1024  Check(equal, "JSGlobalObject::native_context should be a native context.");
1025  pop(scratch);
1026  }
1027 
1028  // Check if both contexts are the same.
1029  cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1030  j(equal, &same_contexts);
1031 
1032  // Compare security tokens, save holder_reg on the stack so we can use it
1033  // as a temporary register.
1034  //
1035  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
1036  push(holder_reg);
1037  // Check that the security token in the calling global object is
1038  // compatible with the security token in the receiving global
1039  // object.
1040  mov(holder_reg,
1041  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1042 
1043  // Check the context is a native context.
1044  if (emit_debug_code()) {
1045  cmp(holder_reg, isolate()->factory()->null_value());
1046  Check(not_equal, "JSGlobalProxy::context() should not be null.");
1047 
1048  push(holder_reg);
1049  // Read the first word and compare to native_context_map(),
1050  mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
1051  cmp(holder_reg, isolate()->factory()->native_context_map());
1052  Check(equal, "JSGlobalObject::native_context should be a native context.");
1053  pop(holder_reg);
1054  }
1055 
1056  int token_offset = Context::kHeaderSize +
1057  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1058  mov(scratch, FieldOperand(scratch, token_offset));
1059  cmp(scratch, FieldOperand(holder_reg, token_offset));
1060  pop(holder_reg);
1061  j(not_equal, miss);
1062 
1063  bind(&same_contexts);
1064 }
1065 
1066 
1067 // Compute the hash code from the untagged key. This must be kept in sync
1068 // with ComputeIntegerHash in utils.h.
1069 //
1070 // Note: r0 will contain hash code
1071 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
1072  // Xor original key with a seed.
1073  if (Serializer::enabled()) {
1074  ExternalReference roots_array_start =
1075  ExternalReference::roots_array_start(isolate());
1076  mov(scratch, Immediate(Heap::kHashSeedRootIndex));
1077  mov(scratch,
1078  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
1079  SmiUntag(scratch);
1080  xor_(r0, scratch);
1081  } else {
1082  int32_t seed = isolate()->heap()->HashSeed();
1083  xor_(r0, Immediate(seed));
1084  }
1085 
1086  // hash = ~hash + (hash << 15);
1087  mov(scratch, r0);
1088  not_(r0);
1089  shl(scratch, 15);
1090  add(r0, scratch);
1091  // hash = hash ^ (hash >> 12);
1092  mov(scratch, r0);
1093  shr(scratch, 12);
1094  xor_(r0, scratch);
1095  // hash = hash + (hash << 2);
1096  lea(r0, Operand(r0, r0, times_4, 0));
1097  // hash = hash ^ (hash >> 4);
1098  mov(scratch, r0);
1099  shr(scratch, 4);
1100  xor_(r0, scratch);
1101  // hash = hash * 2057;
1102  imul(r0, r0, 2057);
1103  // hash = hash ^ (hash >> 16);
1104  mov(scratch, r0);
1105  shr(scratch, 16);
1106  xor_(r0, scratch);
1107 }
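
The shift-and-xor sequence above mirrors ComputeIntegerHash in utils.h, which the header comment says it must stay in sync with. A C++ transcription of the same steps, following the inline comments (the real helper may additionally mask the final result):

#include <cstdint>

static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;    // xor original key with a seed
  hash = ~hash + (hash << 15);   // hash = ~hash + (hash << 15)
  hash = hash ^ (hash >> 12);    // hash = hash ^ (hash >> 12)
  hash = hash + (hash << 2);     // hash = hash + (hash << 2)
  hash = hash ^ (hash >> 4);     // hash = hash ^ (hash >> 4)
  hash = hash * 2057;            // hash = hash * 2057
  hash = hash ^ (hash >> 16);    // hash = hash ^ (hash >> 16)
  return hash;
}
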
1108 
1109 
1110 
1111 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1112  Register elements,
1113  Register key,
1114  Register r0,
1115  Register r1,
1116  Register r2,
1117  Register result) {
1118  // Register use:
1119  //
1120  // elements - holds the slow-case elements of the receiver and is unchanged.
1121  //
1122  // key - holds the smi key on entry and is unchanged.
1123  //
1124  // Scratch registers:
1125  //
1126  // r0 - holds the untagged key on entry and holds the hash once computed.
1127  //
1128  // r1 - used to hold the capacity mask of the dictionary
1129  //
1130  // r2 - used for the index into the dictionary.
1131  //
1132  // result - holds the result on exit if the load succeeds and we fall through.
1133 
1134  Label done;
1135 
1136  GetNumberHash(r0, r1);
1137 
1138  // Compute capacity mask.
1139  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
1140  shr(r1, kSmiTagSize); // convert smi to int
1141  dec(r1);
1142 
1143  // Generate an unrolled loop that performs a few probes before giving up.
1144  const int kProbes = 4;
1145  for (int i = 0; i < kProbes; i++) {
1146  // Use r2 for index calculations and keep the hash intact in r0.
1147  mov(r2, r0);
1148  // Compute the masked index: (hash + i + i * i) & mask.
1149  if (i > 0) {
1150  add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
1151  }
1152  and_(r2, r1);
1153 
1154  // Scale the index by multiplying by the entry size.
1155  ASSERT(SeededNumberDictionary::kEntrySize == 3);
1156  lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
1157 
1158  // Check if the key matches.
1159  cmp(key, FieldOperand(elements,
1160  r2,
1161  times_pointer_size,
1162  SeededNumberDictionary::kElementsStartOffset));
1163  if (i != (kProbes - 1)) {
1164  j(equal, &done);
1165  } else {
1166  j(not_equal, miss);
1167  }
1168  }
1169 
1170  bind(&done);
1171  // Check that the value is a normal property.
1172  const int kDetailsOffset =
1173  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1174  ASSERT_EQ(NORMAL, 0);
1175  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
1176  Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
1177  j(not_zero, miss);
1178 
1179  // Get the value at the masked, scaled index.
1180  const int kValueOffset =
1181  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1182  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
1183 }
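
Again as an annotation: the lookup above is an unrolled open-addressing probe. Each of the four probes computes a masked index from the hash (the inline comment characterizes it as (hash + i + i*i) & mask, standing in for GetProbeOffset(i)), scales it by the three-word entry size, and compares the stored key; the last failing probe jumps to the miss label. A compact sketch that omits the trailing property-details check:

#include <cstdint>

struct NumberDictionaryEntrySketch { uint32_t key; uint32_t value; uint32_t details; };

// Returns the index of the entry holding 'key', or -1 if all four probes miss.
static int NumberDictionaryProbeSketch(const NumberDictionaryEntrySketch* entries,
                                       uint32_t capacity,  // power of two
                                       uint32_t key,
                                       uint32_t hash) {
  uint32_t mask = capacity - 1;
  for (uint32_t i = 0; i < 4; i++) {             // kProbes == 4
    uint32_t index = (hash + i + i * i) & mask;  // masked index, per the comment above
    if (entries[index].key == key) return static_cast<int>(index);
  }
  return -1;  // the generated code jumps to 'miss' here
}
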
1184 
1185 
1186 void MacroAssembler::LoadAllocationTopHelper(Register result,
1187  Register scratch,
1188  AllocationFlags flags) {
1189  ExternalReference new_space_allocation_top =
1190  ExternalReference::new_space_allocation_top_address(isolate());
1191 
1192  // Just return if allocation top is already known.
1193  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1194  // No use of scratch if allocation top is provided.
1195  ASSERT(scratch.is(no_reg));
1196 #ifdef DEBUG
1197  // Assert that result actually contains top on entry.
1198  cmp(result, Operand::StaticVariable(new_space_allocation_top));
1199  Check(equal, "Unexpected allocation top");
1200 #endif
1201  return;
1202  }
1203 
1204  // Move address of new object to result. Use scratch register if available.
1205  if (scratch.is(no_reg)) {
1206  mov(result, Operand::StaticVariable(new_space_allocation_top));
1207  } else {
1208  mov(scratch, Immediate(new_space_allocation_top));
1209  mov(result, Operand(scratch, 0));
1210  }
1211 }
1212 
1213 
1214 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1215  Register scratch) {
1216  if (emit_debug_code()) {
1217  test(result_end, Immediate(kObjectAlignmentMask));
1218  Check(zero, "Unaligned allocation in new space");
1219  }
1220 
1221  ExternalReference new_space_allocation_top =
1222  ExternalReference::new_space_allocation_top_address(isolate());
1223 
1224  // Update new top. Use scratch if available.
1225  if (scratch.is(no_reg)) {
1226  mov(Operand::StaticVariable(new_space_allocation_top), result_end);
1227  } else {
1228  mov(Operand(scratch, 0), result_end);
1229  }
1230 }
1231 
1232 
1233 void MacroAssembler::AllocateInNewSpace(int object_size,
1234  Register result,
1235  Register result_end,
1236  Register scratch,
1237  Label* gc_required,
1238  AllocationFlags flags) {
1239  if (!FLAG_inline_new) {
1240  if (emit_debug_code()) {
1241  // Trash the registers to simulate an allocation failure.
1242  mov(result, Immediate(0x7091));
1243  if (result_end.is_valid()) {
1244  mov(result_end, Immediate(0x7191));
1245  }
1246  if (scratch.is_valid()) {
1247  mov(scratch, Immediate(0x7291));
1248  }
1249  }
1250  jmp(gc_required);
1251  return;
1252  }
1253  ASSERT(!result.is(result_end));
1254 
1255  // Load address of new object into result.
1256  LoadAllocationTopHelper(result, scratch, flags);
1257 
1258  Register top_reg = result_end.is_valid() ? result_end : result;
1259 
1260  // Calculate new top and bail out if new space is exhausted.
1261  ExternalReference new_space_allocation_limit =
1262  ExternalReference::new_space_allocation_limit_address(isolate());
1263 
1264  if (!top_reg.is(result)) {
1265  mov(top_reg, result);
1266  }
1267  add(top_reg, Immediate(object_size));
1268  j(carry, gc_required);
1269  cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
1270  j(above, gc_required);
1271 
1272  // Update allocation top.
1273  UpdateAllocationTopHelper(top_reg, scratch);
1274 
1275  // Tag result if requested.
1276  if (top_reg.is(result)) {
1277  if ((flags & TAG_OBJECT) != 0) {
1278  sub(result, Immediate(object_size - kHeapObjectTag));
1279  } else {
1280  sub(result, Immediate(object_size));
1281  }
1282  } else if ((flags & TAG_OBJECT) != 0) {
1283  add(result, Immediate(kHeapObjectTag));
1284  }
1285 }
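
A reading aid, not part of the file: this and the following AllocateInNewSpace overloads are bump-pointer allocation against an external top/limit pair, jumping to gc_required when the addition carries or the new top exceeds the limit, and optionally tagging the result as a heap object. Schematically:

#include <cstdint>

static uintptr_t sketch_allocation_top = 0;       // stands in for new_space_allocation_top_address
static uintptr_t sketch_allocation_limit = 0;     // stands in for new_space_allocation_limit_address
static const uintptr_t kSketchHeapObjectTag = 1;  // assumed tag value

// Returns the (optionally tagged) address of 'size' fresh bytes, or 0 when a GC is required.
static uintptr_t AllocateInNewSpaceSketch(uintptr_t size, bool tag_object) {
  uintptr_t result = sketch_allocation_top;
  uintptr_t new_top = result + size;
  if (new_top < result) return 0;                   // carry: overflow -> gc_required
  if (new_top > sketch_allocation_limit) return 0;  // past the limit -> gc_required
  sketch_allocation_top = new_top;                  // UpdateAllocationTopHelper
  return tag_object ? result + kSketchHeapObjectTag : result;
}
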
1286 
1287 
1288 void MacroAssembler::AllocateInNewSpace(int header_size,
1289  ScaleFactor element_size,
1290  Register element_count,
1291  Register result,
1292  Register result_end,
1293  Register scratch,
1294  Label* gc_required,
1295  AllocationFlags flags) {
1296  if (!FLAG_inline_new) {
1297  if (emit_debug_code()) {
1298  // Trash the registers to simulate an allocation failure.
1299  mov(result, Immediate(0x7091));
1300  mov(result_end, Immediate(0x7191));
1301  if (scratch.is_valid()) {
1302  mov(scratch, Immediate(0x7291));
1303  }
1304  // Register element_count is not modified by the function.
1305  }
1306  jmp(gc_required);
1307  return;
1308  }
1309  ASSERT(!result.is(result_end));
1310 
1311  // Load address of new object into result.
1312  LoadAllocationTopHelper(result, scratch, flags);
1313 
1314  // Calculate new top and bail out if new space is exhausted.
1315  ExternalReference new_space_allocation_limit =
1316  ExternalReference::new_space_allocation_limit_address(isolate());
1317 
1318  // We assume that element_count*element_size + header_size does not
1319  // overflow.
1320  lea(result_end, Operand(element_count, element_size, header_size));
1321  add(result_end, result);
1322  j(carry, gc_required);
1323  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
1324  j(above, gc_required);
1325 
1326  // Tag result if requested.
1327  if ((flags & TAG_OBJECT) != 0) {
1328  lea(result, Operand(result, kHeapObjectTag));
1329  }
1330 
1331  // Update allocation top.
1332  UpdateAllocationTopHelper(result_end, scratch);
1333 }
1334 
1335 
1336 void MacroAssembler::AllocateInNewSpace(Register object_size,
1337  Register result,
1338  Register result_end,
1339  Register scratch,
1340  Label* gc_required,
1341  AllocationFlags flags) {
1342  if (!FLAG_inline_new) {
1343  if (emit_debug_code()) {
1344  // Trash the registers to simulate an allocation failure.
1345  mov(result, Immediate(0x7091));
1346  mov(result_end, Immediate(0x7191));
1347  if (scratch.is_valid()) {
1348  mov(scratch, Immediate(0x7291));
1349  }
1350  // object_size is left unchanged by this function.
1351  }
1352  jmp(gc_required);
1353  return;
1354  }
1355  ASSERT(!result.is(result_end));
1356 
1357  // Load address of new object into result.
1358  LoadAllocationTopHelper(result, scratch, flags);
1359 
1360  // Calculate new top and bail out if new space is exhausted.
1361  ExternalReference new_space_allocation_limit =
1362  ExternalReference::new_space_allocation_limit_address(isolate());
1363  if (!object_size.is(result_end)) {
1364  mov(result_end, object_size);
1365  }
1366  add(result_end, result);
1367  j(carry, gc_required);
1368  cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
1369  j(above, gc_required);
1370 
1371  // Tag result if requested.
1372  if ((flags & TAG_OBJECT) != 0) {
1373  lea(result, Operand(result, kHeapObjectTag));
1374  }
1375 
1376  // Update allocation top.
1377  UpdateAllocationTopHelper(result_end, scratch);
1378 }
1379 
1380 
1381 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
1382  ExternalReference new_space_allocation_top =
1383  ExternalReference::new_space_allocation_top_address(isolate());
1384 
1385  // Make sure the object has no tag before resetting top.
1386  and_(object, Immediate(~kHeapObjectTagMask));
1387 #ifdef DEBUG
1388  cmp(object, Operand::StaticVariable(new_space_allocation_top));
1389  Check(below, "Undo allocation of non allocated memory");
1390 #endif
1391  mov(Operand::StaticVariable(new_space_allocation_top), object);
1392 }
1393 
1394 
1395 void MacroAssembler::AllocateHeapNumber(Register result,
1396  Register scratch1,
1397  Register scratch2,
1398  Label* gc_required) {
1399  // Allocate heap number in new space.
1400  AllocateInNewSpace(HeapNumber::kSize,
1401  result,
1402  scratch1,
1403  scratch2,
1404  gc_required,
1405  TAG_OBJECT);
1406 
1407  // Set the map.
1408  mov(FieldOperand(result, HeapObject::kMapOffset),
1409  Immediate(isolate()->factory()->heap_number_map()));
1410 }
1411 
1412 
1413 void MacroAssembler::AllocateTwoByteString(Register result,
1414  Register length,
1415  Register scratch1,
1416  Register scratch2,
1417  Register scratch3,
1418  Label* gc_required) {
1419  // Calculate the number of bytes needed for the characters in the string while
1420  // observing object alignment.
1421  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1422  ASSERT(kShortSize == 2);
1423  // scratch1 = length * 2 + kObjectAlignmentMask.
1424  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1425  and_(scratch1, Immediate(~kObjectAlignmentMask));
1426 
1427  // Allocate two byte string in new space.
1428  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
1429  times_1,
1430  scratch1,
1431  result,
1432  scratch2,
1433  scratch3,
1434  gc_required,
1435  TAG_OBJECT);
1436 
1437  // Set the map, length and hash field.
1438  mov(FieldOperand(result, HeapObject::kMapOffset),
1439  Immediate(isolate()->factory()->string_map()));
1440  mov(scratch1, length);
1441  SmiTag(scratch1);
1442  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1443  mov(FieldOperand(result, String::kHashFieldOffset),
1444  Immediate(String::kEmptyHashField));
1445 }
1446 
1447 
1448 void MacroAssembler::AllocateAsciiString(Register result,
1449  Register length,
1450  Register scratch1,
1451  Register scratch2,
1452  Register scratch3,
1453  Label* gc_required) {
1454  // Calculate the number of bytes needed for the characters in the string while
1455  // observing object alignment.
1456  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1457  mov(scratch1, length);
1458  ASSERT(kCharSize == 1);
1459  add(scratch1, Immediate(kObjectAlignmentMask));
1460  and_(scratch1, Immediate(~kObjectAlignmentMask));
1461 
1462  // Allocate ASCII string in new space.
1463  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
1464  times_1,
1465  scratch1,
1466  result,
1467  scratch2,
1468  scratch3,
1469  gc_required,
1470  TAG_OBJECT);
1471 
1472  // Set the map, length and hash field.
1473  mov(FieldOperand(result, HeapObject::kMapOffset),
1474  Immediate(isolate()->factory()->ascii_string_map()));
1475  mov(scratch1, length);
1476  SmiTag(scratch1);
1477  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1478  mov(FieldOperand(result, String::kHashFieldOffset),
1479  Immediate(String::kEmptyHashField));
1480 }
1481 
1482 
1483 void MacroAssembler::AllocateAsciiString(Register result,
1484  int length,
1485  Register scratch1,
1486  Register scratch2,
1487  Label* gc_required) {
1488  ASSERT(length > 0);
1489 
1490  // Allocate ASCII string in new space.
1491  AllocateInNewSpace(SeqAsciiString::SizeFor(length),
1492  result,
1493  scratch1,
1494  scratch2,
1495  gc_required,
1496  TAG_OBJECT);
1497 
1498  // Set the map, length and hash field.
1499  mov(FieldOperand(result, HeapObject::kMapOffset),
1500  Immediate(isolate()->factory()->ascii_string_map()));
1501  mov(FieldOperand(result, String::kLengthOffset),
1502  Immediate(Smi::FromInt(length)));
1503  mov(FieldOperand(result, String::kHashFieldOffset),
1504  Immediate(String::kEmptyHashField));
1505 }
1506 
1507 
1508 void MacroAssembler::AllocateTwoByteConsString(Register result,
1509  Register scratch1,
1510  Register scratch2,
1511  Label* gc_required) {
1512  // Allocate cons string in new space.
1513  AllocateInNewSpace(ConsString::kSize,
1514  result,
1515  scratch1,
1516  scratch2,
1517  gc_required,
1518  TAG_OBJECT);
1519 
1520  // Set the map. The other fields are left uninitialized.
1521  mov(FieldOperand(result, HeapObject::kMapOffset),
1522  Immediate(isolate()->factory()->cons_string_map()));
1523 }
1524 
1525 
1526 void MacroAssembler::AllocateAsciiConsString(Register result,
1527  Register scratch1,
1528  Register scratch2,
1529  Label* gc_required) {
1530  // Allocate ASCII cons string in new space.
1531  AllocateInNewSpace(ConsString::kSize,
1532  result,
1533  scratch1,
1534  scratch2,
1535  gc_required,
1536  TAG_OBJECT);
1537 
1538  // Set the map. The other fields are left uninitialized.
1539  mov(FieldOperand(result, HeapObject::kMapOffset),
1540  Immediate(isolate()->factory()->cons_ascii_string_map()));
1541 }
1542 
1543 
1544 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1545  Register scratch1,
1546  Register scratch2,
1547  Label* gc_required) {
1548  // Allocate sliced string in new space.
1549  AllocateInNewSpace(SlicedString::kSize,
1550  result,
1551  scratch1,
1552  scratch2,
1553  gc_required,
1554  TAG_OBJECT);
1555 
1556  // Set the map. The other fields are left uninitialized.
1557  mov(FieldOperand(result, HeapObject::kMapOffset),
1558  Immediate(isolate()->factory()->sliced_string_map()));
1559 }
1560 
1561 
1562 void MacroAssembler::AllocateAsciiSlicedString(Register result,
1563  Register scratch1,
1564  Register scratch2,
1565  Label* gc_required) {
1566  // Allocate ASCII sliced string in new space.
1567  AllocateInNewSpace(SlicedString::kSize,
1568  result,
1569  scratch1,
1570  scratch2,
1571  gc_required,
1572  TAG_OBJECT);
1573 
1574  // Set the map. The other fields are left uninitialized.
1575  mov(FieldOperand(result, HeapObject::kMapOffset),
1576  Immediate(isolate()->factory()->sliced_ascii_string_map()));
1577 }
1578 
1579 
1580 // Copy memory, byte-by-byte, from source to destination. Not optimized for
1581 // long or aligned copies. The contents of scratch and length are destroyed.
1582 // Source and destination are incremented by length.
1583 // Many variants of movsb, loop unrolling, word moves, and indexed operands
1584 // have been tried here already, and this is fastest.
1585 // A simpler loop is faster on small copies, but 30% slower on large ones.
1586 // The cld() instruction must have been emitted, to set the direction flag,
1587 // before calling this function.
1588 void MacroAssembler::CopyBytes(Register source,
1589  Register destination,
1590  Register length,
1591  Register scratch) {
1592  Label loop, done, short_string, short_loop;
1593  // Experimentation shows that the short string loop is faster if length < 10.
1594  cmp(length, Immediate(10));
1595  j(less_equal, &short_string);
1596 
1597  ASSERT(source.is(esi));
1598  ASSERT(destination.is(edi));
1599  ASSERT(length.is(ecx));
1600 
1601  // Because source is 4-byte aligned in our uses of this function,
1602  // we keep source aligned for the rep_movs call by copying the odd bytes
1603  // at the end of the ranges.
1604  mov(scratch, Operand(source, length, times_1, -4));
1605  mov(Operand(destination, length, times_1, -4), scratch);
1606  mov(scratch, ecx);
1607  shr(ecx, 2);
1608  rep_movs();
1609  and_(scratch, Immediate(0x3));
1610  add(destination, scratch);
1611  jmp(&done);
1612 
1613  bind(&short_string);
1614  test(length, length);
1615  j(zero, &done);
1616 
1617  bind(&short_loop);
1618  mov_b(scratch, Operand(source, 0));
1619  mov_b(Operand(destination, 0), scratch);
1620  inc(source);
1621  inc(destination);
1622  dec(length);
1623  j(not_zero, &short_loop);
1624 
1625  bind(&done);
1626 }
1627 
1628 
1629 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
1630  Register end_offset,
1631  Register filler) {
1632  Label loop, entry;
1633  jmp(&entry);
1634  bind(&loop);
1635  mov(Operand(start_offset, 0), filler);
1636  add(start_offset, Immediate(kPointerSize));
1637  bind(&entry);
1638  cmp(start_offset, end_offset);
1639  j(less, &loop);
1640 }
1641 
1642 
1643 void MacroAssembler::BooleanBitTest(Register object,
1644  int field_offset,
1645  int bit_index) {
1646  bit_index += kSmiTagSize + kSmiShiftSize;
1648  int byte_index = bit_index / kBitsPerByte;
1649  int byte_bit_index = bit_index & (kBitsPerByte - 1);
1650  test_b(FieldOperand(object, field_offset + byte_index),
1651  static_cast<byte>(1 << byte_bit_index));
1652 }
1653 
1654 
1655 
1656 void MacroAssembler::NegativeZeroTest(Register result,
1657  Register op,
1658  Label* then_label) {
1659  Label ok;
1660  test(result, result);
1661  j(not_zero, &ok);
1662  test(op, op);
1663  j(sign, then_label);
1664  bind(&ok);
1665 }
1666 
1667 
1668 void MacroAssembler::NegativeZeroTest(Register result,
1669  Register op1,
1670  Register op2,
1671  Register scratch,
1672  Label* then_label) {
1673  Label ok;
1674  test(result, result);
1675  j(not_zero, &ok);
1676  mov(scratch, op1);
1677  or_(scratch, op2);
1678  j(sign, then_label);
1679  bind(&ok);
1680 }
1681 
1682 
1683 void MacroAssembler::TryGetFunctionPrototype(Register function,
1684  Register result,
1685  Register scratch,
1686  Label* miss,
1687  bool miss_on_bound_function) {
1688  // Check that the receiver isn't a smi.
1689  JumpIfSmi(function, miss);
1690 
1691  // Check that the function really is a function.
1692  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1693  j(not_equal, miss);
1694 
1695  if (miss_on_bound_function) {
1696  // If a bound function, go to miss label.
1697  mov(scratch,
1698  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1699  BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
1700  SharedFunctionInfo::kBoundFunction);
1701  j(not_zero, miss);
1702  }
1703 
1704  // Make sure that the function has an instance prototype.
1705  Label non_instance;
1706  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
1707  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
1708  j(not_zero, &non_instance);
1709 
1710  // Get the prototype or initial map from the function.
1711  mov(result,
1712  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1713 
1714  // If the prototype or initial map is the hole, don't return it and
1715  // simply miss the cache instead. This will allow us to allocate a
1716  // prototype object on-demand in the runtime system.
1717  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
1718  j(equal, miss);
1719 
1720  // If the function does not have an initial map, we're done.
1721  Label done;
1722  CmpObjectType(result, MAP_TYPE, scratch);
1723  j(not_equal, &done);
1724 
1725  // Get the prototype from the initial map.
1726  mov(result, FieldOperand(result, Map::kPrototypeOffset));
1727  jmp(&done);
1728 
1729  // Non-instance prototype: Fetch prototype from constructor field
1730  // in initial map.
1731  bind(&non_instance);
1732  mov(result, FieldOperand(result, Map::kConstructorOffset));
1733 
1734  // All done.
1735  bind(&done);
1736 }
1737 
1738 
1739 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1740  ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
1741  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1742 }
1743 
1744 
1745 void MacroAssembler::TailCallStub(CodeStub* stub) {
1746  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
1747  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
1748 }
1749 
1750 
1751 void MacroAssembler::StubReturn(int argc) {
1752  ASSERT(argc >= 1 && generating_stub());
1753  ret((argc - 1) * kPointerSize);
1754 }
1755 
1756 
1757 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
1758  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
1759  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
1760 }
1761 
1762 
1763 void MacroAssembler::IllegalOperation(int num_arguments) {
1764  if (num_arguments > 0) {
1765  add(esp, Immediate(num_arguments * kPointerSize));
1766  }
1767  mov(eax, Immediate(isolate()->factory()->undefined_value()));
1768 }
1769 
1770 
1771 void MacroAssembler::IndexFromHash(Register hash, Register index) {
1772  // The assert checks that the constants for the maximum number of digits
1773  // for an array index cached in the hash field and the number of bits
1774  // reserved for it do not conflict.
1775  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
1776  (1 << String::kArrayIndexValueBits));
1777  // We want the smi-tagged index in the index register. kArrayIndexValueMask has zeros in
1778  // the low kHashShift bits.
1779  and_(hash, String::kArrayIndexValueMask);
1780  STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
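  // Shifting right by kHashShift would produce the raw index; shifting by
  // one bit less leaves the value doubled, which (with kSmiTag == 0 and
  // kSmiTagSize == 1) is exactly its smi encoding, so no separate SmiTag
  // step is needed.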
1781  if (String::kHashShift > kSmiTagSize) {
1782  shr(hash, String::kHashShift - kSmiTagSize);
1783  }
1784  if (!index.is(hash)) {
1785  mov(index, hash);
1786  }
1787 }
1788 
1789 
1790 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
1791  CallRuntime(Runtime::FunctionForId(id), num_arguments);
1792 }
1793 
1794 
1795 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1796  const Runtime::Function* function = Runtime::FunctionForId(id);
1797  Set(eax, Immediate(function->nargs));
1798  mov(ebx, Immediate(ExternalReference(function, isolate())));
1799  CEntryStub ces(1, kSaveFPRegs);
1800  CallStub(&ces);
1801 }
1802 
1803 
1804 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1805  int num_arguments) {
1806  // If the expected number of arguments of the runtime function is
1807  // constant, we check that the actual number of arguments match the
1808  // expectation.
1809  if (f->nargs >= 0 && f->nargs != num_arguments) {
1810  IllegalOperation(num_arguments);
1811  return;
1812  }
1813 
1814  // TODO(1236192): Most runtime routines don't need the number of
1815  // arguments passed in because it is constant. At some point we
1816  // should remove this need and make the runtime routine entry code
1817  // smarter.
1818  Set(eax, Immediate(num_arguments));
1819  mov(ebx, Immediate(ExternalReference(f, isolate())));
1820  CEntryStub ces(1);
1821  CallStub(&ces);
1822 }
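// Note on the convention used above: eax carries the argument count and ebx
// the ExternalReference of the runtime entry before control is transferred
// to the CEntryStub, which performs the actual C++ call.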
1823 
1824 
1825 void MacroAssembler::CallExternalReference(ExternalReference ref,
1826  int num_arguments) {
1827  mov(eax, Immediate(num_arguments));
1828  mov(ebx, Immediate(ref));
1829 
1830  CEntryStub stub(1);
1831  CallStub(&stub);
1832 }
1833 
1834 
1835 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1836  int num_arguments,
1837  int result_size) {
1838  // TODO(1236192): Most runtime routines don't need the number of
1839  // arguments passed in because it is constant. At some point we
1840  // should remove this need and make the runtime routine entry code
1841  // smarter.
1842  Set(eax, Immediate(num_arguments));
1843  JumpToExternalReference(ext);
1844 }
1845 
1846 
1847 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1848  int num_arguments,
1849  int result_size) {
1850  TailCallExternalReference(ExternalReference(fid, isolate()),
1851  num_arguments,
1852  result_size);
1853 }
1854 
1855 
1856 // If true, a Handle<T> returned by value from a function with cdecl calling
1857 // convention will be returned directly, as the value of its location_ field,
1858 // in register eax.
1859 // If false, it is returned as a pointer to a memory region preallocated by
1860 // the caller. A pointer to this region should be passed to the function as
1861 // an implicit first argument.
1862 #if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
1863 static const bool kReturnHandlesDirectly = true;
1864 #else
1865 static const bool kReturnHandlesDirectly = false;
1866 #endif
1867 
1868 
1869 Operand ApiParameterOperand(int index) {
1870  return Operand(
1871  esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
1872 }
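// When handles are returned indirectly, stack slot 0 holds the hidden pointer
// to the output slot (see PrepareCallApiFunction below), so ApiParameterOperand
// skips one slot to reach the explicit API arguments.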
1873 
1874 
1875 void MacroAssembler::PrepareCallApiFunction(int argc) {
1876  if (kReturnHandlesDirectly) {
1877  EnterApiExitFrame(argc);
1878  // When handles are returned directly we don't have to allocate extra
1879  // space for and pass an out parameter.
1880  if (emit_debug_code()) {
1881  mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
1882  }
1883  } else {
1884  // We allocate two additional slots: return value and pointer to it.
1885  EnterApiExitFrame(argc + 2);
1886 
1887  // The argument slots are filled as follows:
1888  //
1889  // n + 1: output slot
1890  // n: arg n
1891  // ...
1892  // 1: arg1
1893  // 0: pointer to the output slot
1894 
1895  lea(esi, Operand(esp, (argc + 1) * kPointerSize));
1896  mov(Operand(esp, 0 * kPointerSize), esi);
1897  if (emit_debug_code()) {
1898  mov(Operand(esi, 0), Immediate(0));
1899  }
1900  }
1901 }
1902 
1903 
1904 void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
1905  int stack_space) {
1906  ExternalReference next_address =
1907  ExternalReference::handle_scope_next_address();
1908  ExternalReference limit_address =
1909  ExternalReference::handle_scope_limit_address();
1910  ExternalReference level_address =
1911  ExternalReference::handle_scope_level_address();
1912 
1913  // Allocate HandleScope in callee-save registers.
1914  mov(ebx, Operand::StaticVariable(next_address));
1915  mov(edi, Operand::StaticVariable(limit_address));
1916  add(Operand::StaticVariable(level_address), Immediate(1));
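  // ebx now caches the previous handle-scope 'next' pointer and edi the
  // previous 'limit'; both are restored on the way out, and if 'limit' has
  // changed the allocated extensions are deleted before leaving the frame.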
1917 
1918  // Call the api function.
1919  call(function_address, RelocInfo::RUNTIME_ENTRY);
1920 
1921  if (!kReturnHandlesDirectly) {
1922  // PrepareCallApiFunction saved pointer to the output slot into
1923  // callee-save register esi.
1924  mov(eax, Operand(esi, 0));
1925  }
1926 
1927  Label empty_handle;
1928  Label prologue;
1929  Label promote_scheduled_exception;
1930  Label delete_allocated_handles;
1931  Label leave_exit_frame;
1932 
1933  // Check if the result handle holds 0.
1934  test(eax, eax);
1935  j(zero, &empty_handle);
1936  // It was non-zero. Dereference to get the result value.
1937  mov(eax, Operand(eax, 0));
1938  bind(&prologue);
1939  // No more valid handles (the result handle was the last one). Restore
1940  // previous handle scope.
1941  mov(Operand::StaticVariable(next_address), ebx);
1942  sub(Operand::StaticVariable(level_address), Immediate(1));
1943  Assert(above_equal, "Invalid HandleScope level");
1944  cmp(edi, Operand::StaticVariable(limit_address));
1945  j(not_equal, &delete_allocated_handles);
1946  bind(&leave_exit_frame);
1947 
1948  // Check if the function scheduled an exception.
1949  ExternalReference scheduled_exception_address =
1950  ExternalReference::scheduled_exception_address(isolate());
1951  cmp(Operand::StaticVariable(scheduled_exception_address),
1952  Immediate(isolate()->factory()->the_hole_value()));
1953  j(not_equal, &promote_scheduled_exception);
1954 
1955 #if ENABLE_EXTRA_CHECKS
1956  // Check if the function returned a valid JavaScript value.
1957  Label ok;
1958  Register return_value = eax;
1959  Register map = ecx;
1960 
1961  JumpIfSmi(return_value, &ok, Label::kNear);
1962  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
1963 
1964  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
1965  j(below, &ok, Label::kNear);
1966 
1967  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
1968  j(above_equal, &ok, Label::kNear);
1969 
1970  cmp(map, isolate()->factory()->heap_number_map());
1971  j(equal, &ok, Label::kNear);
1972 
1973  cmp(return_value, isolate()->factory()->undefined_value());
1974  j(equal, &ok, Label::kNear);
1975 
1976  cmp(return_value, isolate()->factory()->true_value());
1977  j(equal, &ok, Label::kNear);
1978 
1979  cmp(return_value, isolate()->factory()->false_value());
1980  j(equal, &ok, Label::kNear);
1981 
1982  cmp(return_value, isolate()->factory()->null_value());
1983  j(equal, &ok, Label::kNear);
1984 
1985  Abort("API call returned invalid object");
1986 
1987  bind(&ok);
1988 #endif
1989 
1990  LeaveApiExitFrame();
1991  ret(stack_space * kPointerSize);
1992 
1993  bind(&empty_handle);
1994  // It was zero; the result is undefined.
1995  mov(eax, isolate()->factory()->undefined_value());
1996  jmp(&prologue);
1997 
1998  bind(&promote_scheduled_exception);
1999  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
2000 
2001  // HandleScope limit has changed. Delete allocated extensions.
2002  ExternalReference delete_extensions =
2003  ExternalReference::delete_handle_scope_extensions(isolate());
2004  bind(&delete_allocated_handles);
2005  mov(Operand::StaticVariable(limit_address), edi);
2006  mov(edi, eax);
2007  mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
2008  mov(eax, Immediate(delete_extensions));
2009  call(eax);
2010  mov(eax, edi);
2011  jmp(&leave_exit_frame);
2012 }
2013 
2014 
2015 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
2016  // Set the entry point and jump to the C entry runtime stub.
2017  mov(ebx, Immediate(ext));
2018  CEntryStub ces(1);
2019  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
2020 }
2021 
2022 
2023 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
2024  // This macro takes the dst register to make the code more readable
2025  // at the call sites. However, the dst register has to be ecx to
2026  // follow the calling convention which requires the call type to be
2027  // in ecx.
2028  ASSERT(dst.is(ecx));
2029  if (call_kind == CALL_AS_FUNCTION) {
2030  // Set to some non-zero smi by updating the least significant
2031  // byte.
2032  mov_b(dst, 1 << kSmiTagSize);
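  // With kSmiTagSize == 1 this writes 2 into the low byte, i.e.
  // Smi::FromInt(1), marking the call as CALL_AS_FUNCTION.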
2033  } else {
2034  // Set to smi zero by clearing the register.
2035  xor_(dst, dst);
2036  }
2037 }
2038 
2039 
2040 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2041  const ParameterCount& actual,
2042  Handle<Code> code_constant,
2043  const Operand& code_operand,
2044  Label* done,
2045  bool* definitely_mismatches,
2046  InvokeFlag flag,
2047  Label::Distance done_near,
2048  const CallWrapper& call_wrapper,
2049  CallKind call_kind) {
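  // Three outcomes are possible: the counts definitely match (fall through to
  // the actual invocation), they definitely mismatch (*definitely_mismatches
  // is set and the arguments adaptor is entered unconditionally), or they must
  // be compared at run time, in which case a match jumps past the adaptor call.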
2050  bool definitely_matches = false;
2051  *definitely_mismatches = false;
2052  Label invoke;
2053  if (expected.is_immediate()) {
2054  ASSERT(actual.is_immediate());
2055  if (expected.immediate() == actual.immediate()) {
2056  definitely_matches = true;
2057  } else {
2058  mov(eax, actual.immediate());
2059  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2060  if (expected.immediate() == sentinel) {
2061  // Don't worry about adapting arguments for builtins that
2062  // don't want that done. Skip adaptation code by making it look
2063  // like we have a match between expected and actual number of
2064  // arguments.
2065  definitely_matches = true;
2066  } else {
2067  *definitely_mismatches = true;
2068  mov(ebx, expected.immediate());
2069  }
2070  }
2071  } else {
2072  if (actual.is_immediate()) {
2073  // Expected is in register, actual is immediate. This is the
2074  // case when we invoke function values without going through the
2075  // IC mechanism.
2076  cmp(expected.reg(), actual.immediate());
2077  j(equal, &invoke);
2078  ASSERT(expected.reg().is(ebx));
2079  mov(eax, actual.immediate());
2080  } else if (!expected.reg().is(actual.reg())) {
2081  // Both expected and actual are in (different) registers. This
2082  // is the case when we invoke functions using call and apply.
2083  cmp(expected.reg(), actual.reg());
2084  j(equal, &invoke);
2085  ASSERT(actual.reg().is(eax));
2086  ASSERT(expected.reg().is(ebx));
2087  }
2088  }
2089 
2090  if (!definitely_matches) {
2091  Handle<Code> adaptor =
2092  isolate()->builtins()->ArgumentsAdaptorTrampoline();
2093  if (!code_constant.is_null()) {
2094  mov(edx, Immediate(code_constant));
2095  add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2096  } else if (!code_operand.is_reg(edx)) {
2097  mov(edx, code_operand);
2098  }
2099 
2100  if (flag == CALL_FUNCTION) {
2101  call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
2102  SetCallKind(ecx, call_kind);
2103  call(adaptor, RelocInfo::CODE_TARGET);
2104  call_wrapper.AfterCall();
2105  if (!*definitely_mismatches) {
2106  jmp(done, done_near);
2107  }
2108  } else {
2109  SetCallKind(ecx, call_kind);
2110  jmp(adaptor, RelocInfo::CODE_TARGET);
2111  }
2112  bind(&invoke);
2113  }
2114 }
2115 
2116 
2117 void MacroAssembler::InvokeCode(const Operand& code,
2118  const ParameterCount& expected,
2119  const ParameterCount& actual,
2120  InvokeFlag flag,
2121  const CallWrapper& call_wrapper,
2122  CallKind call_kind) {
2123  // You can't call a function without a valid frame.
2124  ASSERT(flag == JUMP_FUNCTION || has_frame());
2125 
2126  Label done;
2127  bool definitely_mismatches = false;
2128  InvokePrologue(expected, actual, Handle<Code>::null(), code,
2129  &done, &definitely_mismatches, flag, Label::kNear,
2130  call_wrapper, call_kind);
2131  if (!definitely_mismatches) {
2132  if (flag == CALL_FUNCTION) {
2133  call_wrapper.BeforeCall(CallSize(code));
2134  SetCallKind(ecx, call_kind);
2135  call(code);
2136  call_wrapper.AfterCall();
2137  } else {
2138  ASSERT(flag == JUMP_FUNCTION);
2139  SetCallKind(ecx, call_kind);
2140  jmp(code);
2141  }
2142  bind(&done);
2143  }
2144 }
2145 
2146 
2147 void MacroAssembler::InvokeCode(Handle<Code> code,
2148  const ParameterCount& expected,
2149  const ParameterCount& actual,
2150  RelocInfo::Mode rmode,
2151  InvokeFlag flag,
2152  const CallWrapper& call_wrapper,
2153  CallKind call_kind) {
2154  // You can't call a function without a valid frame.
2155  ASSERT(flag == JUMP_FUNCTION || has_frame());
2156 
2157  Label done;
2158  Operand dummy(eax, 0);
2159  bool definitely_mismatches = false;
2160  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
2161  flag, Label::kNear, call_wrapper, call_kind);
2162  if (!definitely_mismatches) {
2163  if (flag == CALL_FUNCTION) {
2164  call_wrapper.BeforeCall(CallSize(code, rmode));
2165  SetCallKind(ecx, call_kind);
2166  call(code, rmode);
2167  call_wrapper.AfterCall();
2168  } else {
2169  ASSERT(flag == JUMP_FUNCTION);
2170  SetCallKind(ecx, call_kind);
2171  jmp(code, rmode);
2172  }
2173  bind(&done);
2174  }
2175 }
2176 
2177 
2178 void MacroAssembler::InvokeFunction(Register fun,
2179  const ParameterCount& actual,
2180  InvokeFlag flag,
2181  const CallWrapper& call_wrapper,
2182  CallKind call_kind) {
2183  // You can't call a function without a valid frame.
2184  ASSERT(flag == JUMP_FUNCTION || has_frame());
2185 
2186  ASSERT(fun.is(edi));
2187  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2188  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2189  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2190  SmiUntag(ebx);
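  // The formal parameter count is stored as a smi in the SharedFunctionInfo,
  // so it is untagged before being used as the expected count in ebx.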
2191 
2192  ParameterCount expected(ebx);
2193  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2194  expected, actual, flag, call_wrapper, call_kind);
2195 }
2196 
2197 
2198 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2199  const ParameterCount& actual,
2200  InvokeFlag flag,
2201  const CallWrapper& call_wrapper,
2202  CallKind call_kind) {
2203  // You can't call a function without a valid frame.
2204  ASSERT(flag == JUMP_FUNCTION || has_frame());
2205 
2206  // Get the function and setup the context.
2207  LoadHeapObject(edi, function);
2208  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2209 
2210  ParameterCount expected(function->shared()->formal_parameter_count());
2211  // We call indirectly through the code field in the function to
2212  // allow recompilation to take effect without changing any of the
2213  // call sites.
2214  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2215  expected, actual, flag, call_wrapper, call_kind);
2216 }
2217 
2218 
2219 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2220  InvokeFlag flag,
2221  const CallWrapper& call_wrapper) {
2222  // You can't call a builtin without a valid frame.
2223  ASSERT(flag == JUMP_FUNCTION || has_frame());
2224 
2225  // Rely on the assertion to check that the number of provided
2226  // arguments matches the expected number of arguments. Fake a
2227  // parameter count to avoid emitting code to do the check.
2228  ParameterCount expected(0);
2229  GetBuiltinFunction(edi, id);
2230  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2231  expected, expected, flag, call_wrapper, CALL_AS_METHOD);
2232 }
2233 
2234 
2235 void MacroAssembler::GetBuiltinFunction(Register target,
2236  Builtins::JavaScript id) {
2237  // Load the JavaScript builtin function from the builtins object.
2238  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2239  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
2240  mov(target, FieldOperand(target,
2241  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2242 }
2243 
2244 
2245 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2246  ASSERT(!target.is(edi));
2247  // Load the JavaScript builtin function from the builtins object.
2248  GetBuiltinFunction(edi, id);
2249  // Load the code entry point from the function into the target register.
2250  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
2251 }
2252 
2253 
2254 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2255  if (context_chain_length > 0) {
2256  // Move up the chain of contexts to the context containing the slot.
2257  mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2258  for (int i = 1; i < context_chain_length; i++) {
2259  mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2260  }
2261  } else {
2262  // Slot is in the current function context. Move it into the
2263  // destination register in case we store into it (the write barrier
2264  // cannot be allowed to destroy the context in esi).
2265  mov(dst, esi);
2266  }
2267 
2268  // We should not have found a with context by walking the context chain
2269  // (i.e., the static scope chain and runtime context chain do not agree).
2270  // A variable occurring in such a scope should have slot type LOOKUP and
2271  // not CONTEXT.
2272  if (emit_debug_code()) {
2273  cmp(FieldOperand(dst, HeapObject::kMapOffset),
2274  isolate()->factory()->with_context_map());
2275  Check(not_equal, "Variable resolved to with context.");
2276  }
2277 }
2278 
2279 
2280 void MacroAssembler::LoadTransitionedArrayMapConditional(
2281  ElementsKind expected_kind,
2282  ElementsKind transitioned_kind,
2283  Register map_in_out,
2284  Register scratch,
2285  Label* no_map_match) {
2286  // Load the global or builtins object from the current context.
2287  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2288  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
2289 
2290  // Check that the function's map is the same as the expected cached map.
2291  mov(scratch, Operand(scratch,
2292  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2293 
2294  size_t offset = expected_kind * kPointerSize +
2295  FixedArrayBase::kHeaderSize;
2296  cmp(map_in_out, FieldOperand(scratch, offset));
2297  j(not_equal, no_map_match);
2298 
2299  // Use the transitioned cached map.
2300  offset = transitioned_kind * kPointerSize +
2301  FixedArrayBase::kHeaderSize;
2302  mov(map_in_out, FieldOperand(scratch, offset));
2303 }
2304 
2305 
2306 void MacroAssembler::LoadInitialArrayMap(
2307  Register function_in, Register scratch,
2308  Register map_out, bool can_have_holes) {
2309  ASSERT(!function_in.is(map_out));
2310  Label done;
2311  mov(map_out, FieldOperand(function_in,
2312  JSFunction::kPrototypeOrInitialMapOffset));
2313  if (!FLAG_smi_only_arrays) {
2314  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2315  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2316  kind,
2317  map_out,
2318  scratch,
2319  &done);
2320  } else if (can_have_holes) {
2321  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2322  FAST_HOLEY_SMI_ELEMENTS,
2323  map_out,
2324  scratch,
2325  &done);
2326  }
2327  bind(&done);
2328 }
2329 
2330 
2331 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2332  // Load the global or builtins object from the current context.
2333  mov(function,
2334  Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2335  // Load the native context from the global or builtins object.
2336  mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
2337  // Load the function from the native context.
2338  mov(function, Operand(function, Context::SlotOffset(index)));
2339 }
2340 
2341 
2342 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2343  Register map) {
2344  // Load the initial map. The global functions all have initial maps.
2345  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2346  if (emit_debug_code()) {
2347  Label ok, fail;
2348  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2349  jmp(&ok);
2350  bind(&fail);
2351  Abort("Global functions must have initial map");
2352  bind(&ok);
2353  }
2354 }
2355 
2356 
2357 // Store the value in register src in the safepoint register stack
2358 // slot for register dst.
2359 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2360  mov(SafepointRegisterSlot(dst), src);
2361 }
2362 
2363 
2364 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2365  mov(SafepointRegisterSlot(dst), src);
2366 }
2367 
2368 
2369 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2370  mov(dst, SafepointRegisterSlot(src));
2371 }
2372 
2373 
2374 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2375  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2376 }
2377 
2378 
2379 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2380  // The registers are pushed starting with the lowest encoding,
2381  // which means that lowest encodings are furthest away from
2382  // the stack pointer.
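  // For example, with kNumSafepointRegisters == 8 on ia32, eax (code 0) maps
  // to index 7, the slot farthest from esp.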
2383  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2384  return kNumSafepointRegisters - reg_code - 1;
2385 }
2386 
2387 
2388 void MacroAssembler::LoadHeapObject(Register result,
2389  Handle<HeapObject> object) {
2390  if (isolate()->heap()->InNewSpace(*object)) {
2391  Handle<JSGlobalPropertyCell> cell =
2392  isolate()->factory()->NewJSGlobalPropertyCell(object);
2393  mov(result, Operand::Cell(cell));
2394  } else {
2395  mov(result, object);
2396  }
2397 }
2398 
2399 
2400 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2401  if (isolate()->heap()->InNewSpace(*object)) {
2402  Handle<JSGlobalPropertyCell> cell =
2403  isolate()->factory()->NewJSGlobalPropertyCell(object);
2404  push(Operand::Cell(cell));
2405  } else {
2406  Push(object);
2407  }
2408 }
2409 
2410 
2411 void MacroAssembler::Ret() {
2412  ret(0);
2413 }
2414 
2415 
2416 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
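  // The ret instruction only encodes a 16-bit immediate, so larger drops pop
  // the return address, adjust esp explicitly, and push the address back
  // before returning.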
2417  if (is_uint16(bytes_dropped)) {
2418  ret(bytes_dropped);
2419  } else {
2420  pop(scratch);
2421  add(esp, Immediate(bytes_dropped));
2422  push(scratch);
2423  ret(0);
2424  }
2425 }
2426 
2427 
2428 void MacroAssembler::Drop(int stack_elements) {
2429  if (stack_elements > 0) {
2430  add(esp, Immediate(stack_elements * kPointerSize));
2431  }
2432 }
2433 
2434 
2435 void MacroAssembler::Move(Register dst, Register src) {
2436  if (!dst.is(src)) {
2437  mov(dst, src);
2438  }
2439 }
2440 
2441 
2442 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2443  if (FLAG_native_code_counters && counter->Enabled()) {
2444  mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2445  }
2446 }
2447 
2448 
2449 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2450  ASSERT(value > 0);
2451  if (FLAG_native_code_counters && counter->Enabled()) {
2452  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2453  if (value == 1) {
2454  inc(operand);
2455  } else {
2456  add(operand, Immediate(value));
2457  }
2458  }
2459 }
2460 
2461 
2462 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2463  ASSERT(value > 0);
2464  if (FLAG_native_code_counters && counter->Enabled()) {
2465  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2466  if (value == 1) {
2467  dec(operand);
2468  } else {
2469  sub(operand, Immediate(value));
2470  }
2471  }
2472 }
2473 
2474 
2475 void MacroAssembler::IncrementCounter(Condition cc,
2476  StatsCounter* counter,
2477  int value) {
2478  ASSERT(value > 0);
2479  if (FLAG_native_code_counters && counter->Enabled()) {
2480  Label skip;
2481  j(NegateCondition(cc), &skip);
2482  pushfd();
2483  IncrementCounter(counter, value);
2484  popfd();
2485  bind(&skip);
2486  }
2487 }
2488 
2489 
2490 void MacroAssembler::DecrementCounter(Condition cc,
2491  StatsCounter* counter,
2492  int value) {
2493  ASSERT(value > 0);
2494  if (FLAG_native_code_counters && counter->Enabled()) {
2495  Label skip;
2496  j(NegateCondition(cc), &skip);
2497  pushfd();
2498  DecrementCounter(counter, value);
2499  popfd();
2500  bind(&skip);
2501  }
2502 }
2503 
2504 
2505 void MacroAssembler::Assert(Condition cc, const char* msg) {
2506  if (emit_debug_code()) Check(cc, msg);
2507 }
2508 
2509 
2510 void MacroAssembler::AssertFastElements(Register elements) {
2511  if (emit_debug_code()) {
2512  Factory* factory = isolate()->factory();
2513  Label ok;
2514  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2515  Immediate(factory->fixed_array_map()));
2516  j(equal, &ok);
2517  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2518  Immediate(factory->fixed_double_array_map()));
2519  j(equal, &ok);
2520  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2521  Immediate(factory->fixed_cow_array_map()));
2522  j(equal, &ok);
2523  Abort("JSObject with fast elements map has slow elements");
2524  bind(&ok);
2525  }
2526 }
2527 
2528 
2529 void MacroAssembler::Check(Condition cc, const char* msg) {
2530  Label L;
2531  j(cc, &L);
2532  Abort(msg);
2533  // will not return here
2534  bind(&L);
2535 }
2536 
2537 
2538 void MacroAssembler::CheckStackAlignment() {
2539  int frame_alignment = OS::ActivationFrameAlignment();
2540  int frame_alignment_mask = frame_alignment - 1;
2541  if (frame_alignment > kPointerSize) {
2542  ASSERT(IsPowerOf2(frame_alignment));
2543  Label alignment_as_expected;
2544  test(esp, Immediate(frame_alignment_mask));
2545  j(zero, &alignment_as_expected);
2546  // Abort if stack is not aligned.
2547  int3();
2548  bind(&alignment_as_expected);
2549  }
2550 }
2551 
2552 
2553 void MacroAssembler::Abort(const char* msg) {
2554  // We want to pass the msg string like a smi to avoid GC
2555  // problems; however, msg is not guaranteed to be aligned
2556  // properly. Instead, we pass an aligned pointer that is
2557  // a proper v8 smi, but also pass the alignment difference
2558  // from the real pointer as a smi.
2559  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2560  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
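  // E.g. on ia32 (kSmiTagMask == 1, kSmiTag == 0) a msg pointer ending in an
  // odd byte is rounded down by one; the difference p1 - p0 (0 or 1) is passed
  // as a smi so the runtime can reconstruct the original pointer.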
2561  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2562 #ifdef DEBUG
2563  if (msg != NULL) {
2564  RecordComment("Abort message: ");
2565  RecordComment(msg);
2566  }
2567 #endif
2568 
2569  push(eax);
2570  push(Immediate(p0));
2571  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
2572  // Disable stub call restrictions to always allow calls to abort.
2573  if (!has_frame_) {
2574  // We don't actually want to generate a pile of code for this, so just
2575  // claim there is a stack frame, without generating one.
2576  FrameScope scope(this, StackFrame::NONE);
2577  CallRuntime(Runtime::kAbort, 2);
2578  } else {
2579  CallRuntime(Runtime::kAbort, 2);
2580  }
2581  // will not return here
2582  int3();
2583 }
2584 
2585 
2586 void MacroAssembler::LoadInstanceDescriptors(Register map,
2587  Register descriptors) {
2588  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2589 }
2590 
2591 
2592 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2593  mov(dst, FieldOperand(map, Map::kBitField3Offset));
2594  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2595 }
2596 
2597 
2598 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
2599  Register scratch,
2600  int power) {
2601  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
2602  HeapNumber::kExponentBits));
2603  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
2604  movd(dst, scratch);
2605  psllq(dst, HeapNumber::kMantissaBits);
2606 }
2607 
2608 
2609 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2610  Register instance_type,
2611  Register scratch,
2612  Label* failure) {
2613  if (!scratch.is(instance_type)) {
2614  mov(scratch, instance_type);
2615  }
2616  and_(scratch,
2617  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
2618  cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
2619  j(not_equal, failure);
2620 }
2621 
2622 
2623 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
2624  Register object2,
2625  Register scratch1,
2626  Register scratch2,
2627  Label* failure) {
2628  // Check that both objects are not smis.
2629  STATIC_ASSERT(kSmiTag == 0);
2630  mov(scratch1, object1);
2631  and_(scratch1, object2);
2632  JumpIfSmi(scratch1, failure);
2633 
2634  // Load instance type for both strings.
2635  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
2636  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
2637  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2638  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2639 
2640  // Check that both are flat ASCII strings.
2641  const int kFlatAsciiStringMask =
2642  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2643  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2644  // Interleave bits from both instance types and compare them in one check.
2645  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2646  and_(scratch1, kFlatAsciiStringMask);
2647  and_(scratch2, kFlatAsciiStringMask);
2648  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2649  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
2650  j(not_equal, failure);
2651 }
2652 
2653 
2654 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
2655  int frame_alignment = OS::ActivationFrameAlignment();
2656  if (frame_alignment != 0) {
2657  // Make stack end at alignment and make room for num_arguments words
2658  // and the original value of esp.
2659  mov(scratch, esp);
2660  sub(esp, Immediate((num_arguments + 1) * kPointerSize));
2661  ASSERT(IsPowerOf2(frame_alignment));
2662  and_(esp, -frame_alignment);
2663  mov(Operand(esp, num_arguments * kPointerSize), scratch);
2664  } else {
2665  sub(esp, Immediate(num_arguments * kPointerSize));
2666  }
2667 }
2668 
2669 
2670 void MacroAssembler::CallCFunction(ExternalReference function,
2671  int num_arguments) {
2672  // Trashing eax is ok as it will be the return value.
2673  mov(eax, Immediate(function));
2674  CallCFunction(eax, num_arguments);
2675 }
2676 
2677 
2678 void MacroAssembler::CallCFunction(Register function,
2679  int num_arguments) {
2680  ASSERT(has_frame());
2681  // Check stack alignment.
2682  if (emit_debug_code()) {
2683  CheckStackAlignment();
2684  }
2685 
2686  call(function);
2687  if (OS::ActivationFrameAlignment() != 0) {
2688  mov(esp, Operand(esp, num_arguments * kPointerSize));
2689  } else {
2690  add(esp, Immediate(num_arguments * kPointerSize));
2691  }
2692 }
2693 
2694 
2695 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
2696  if (r1.is(r2)) return true;
2697  if (r1.is(r3)) return true;
2698  if (r1.is(r4)) return true;
2699  if (r2.is(r3)) return true;
2700  if (r2.is(r4)) return true;
2701  if (r3.is(r4)) return true;
2702  return false;
2703 }
2704 
2705 
2706 CodePatcher::CodePatcher(byte* address, int size)
2707  : address_(address),
2708  size_(size),
2709  masm_(NULL, address, size + Assembler::kGap) {
2710  // Create a new macro assembler pointing to the address of the code to patch.
2711  // The size is adjusted with kGap in order for the assembler to generate size
2712  // bytes of instructions without failing with buffer size constraints.
2713  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2714 }
2715 
2716 
2717 CodePatcher::~CodePatcher() {
2718  // Indicate that code has changed.
2719  CPU::FlushICache(address_, size_);
2720 
2721  // Check that the code was patched as expected.
2722  ASSERT(masm_.pc_ == address_ + size_);
2723  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2724 }
2725 
2726 
2727 void MacroAssembler::CheckPageFlag(
2728  Register object,
2729  Register scratch,
2730  int mask,
2731  Condition cc,
2732  Label* condition_met,
2733  Label::Distance condition_met_distance) {
2734  ASSERT(cc == zero || cc == not_zero);
2735  if (scratch.is(object)) {
2736  and_(scratch, Immediate(~Page::kPageAlignmentMask));
2737  } else {
2738  mov(scratch, Immediate(~Page::kPageAlignmentMask));
2739  and_(scratch, object);
2740  }
2741  if (mask < (1 << kBitsPerByte)) {
2742  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
2743  static_cast<uint8_t>(mask));
2744  } else {
2745  test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2746  }
2747  j(cc, condition_met, condition_met_distance);
2748 }
2749 
2750 
2751 void MacroAssembler::CheckPageFlagForMap(
2752  Handle<Map> map,
2753  int mask,
2754  Condition cc,
2755  Label* condition_met,
2756  Label::Distance condition_met_distance) {
2757  ASSERT(cc == zero || cc == not_zero);
2758  Page* page = Page::FromAddress(map->address());
2759  ExternalReference reference(ExternalReference::page_flags(page));
2760  // The inlined static address check of the page's flags relies
2761  // on maps never being compacted.
2762  ASSERT(!isolate()->heap()->mark_compact_collector()->
2763  IsOnEvacuationCandidate(*map));
2764  if (mask < (1 << kBitsPerByte)) {
2765  test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
2766  } else {
2767  test(Operand::StaticVariable(reference), Immediate(mask));
2768  }
2769  j(cc, condition_met, condition_met_distance);
2770 }
2771 
2772 
2773 void MacroAssembler::JumpIfBlack(Register object,
2774  Register scratch0,
2775  Register scratch1,
2776  Label* on_black,
2777  Label::Distance on_black_near) {
2778  HasColor(object, scratch0, scratch1,
2779  on_black, on_black_near,
2780  1, 0); // kBlackBitPattern.
2781  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
2782 }
2783 
2784 
2785 void MacroAssembler::HasColor(Register object,
2786  Register bitmap_scratch,
2787  Register mask_scratch,
2788  Label* has_color,
2789  Label::Distance has_color_distance,
2790  int first_bit,
2791  int second_bit) {
2792  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
2793 
2794  GetMarkBits(object, bitmap_scratch, mask_scratch);
2795 
2796  Label other_color, word_boundary;
2797  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2798  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
2799  add(mask_scratch, mask_scratch); // Shift left 1 by adding.
2800  j(zero, &word_boundary, Label::kNear);
2801  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2802  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2803  jmp(&other_color, Label::kNear);
2804 
2805  bind(&word_boundary);
2806  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
2807 
2808  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2809  bind(&other_color);
2810 }
2811 
2812 
2813 void MacroAssembler::GetMarkBits(Register addr_reg,
2814  Register bitmap_reg,
2815  Register mask_reg) {
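  // On exit: bitmap_reg holds the page start plus the byte offset of the
  // object's marking-bitmap cell (consumers add MemoryChunk::kHeaderSize),
  // and mask_reg holds a single set bit selecting the object's first mark
  // bit within that cell. ecx is clobbered.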
2816  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
2817  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
2818  and_(bitmap_reg, addr_reg);
2819  mov(ecx, addr_reg);
2820  int shift =
2821  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
2822  shr(ecx, shift);
2823  and_(ecx,
2824  (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
2825 
2826  add(bitmap_reg, ecx);
2827  mov(ecx, addr_reg);
2828  shr(ecx, kPointerSizeLog2);
2829  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
2830  mov(mask_reg, Immediate(1));
2831  shl_cl(mask_reg);
2832 }
2833 
2834 
2835 void MacroAssembler::EnsureNotWhite(
2836  Register value,
2837  Register bitmap_scratch,
2838  Register mask_scratch,
2839  Label* value_is_white_and_not_data,
2840  Label::Distance distance) {
2841  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
2842  GetMarkBits(value, bitmap_scratch, mask_scratch);
2843 
2844  // If the value is black or grey we don't need to do anything.
2845  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2846  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
2847  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
2848  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2849 
2850  Label done;
2851 
2852  // Since both black and grey have a 1 in the first position and white does
2853  // not have a 1 there, we only need to check one bit.
2854  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2855  j(not_zero, &done, Label::kNear);
2856 
2857  if (emit_debug_code()) {
2858  // Check for impossible bit pattern.
2859  Label ok;
2860  push(mask_scratch);
2861  // shl. May overflow making the check conservative.
2862  add(mask_scratch, mask_scratch);
2863  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2864  j(zero, &ok, Label::kNear);
2865  int3();
2866  bind(&ok);
2867  pop(mask_scratch);
2868  }
2869 
2870  // Value is white. We check whether it is data that doesn't need scanning.
2871  // Currently only checks for HeapNumber and non-cons strings.
2872  Register map = ecx; // Holds map while checking type.
2873  Register length = ecx; // Holds length of object after checking type.
2874  Label not_heap_number;
2875  Label is_data_object;
2876 
2877  // Check for heap-number
2878  mov(map, FieldOperand(value, HeapObject::kMapOffset));
2879  cmp(map, FACTORY->heap_number_map());
2880  j(not_equal, &not_heap_number, Label::kNear);
2881  mov(length, Immediate(HeapNumber::kSize));
2882  jmp(&is_data_object, Label::kNear);
2883 
2884  bind(&not_heap_number);
2885  // Check for strings.
2886  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 3);
2887  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
2888  // If it's a string and it's not a cons string then it's an object containing
2889  // no GC pointers.
2890  Register instance_type = ecx;
2891  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
2892  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
2893  j(not_zero, value_is_white_and_not_data);
2894  // It's a non-indirect (non-cons and non-slice) string.
2895  // If it's external, the length is just ExternalString::kSize.
2896  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
2897  Label not_external;
2898  // External strings are the only ones with the kExternalStringTag bit
2899  // set.
2900  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
2901  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
2902  test_b(instance_type, kExternalStringTag);
2903  j(zero, &not_external, Label::kNear);
2904  mov(length, Immediate(ExternalString::kSize));
2905  jmp(&is_data_object, Label::kNear);
2906 
2907  bind(&not_external);
2908  // Sequential string, either ASCII or UC16.
2909  ASSERT(kAsciiStringTag == 0x04);
2910  and_(length, Immediate(kStringEncodingMask));
2911  xor_(length, Immediate(kStringEncodingMask));
2912  add(length, Immediate(0x04));
2913  // Value is now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
2914  // by 2. If we multiply the string length as smi by this, it still
2915  // won't overflow a 32-bit value.
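  // Worked example: a 10-character ASCII string gives a factor of 4 above;
  // the multiply below then computes 4 * Smi(10) = 80, the shift reduces it
  // to 10 payload bytes, and the header size is added before rounding up to
  // the allocation alignment.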
2916  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
2917  ASSERT(SeqAsciiString::kMaxSize <=
2918  static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
2919  imul(length, FieldOperand(value, String::kLengthOffset));
2920  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
2921  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
2922  and_(length, Immediate(~kObjectAlignmentMask));
2923 
2924  bind(&is_data_object);
2925  // Value is a data object, and it is white. Mark it black. Since we know
2926  // that the object is white we can make it black by flipping one bit.
2927  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
2928 
2929  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
2930  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
2931  length);
2932  if (emit_debug_code()) {
2933  mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
2934  cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
2935  Check(less_equal, "Live Bytes Count overflow chunk size");
2936  }
2937 
2938  bind(&done);
2939 }
2940 
2941 
2942 void MacroAssembler::EnumLength(Register dst, Register map) {
2943  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
2944  mov(dst, FieldOperand(map, Map::kBitField3Offset));
2945  and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
2946 }
2947 
2948 
2949 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
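  // Walks the prototype chain starting from the object in eax. Bails out to
  // |call_runtime| if any map lacks a valid (or, for non-receiver objects,
  // empty) enum cache, or if any object on the chain has elements.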
2950  Label next, start;
2951  mov(ecx, eax);
2952 
2953  // Check if the enum length field is properly initialized, indicating that
2954  // there is an enum cache.
2955  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
2956 
2957  EnumLength(edx, ebx);
2958  cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
2959  j(equal, call_runtime);
2960 
2961  jmp(&start);
2962 
2963  bind(&next);
2964  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
2965 
2966  // For all objects but the receiver, check that the cache is empty.
2967  EnumLength(edx, ebx);
2968  cmp(edx, Immediate(Smi::FromInt(0)));
2969  j(not_equal, call_runtime);
2970 
2971  bind(&start);
2972 
2973  // Check that there are no elements. Register ecx contains the current JS
2974  // object we've reached through the prototype chain.
2975  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
2976  cmp(ecx, isolate()->factory()->empty_fixed_array());
2977  j(not_equal, call_runtime);
2978 
2979  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
2980  cmp(ecx, isolate()->factory()->null_value());
2981  j(not_equal, &next);
2982 }
2983 
2984 } } // namespace v8::internal
2985 
2986 #endif // V8_TARGET_ARCH_IA32