v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
macro-assembler-ia32.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_IA32
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "cpu-profiler.h"
35 #include "debug.h"
36 #include "isolate-inl.h"
37 #include "runtime.h"
38 #include "serialize.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 // -------------------------------------------------------------------------
44 // MacroAssembler implementation.
45 
46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
47  : Assembler(arg_isolate, buffer, size),
48  generating_stub_(false),
49  has_frame_(false) {
50  if (isolate() != NULL) {
51  // TODO(titzer): should we just use a null handle here instead?
52  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
53  isolate());
54  }
55 }
56 
57 
58 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
59  ASSERT(!r.IsDouble());
60  if (r.IsInteger8()) {
61  movsx_b(dst, src);
62  } else if (r.IsUInteger8()) {
63  movzx_b(dst, src);
64  } else if (r.IsInteger16()) {
65  movsx_w(dst, src);
66  } else if (r.IsUInteger16()) {
67  movzx_w(dst, src);
68  } else {
69  mov(dst, src);
70  }
71 }
72 
73 
74 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
75  ASSERT(!r.IsDouble());
76  if (r.IsInteger8() || r.IsUInteger8()) {
77  mov_b(dst, src);
78  } else if (r.IsInteger16() || r.IsUInteger16()) {
79  mov_w(dst, src);
80  } else {
81  mov(dst, src);
82  }
83 }
84 
85 
86 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
87  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
88  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
89  mov(destination, value);
90  return;
91  }
92  ExternalReference roots_array_start =
93  ExternalReference::roots_array_start(isolate());
94  mov(destination, Immediate(index));
95  mov(destination, Operand::StaticArray(destination,
97  roots_array_start));
98 }
99 
100 
101 void MacroAssembler::StoreRoot(Register source,
102  Register scratch,
103  Heap::RootListIndex index) {
104  ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
105  ExternalReference roots_array_start =
106  ExternalReference::roots_array_start(isolate());
107  mov(scratch, Immediate(index));
108  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
109  source);
110 }
111 
112 
113 void MacroAssembler::CompareRoot(Register with,
114  Register scratch,
115  Heap::RootListIndex index) {
116  ExternalReference roots_array_start =
117  ExternalReference::roots_array_start(isolate());
118  mov(scratch, Immediate(index));
119  cmp(with, Operand::StaticArray(scratch,
121  roots_array_start));
122 }
123 
124 
125 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
126  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
127  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
128  cmp(with, value);
129 }
130 
131 
132 void MacroAssembler::CompareRoot(const Operand& with,
133  Heap::RootListIndex index) {
134  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
135  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
136  cmp(with, value);
137 }
138 
139 
140 void MacroAssembler::InNewSpace(
141  Register object,
142  Register scratch,
143  Condition cc,
144  Label* condition_met,
145  Label::Distance condition_met_distance) {
146  ASSERT(cc == equal || cc == not_equal);
147  if (scratch.is(object)) {
148  and_(scratch, Immediate(~Page::kPageAlignmentMask));
149  } else {
150  mov(scratch, Immediate(~Page::kPageAlignmentMask));
151  and_(scratch, object);
152  }
153  // Check that we can use a test_b.
154  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
155  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
156  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
157  | (1 << MemoryChunk::IN_TO_SPACE);
158  // If non-zero, the page belongs to new-space.
159  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
160  static_cast<uint8_t>(mask));
161  j(cc, condition_met, condition_met_distance);
162 }
163 
164 
165 void MacroAssembler::RememberedSetHelper(
166  Register object, // Only used for debug checks.
167  Register addr,
168  Register scratch,
169  SaveFPRegsMode save_fp,
170  MacroAssembler::RememberedSetFinalAction and_then) {
171  Label done;
172  if (emit_debug_code()) {
173  Label ok;
174  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
175  int3();
176  bind(&ok);
177  }
178  // Load store buffer top.
179  ExternalReference store_buffer =
180  ExternalReference::store_buffer_top(isolate());
181  mov(scratch, Operand::StaticVariable(store_buffer));
182  // Store pointer to buffer.
183  mov(Operand(scratch, 0), addr);
184  // Increment buffer top.
185  add(scratch, Immediate(kPointerSize));
186  // Write back new top of buffer.
187  mov(Operand::StaticVariable(store_buffer), scratch);
188  // Call stub on end of buffer.
189  // Check for end of buffer.
190  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
191  if (and_then == kReturnAtEnd) {
192  Label buffer_overflowed;
193  j(not_equal, &buffer_overflowed, Label::kNear);
194  ret(0);
195  bind(&buffer_overflowed);
196  } else {
197  ASSERT(and_then == kFallThroughAtEnd);
198  j(equal, &done, Label::kNear);
199  }
200  StoreBufferOverflowStub store_buffer_overflow =
201  StoreBufferOverflowStub(save_fp);
202  CallStub(&store_buffer_overflow);
203  if (and_then == kReturnAtEnd) {
204  ret(0);
205  } else {
206  ASSERT(and_then == kFallThroughAtEnd);
207  bind(&done);
208  }
209 }
210 
211 
212 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
213  XMMRegister scratch_reg,
214  Register result_reg) {
215  Label done;
216  Label conv_failure;
217  xorps(scratch_reg, scratch_reg);
218  cvtsd2si(result_reg, input_reg);
219  test(result_reg, Immediate(0xFFFFFF00));
220  j(zero, &done, Label::kNear);
221  cmp(result_reg, Immediate(0x1));
222  j(overflow, &conv_failure, Label::kNear);
223  mov(result_reg, Immediate(0));
224  setcc(sign, result_reg);
225  sub(result_reg, Immediate(1));
226  and_(result_reg, Immediate(255));
227  jmp(&done, Label::kNear);
228  bind(&conv_failure);
229  Move(result_reg, Immediate(0));
230  ucomisd(input_reg, scratch_reg);
231  j(below, &done, Label::kNear);
232  Move(result_reg, Immediate(255));
233  bind(&done);
234 }
235 
236 
237 void MacroAssembler::ClampUint8(Register reg) {
238  Label done;
239  test(reg, Immediate(0xFFFFFF00));
240  j(zero, &done, Label::kNear);
241  setcc(negative, reg); // 1 if negative, 0 if positive.
242  dec_b(reg); // 0 if negative, 255 if positive.
243  bind(&done);
244 }
245 
246 
247 void MacroAssembler::SlowTruncateToI(Register result_reg,
248  Register input_reg,
249  int offset) {
250  DoubleToIStub stub(input_reg, result_reg, offset, true);
251  call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
252 }
253 
254 
255 void MacroAssembler::TruncateDoubleToI(Register result_reg,
256  XMMRegister input_reg) {
257  Label done;
258  cvttsd2si(result_reg, Operand(input_reg));
259  cmp(result_reg, 0x1);
260  j(no_overflow, &done, Label::kNear);
261 
262  sub(esp, Immediate(kDoubleSize));
263  movsd(MemOperand(esp, 0), input_reg);
264  SlowTruncateToI(result_reg, esp, 0);
265  add(esp, Immediate(kDoubleSize));
266  bind(&done);
267 }
268 
269 
270 void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
271  sub(esp, Immediate(kDoubleSize));
272  fst_d(MemOperand(esp, 0));
273  SlowTruncateToI(result_reg, esp, 0);
274  add(esp, Immediate(kDoubleSize));
275 }
276 
277 
278 void MacroAssembler::X87TOSToI(Register result_reg,
279  MinusZeroMode minus_zero_mode,
280  Label* conversion_failed,
281  Label::Distance dst) {
282  Label done;
283  sub(esp, Immediate(kPointerSize));
284  fld(0);
285  fist_s(MemOperand(esp, 0));
286  fild_s(MemOperand(esp, 0));
287  pop(result_reg);
288  FCmp();
289  j(not_equal, conversion_failed, dst);
290  j(parity_even, conversion_failed, dst);
291  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
292  test(result_reg, Operand(result_reg));
293  j(not_zero, &done, Label::kNear);
294  // To check for minus zero, we load the value again as float, and check
295  // if that is still 0.
296  sub(esp, Immediate(kPointerSize));
297  fst_s(MemOperand(esp, 0));
298  pop(result_reg);
299  test(result_reg, Operand(result_reg));
300  j(not_zero, conversion_failed, dst);
301  }
302  bind(&done);
303 }
304 
305 
306 void MacroAssembler::DoubleToI(Register result_reg,
307  XMMRegister input_reg,
308  XMMRegister scratch,
309  MinusZeroMode minus_zero_mode,
310  Label* conversion_failed,
311  Label::Distance dst) {
312  ASSERT(!input_reg.is(scratch));
313  cvttsd2si(result_reg, Operand(input_reg));
314  Cvtsi2sd(scratch, Operand(result_reg));
315  ucomisd(scratch, input_reg);
316  j(not_equal, conversion_failed, dst);
317  j(parity_even, conversion_failed, dst); // NaN.
318  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
319  Label done;
320  // The integer converted back is equal to the original. We
321  // only have to test if we got -0 as an input.
322  test(result_reg, Operand(result_reg));
323  j(not_zero, &done, Label::kNear);
324  movmskpd(result_reg, input_reg);
325  // Bit 0 contains the sign of the double in input_reg.
326  // If input was positive, we are ok and return 0, otherwise
327  // jump to conversion_failed.
328  and_(result_reg, 1);
329  j(not_zero, conversion_failed, dst);
330  bind(&done);
331  }
332 }
333 
334 
335 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
336  Register input_reg) {
337  Label done, slow_case;
338 
339  if (CpuFeatures::IsSupported(SSE3)) {
340  CpuFeatureScope scope(this, SSE3);
341  Label convert;
342  // Use more powerful conversion when sse3 is available.
343  // Load x87 register with heap number.
344  fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
345  // Get exponent alone and check for too-big exponent.
346  mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
347  and_(result_reg, HeapNumber::kExponentMask);
348  const uint32_t kTooBigExponent =
349  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
350  cmp(Operand(result_reg), Immediate(kTooBigExponent));
351  j(greater_equal, &slow_case, Label::kNear);
352 
353  // Reserve space for 64 bit answer.
354  sub(Operand(esp), Immediate(kDoubleSize));
355  // Do conversion, which cannot fail because we checked the exponent.
356  fisttp_d(Operand(esp, 0));
357  mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
358  add(Operand(esp), Immediate(kDoubleSize));
359  jmp(&done, Label::kNear);
360 
361  // Slow case.
362  bind(&slow_case);
363  if (input_reg.is(result_reg)) {
364  // Input is clobbered. Restore number from fpu stack
365  sub(Operand(esp), Immediate(kDoubleSize));
366  fstp_d(Operand(esp, 0));
367  SlowTruncateToI(result_reg, esp, 0);
368  add(esp, Immediate(kDoubleSize));
369  } else {
370  fstp(0);
371  SlowTruncateToI(result_reg, input_reg);
372  }
373  } else if (CpuFeatures::IsSupported(SSE2)) {
374  CpuFeatureScope scope(this, SSE2);
375  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
376  cvttsd2si(result_reg, Operand(xmm0));
377  cmp(result_reg, 0x1);
378  j(no_overflow, &done, Label::kNear);
379  // Check if the input was 0x8000000 (kMinInt).
380  // If no, then we got an overflow and we deoptimize.
381  ExternalReference min_int = ExternalReference::address_of_min_int();
382  ucomisd(xmm0, Operand::StaticVariable(min_int));
383  j(not_equal, &slow_case, Label::kNear);
384  j(parity_even, &slow_case, Label::kNear); // NaN.
385  jmp(&done, Label::kNear);
386 
387  // Slow case.
388  bind(&slow_case);
389  if (input_reg.is(result_reg)) {
390  // Input is clobbered. Restore number from double scratch.
391  sub(esp, Immediate(kDoubleSize));
392  movsd(MemOperand(esp, 0), xmm0);
393  SlowTruncateToI(result_reg, esp, 0);
394  add(esp, Immediate(kDoubleSize));
395  } else {
396  SlowTruncateToI(result_reg, input_reg);
397  }
398  } else {
399  SlowTruncateToI(result_reg, input_reg);
400  }
401  bind(&done);
402 }
403 
404 
405 void MacroAssembler::TaggedToI(Register result_reg,
406  Register input_reg,
407  XMMRegister temp,
408  MinusZeroMode minus_zero_mode,
409  Label* lost_precision) {
410  Label done;
411  ASSERT(!temp.is(xmm0));
412 
413  cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
414  isolate()->factory()->heap_number_map());
415  j(not_equal, lost_precision, Label::kNear);
416 
417  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
418  ASSERT(!temp.is(no_xmm_reg));
419  CpuFeatureScope scope(this, SSE2);
420 
421  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
422  cvttsd2si(result_reg, Operand(xmm0));
423  Cvtsi2sd(temp, Operand(result_reg));
424  ucomisd(xmm0, temp);
425  RecordComment("Deferred TaggedToI: lost precision");
426  j(not_equal, lost_precision, Label::kNear);
427  RecordComment("Deferred TaggedToI: NaN");
428  j(parity_even, lost_precision, Label::kNear);
429  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
430  test(result_reg, Operand(result_reg));
431  j(not_zero, &done, Label::kNear);
432  movmskpd(result_reg, xmm0);
433  and_(result_reg, 1);
434  RecordComment("Deferred TaggedToI: minus zero");
435  j(not_zero, lost_precision, Label::kNear);
436  }
437  } else {
438  // TODO(olivf) Converting a number on the fpu is actually quite slow. We
439  // should first try a fast conversion and then bailout to this slow case.
440  Label lost_precision_pop, zero_check;
441  Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
442  ? &lost_precision_pop : lost_precision;
443  sub(esp, Immediate(kPointerSize));
444  fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
445  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
446  fist_s(MemOperand(esp, 0));
447  fild_s(MemOperand(esp, 0));
448  FCmp();
449  pop(result_reg);
450  j(not_equal, lost_precision_int, Label::kNear);
451  j(parity_even, lost_precision_int, Label::kNear); // NaN.
452  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
453  test(result_reg, Operand(result_reg));
454  j(zero, &zero_check, Label::kNear);
455  fstp(0);
456  jmp(&done, Label::kNear);
457  bind(&zero_check);
458  // To check for minus zero, we load the value again as float, and check
459  // if that is still 0.
460  sub(esp, Immediate(kPointerSize));
461  fstp_s(Operand(esp, 0));
462  pop(result_reg);
463  test(result_reg, Operand(result_reg));
464  j(zero, &done, Label::kNear);
465  jmp(lost_precision, Label::kNear);
466 
467  bind(&lost_precision_pop);
468  fstp(0);
469  jmp(lost_precision, Label::kNear);
470  }
471  }
472  bind(&done);
473 }
474 
475 
476 void MacroAssembler::LoadUint32(XMMRegister dst,
477  Register src,
478  XMMRegister scratch) {
479  Label done;
480  cmp(src, Immediate(0));
481  ExternalReference uint32_bias =
482  ExternalReference::address_of_uint32_bias();
483  movsd(scratch, Operand::StaticVariable(uint32_bias));
484  Cvtsi2sd(dst, src);
485  j(not_sign, &done, Label::kNear);
486  addsd(dst, scratch);
487  bind(&done);
488 }
489 
490 
491 void MacroAssembler::LoadUint32NoSSE2(Register src) {
492  Label done;
493  push(src);
494  fild_s(Operand(esp, 0));
495  cmp(src, Immediate(0));
496  j(not_sign, &done, Label::kNear);
497  ExternalReference uint32_bias =
498  ExternalReference::address_of_uint32_bias();
499  fld_d(Operand::StaticVariable(uint32_bias));
500  faddp(1);
501  bind(&done);
502  add(esp, Immediate(kPointerSize));
503 }
504 
505 
506 void MacroAssembler::RecordWriteArray(Register object,
507  Register value,
508  Register index,
509  SaveFPRegsMode save_fp,
510  RememberedSetAction remembered_set_action,
511  SmiCheck smi_check) {
512  // First, check if a write barrier is even needed. The tests below
513  // catch stores of Smis.
514  Label done;
515 
516  // Skip barrier if writing a smi.
517  if (smi_check == INLINE_SMI_CHECK) {
518  ASSERT_EQ(0, kSmiTag);
519  test(value, Immediate(kSmiTagMask));
520  j(zero, &done);
521  }
522 
523  // Array access: calculate the destination address in the same manner as
524  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
525  // into an array of words.
526  Register dst = index;
527  lea(dst, Operand(object, index, times_half_pointer_size,
528  FixedArray::kHeaderSize - kHeapObjectTag));
529 
530  RecordWrite(
531  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
532 
533  bind(&done);
534 
535  // Clobber clobbered input registers when running with the debug-code flag
536  // turned on to provoke errors.
537  if (emit_debug_code()) {
538  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
539  mov(index, Immediate(BitCast<int32_t>(kZapValue)));
540  }
541 }
542 
543 
544 void MacroAssembler::RecordWriteField(
545  Register object,
546  int offset,
547  Register value,
548  Register dst,
549  SaveFPRegsMode save_fp,
550  RememberedSetAction remembered_set_action,
551  SmiCheck smi_check) {
552  // First, check if a write barrier is even needed. The tests below
553  // catch stores of Smis.
554  Label done;
555 
556  // Skip barrier if writing a smi.
557  if (smi_check == INLINE_SMI_CHECK) {
558  JumpIfSmi(value, &done, Label::kNear);
559  }
560 
561  // Although the object register is tagged, the offset is relative to the start
562  // of the object, so so offset must be a multiple of kPointerSize.
563  ASSERT(IsAligned(offset, kPointerSize));
564 
565  lea(dst, FieldOperand(object, offset));
566  if (emit_debug_code()) {
567  Label ok;
568  test_b(dst, (1 << kPointerSizeLog2) - 1);
569  j(zero, &ok, Label::kNear);
570  int3();
571  bind(&ok);
572  }
573 
574  RecordWrite(
575  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
576 
577  bind(&done);
578 
579  // Clobber clobbered input registers when running with the debug-code flag
580  // turned on to provoke errors.
581  if (emit_debug_code()) {
582  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
583  mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
584  }
585 }
586 
587 
588 void MacroAssembler::RecordWriteForMap(
589  Register object,
590  Handle<Map> map,
591  Register scratch1,
592  Register scratch2,
593  SaveFPRegsMode save_fp) {
594  Label done;
595 
596  Register address = scratch1;
597  Register value = scratch2;
598  if (emit_debug_code()) {
599  Label ok;
600  lea(address, FieldOperand(object, HeapObject::kMapOffset));
601  test_b(address, (1 << kPointerSizeLog2) - 1);
602  j(zero, &ok, Label::kNear);
603  int3();
604  bind(&ok);
605  }
606 
607  ASSERT(!object.is(value));
608  ASSERT(!object.is(address));
609  ASSERT(!value.is(address));
610  AssertNotSmi(object);
611 
612  if (!FLAG_incremental_marking) {
613  return;
614  }
615 
616  // Count number of write barriers in generated code.
617  isolate()->counters()->write_barriers_static()->Increment();
618  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
619 
620  // A single check of the map's pages interesting flag suffices, since it is
621  // only set during incremental collection, and then it's also guaranteed that
622  // the from object's page's interesting flag is also set. This optimization
623  // relies on the fact that maps can never be in new space.
624  ASSERT(!isolate()->heap()->InNewSpace(*map));
625  CheckPageFlagForMap(map,
626  MemoryChunk::kPointersToHereAreInterestingMask,
627  zero,
628  &done,
629  Label::kNear);
630 
631  // Delay the initialization of |address| and |value| for the stub until it's
632  // known that the will be needed. Up until this point their values are not
633  // needed since they are embedded in the operands of instructions that need
634  // them.
635  lea(address, FieldOperand(object, HeapObject::kMapOffset));
636  mov(value, Immediate(map));
637  RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
638  CallStub(&stub);
639 
640  bind(&done);
641 
642  // Clobber clobbered input registers when running with the debug-code flag
643  // turned on to provoke errors.
644  if (emit_debug_code()) {
645  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
646  mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
647  mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
648  }
649 }
650 
651 
652 void MacroAssembler::RecordWrite(Register object,
653  Register address,
654  Register value,
655  SaveFPRegsMode fp_mode,
656  RememberedSetAction remembered_set_action,
657  SmiCheck smi_check) {
658  ASSERT(!object.is(value));
659  ASSERT(!object.is(address));
660  ASSERT(!value.is(address));
661  AssertNotSmi(object);
662 
663  if (remembered_set_action == OMIT_REMEMBERED_SET &&
664  !FLAG_incremental_marking) {
665  return;
666  }
667 
668  if (emit_debug_code()) {
669  Label ok;
670  cmp(value, Operand(address, 0));
671  j(equal, &ok, Label::kNear);
672  int3();
673  bind(&ok);
674  }
675 
676  // Count number of write barriers in generated code.
677  isolate()->counters()->write_barriers_static()->Increment();
678  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
679 
680  // First, check if a write barrier is even needed. The tests below
681  // catch stores of Smis and stores into young gen.
682  Label done;
683 
684  if (smi_check == INLINE_SMI_CHECK) {
685  // Skip barrier if writing a smi.
686  JumpIfSmi(value, &done, Label::kNear);
687  }
688 
689  CheckPageFlag(value,
690  value, // Used as scratch.
691  MemoryChunk::kPointersToHereAreInterestingMask,
692  zero,
693  &done,
694  Label::kNear);
695  CheckPageFlag(object,
696  value, // Used as scratch.
697  MemoryChunk::kPointersFromHereAreInterestingMask,
698  zero,
699  &done,
700  Label::kNear);
701 
702  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
703  CallStub(&stub);
704 
705  bind(&done);
706 
707  // Clobber clobbered registers when running with the debug-code flag
708  // turned on to provoke errors.
709  if (emit_debug_code()) {
710  mov(address, Immediate(BitCast<int32_t>(kZapValue)));
711  mov(value, Immediate(BitCast<int32_t>(kZapValue)));
712  }
713 }
714 
715 
716 #ifdef ENABLE_DEBUGGER_SUPPORT
717 void MacroAssembler::DebugBreak() {
718  Move(eax, Immediate(0));
719  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
720  CEntryStub ces(1);
721  call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
722 }
723 #endif
724 
725 
726 void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
727  xorps(dst, dst);
728  cvtsi2sd(dst, src);
729 }
730 
731 
732 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
733  static const int kMaxImmediateBits = 17;
734  if (!RelocInfo::IsNone(x.rmode_)) return false;
735  return !is_intn(x.x_, kMaxImmediateBits);
736 }
737 
738 
739 void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
740  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
741  Move(dst, Immediate(x.x_ ^ jit_cookie()));
742  xor_(dst, jit_cookie());
743  } else {
744  Move(dst, x);
745  }
746 }
747 
748 
749 void MacroAssembler::SafePush(const Immediate& x) {
750  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
751  push(Immediate(x.x_ ^ jit_cookie()));
752  xor_(Operand(esp, 0), Immediate(jit_cookie()));
753  } else {
754  push(x);
755  }
756 }
757 
758 
759 void MacroAssembler::CmpObjectType(Register heap_object,
760  InstanceType type,
761  Register map) {
762  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
763  CmpInstanceType(map, type);
764 }
765 
766 
767 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
768  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
769  static_cast<int8_t>(type));
770 }
771 
772 
773 void MacroAssembler::CheckFastElements(Register map,
774  Label* fail,
775  Label::Distance distance) {
780  cmpb(FieldOperand(map, Map::kBitField2Offset),
781  Map::kMaximumBitField2FastHoleyElementValue);
782  j(above, fail, distance);
783 }
784 
785 
786 void MacroAssembler::CheckFastObjectElements(Register map,
787  Label* fail,
788  Label::Distance distance) {
793  cmpb(FieldOperand(map, Map::kBitField2Offset),
794  Map::kMaximumBitField2FastHoleySmiElementValue);
795  j(below_equal, fail, distance);
796  cmpb(FieldOperand(map, Map::kBitField2Offset),
797  Map::kMaximumBitField2FastHoleyElementValue);
798  j(above, fail, distance);
799 }
800 
801 
802 void MacroAssembler::CheckFastSmiElements(Register map,
803  Label* fail,
804  Label::Distance distance) {
807  cmpb(FieldOperand(map, Map::kBitField2Offset),
808  Map::kMaximumBitField2FastHoleySmiElementValue);
809  j(above, fail, distance);
810 }
811 
812 
813 void MacroAssembler::StoreNumberToDoubleElements(
814  Register maybe_number,
815  Register elements,
816  Register key,
817  Register scratch1,
818  XMMRegister scratch2,
819  Label* fail,
820  bool specialize_for_processor,
821  int elements_offset) {
822  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
823  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
824 
825  CheckMap(maybe_number,
826  isolate()->factory()->heap_number_map(),
827  fail,
829 
830  // Double value, canonicalize NaN.
831  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
832  cmp(FieldOperand(maybe_number, offset),
834  j(greater_equal, &maybe_nan, Label::kNear);
835 
836  bind(&not_nan);
837  ExternalReference canonical_nan_reference =
838  ExternalReference::address_of_canonical_non_hole_nan();
839  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
840  CpuFeatureScope use_sse2(this, SSE2);
841  movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
842  bind(&have_double_value);
843  movsd(FieldOperand(elements, key, times_4,
844  FixedDoubleArray::kHeaderSize - elements_offset),
845  scratch2);
846  } else {
847  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
848  bind(&have_double_value);
849  fstp_d(FieldOperand(elements, key, times_4,
850  FixedDoubleArray::kHeaderSize - elements_offset));
851  }
852  jmp(&done);
853 
854  bind(&maybe_nan);
855  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
856  // it's an Infinity, and the non-NaN code path applies.
857  j(greater, &is_nan, Label::kNear);
858  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
859  j(zero, &not_nan);
860  bind(&is_nan);
861  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
862  CpuFeatureScope use_sse2(this, SSE2);
863  movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
864  } else {
865  fld_d(Operand::StaticVariable(canonical_nan_reference));
866  }
867  jmp(&have_double_value, Label::kNear);
868 
869  bind(&smi_value);
870  // Value is a smi. Convert to a double and store.
871  // Preserve original value.
872  mov(scratch1, maybe_number);
873  SmiUntag(scratch1);
874  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
875  CpuFeatureScope fscope(this, SSE2);
876  Cvtsi2sd(scratch2, scratch1);
877  movsd(FieldOperand(elements, key, times_4,
878  FixedDoubleArray::kHeaderSize - elements_offset),
879  scratch2);
880  } else {
881  push(scratch1);
882  fild_s(Operand(esp, 0));
883  pop(scratch1);
884  fstp_d(FieldOperand(elements, key, times_4,
885  FixedDoubleArray::kHeaderSize - elements_offset));
886  }
887  bind(&done);
888 }
889 
890 
891 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
892  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
893 }
894 
895 
896 void MacroAssembler::CheckMap(Register obj,
897  Handle<Map> map,
898  Label* fail,
899  SmiCheckType smi_check_type) {
900  if (smi_check_type == DO_SMI_CHECK) {
901  JumpIfSmi(obj, fail);
902  }
903 
904  CompareMap(obj, map);
905  j(not_equal, fail);
906 }
907 
908 
909 void MacroAssembler::DispatchMap(Register obj,
910  Register unused,
911  Handle<Map> map,
912  Handle<Code> success,
913  SmiCheckType smi_check_type) {
914  Label fail;
915  if (smi_check_type == DO_SMI_CHECK) {
916  JumpIfSmi(obj, &fail);
917  }
918  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
919  j(equal, success);
920 
921  bind(&fail);
922 }
923 
924 
925 Condition MacroAssembler::IsObjectStringType(Register heap_object,
926  Register map,
927  Register instance_type) {
928  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
929  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
931  test(instance_type, Immediate(kIsNotStringMask));
932  return zero;
933 }
934 
935 
936 Condition MacroAssembler::IsObjectNameType(Register heap_object,
937  Register map,
938  Register instance_type) {
939  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
940  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
941  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
942  return below_equal;
943 }
944 
945 
946 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
947  Register map,
948  Register scratch,
949  Label* fail) {
950  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
951  IsInstanceJSObjectType(map, scratch, fail);
952 }
953 
954 
955 void MacroAssembler::IsInstanceJSObjectType(Register map,
956  Register scratch,
957  Label* fail) {
958  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
959  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
960  cmp(scratch,
962  j(above, fail);
963 }
964 
965 
966 void MacroAssembler::FCmp() {
967  if (CpuFeatures::IsSupported(CMOV)) {
968  fucomip();
969  fstp(0);
970  } else {
971  fucompp();
972  push(eax);
973  fnstsw_ax();
974  sahf();
975  pop(eax);
976  }
977 }
978 
979 
980 void MacroAssembler::AssertNumber(Register object) {
981  if (emit_debug_code()) {
982  Label ok;
983  JumpIfSmi(object, &ok);
984  cmp(FieldOperand(object, HeapObject::kMapOffset),
985  isolate()->factory()->heap_number_map());
986  Check(equal, kOperandNotANumber);
987  bind(&ok);
988  }
989 }
990 
991 
992 void MacroAssembler::AssertSmi(Register object) {
993  if (emit_debug_code()) {
994  test(object, Immediate(kSmiTagMask));
995  Check(equal, kOperandIsNotASmi);
996  }
997 }
998 
999 
1000 void MacroAssembler::AssertString(Register object) {
1001  if (emit_debug_code()) {
1002  test(object, Immediate(kSmiTagMask));
1003  Check(not_equal, kOperandIsASmiAndNotAString);
1004  push(object);
1005  mov(object, FieldOperand(object, HeapObject::kMapOffset));
1006  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
1007  pop(object);
1008  Check(below, kOperandIsNotAString);
1009  }
1010 }
1011 
1012 
1013 void MacroAssembler::AssertName(Register object) {
1014  if (emit_debug_code()) {
1015  test(object, Immediate(kSmiTagMask));
1016  Check(not_equal, kOperandIsASmiAndNotAName);
1017  push(object);
1018  mov(object, FieldOperand(object, HeapObject::kMapOffset));
1019  CmpInstanceType(object, LAST_NAME_TYPE);
1020  pop(object);
1021  Check(below_equal, kOperandIsNotAName);
1022  }
1023 }
1024 
1025 
1026 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
1027  if (emit_debug_code()) {
1028  Label done_checking;
1029  AssertNotSmi(object);
1030  cmp(object, isolate()->factory()->undefined_value());
1031  j(equal, &done_checking);
1032  cmp(FieldOperand(object, 0),
1033  Immediate(isolate()->factory()->allocation_site_map()));
1034  Assert(equal, kExpectedUndefinedOrCell);
1035  bind(&done_checking);
1036  }
1037 }
1038 
1039 
1040 void MacroAssembler::AssertNotSmi(Register object) {
1041  if (emit_debug_code()) {
1042  test(object, Immediate(kSmiTagMask));
1043  Check(not_equal, kOperandIsASmi);
1044  }
1045 }
1046 
1047 
1048 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
1049  if (frame_mode == BUILD_STUB_FRAME) {
1050  push(ebp); // Caller's frame pointer.
1051  mov(ebp, esp);
1052  push(esi); // Callee's context.
1053  push(Immediate(Smi::FromInt(StackFrame::STUB)));
1054  } else {
1055  PredictableCodeSizeScope predictible_code_size_scope(this,
1056  kNoCodeAgeSequenceLength);
1057  if (isolate()->IsCodePreAgingActive()) {
1058  // Pre-age the code.
1059  call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
1060  RelocInfo::CODE_AGE_SEQUENCE);
1061  Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
1062  } else {
1063  push(ebp); // Caller's frame pointer.
1064  mov(ebp, esp);
1065  push(esi); // Callee's context.
1066  push(edi); // Callee's JS function.
1067  }
1068  }
1069 }
1070 
1071 
1072 void MacroAssembler::EnterFrame(StackFrame::Type type) {
1073  push(ebp);
1074  mov(ebp, esp);
1075  push(esi);
1076  push(Immediate(Smi::FromInt(type)));
1077  push(Immediate(CodeObject()));
1078  if (emit_debug_code()) {
1079  cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
1080  Check(not_equal, kCodeObjectNotProperlyPatched);
1081  }
1082 }
1083 
1084 
1085 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1086  if (emit_debug_code()) {
1087  cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
1088  Immediate(Smi::FromInt(type)));
1089  Check(equal, kStackFrameTypesMustMatch);
1090  }
1091  leave();
1092 }
1093 
1094 
1095 void MacroAssembler::EnterExitFramePrologue() {
1096  // Set up the frame structure on the stack.
1097  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1098  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1099  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
1100  push(ebp);
1101  mov(ebp, esp);
1102 
1103  // Reserve room for entry stack pointer and push the code object.
1104  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
1105  push(Immediate(0)); // Saved entry sp, patched before call.
1106  push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
1107 
1108  // Save the frame pointer and the context in top.
1109  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
1110  ExternalReference context_address(Isolate::kContextAddress, isolate());
1111  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
1112  mov(Operand::StaticVariable(context_address), esi);
1113 }
1114 
1115 
1116 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
1117  // Optionally save all XMM registers.
1118  if (save_doubles) {
1119  CpuFeatureScope scope(this, SSE2);
1120  int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
1121  sub(esp, Immediate(space));
1122  const int offset = -2 * kPointerSize;
1123  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
1124  XMMRegister reg = XMMRegister::from_code(i);
1125  movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
1126  }
1127  } else {
1128  sub(esp, Immediate(argc * kPointerSize));
1129  }
1130 
1131  // Get the required frame alignment for the OS.
1132  const int kFrameAlignment = OS::ActivationFrameAlignment();
1133  if (kFrameAlignment > 0) {
1134  ASSERT(IsPowerOf2(kFrameAlignment));
1135  and_(esp, -kFrameAlignment);
1136  }
1137 
1138  // Patch the saved entry sp.
1139  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
1140 }
1141 
1142 
1143 void MacroAssembler::EnterExitFrame(bool save_doubles) {
1144  EnterExitFramePrologue();
1145 
1146  // Set up argc and argv in callee-saved registers.
1147  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1148  mov(edi, eax);
1149  lea(esi, Operand(ebp, eax, times_4, offset));
1150 
1151  // Reserve space for argc, argv and isolate.
1152  EnterExitFrameEpilogue(3, save_doubles);
1153 }
1154 
1155 
1156 void MacroAssembler::EnterApiExitFrame(int argc) {
1157  EnterExitFramePrologue();
1158  EnterExitFrameEpilogue(argc, false);
1159 }
1160 
1161 
1162 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
1163  // Optionally restore all XMM registers.
1164  if (save_doubles) {
1165  CpuFeatureScope scope(this, SSE2);
1166  const int offset = -2 * kPointerSize;
1167  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
1168  XMMRegister reg = XMMRegister::from_code(i);
1169  movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
1170  }
1171  }
1172 
1173  // Get the return address from the stack and restore the frame pointer.
1174  mov(ecx, Operand(ebp, 1 * kPointerSize));
1175  mov(ebp, Operand(ebp, 0 * kPointerSize));
1176 
1177  // Pop the arguments and the receiver from the caller stack.
1178  lea(esp, Operand(esi, 1 * kPointerSize));
1179 
1180  // Push the return address to get ready to return.
1181  push(ecx);
1182 
1183  LeaveExitFrameEpilogue(true);
1184 }
1185 
1186 
1187 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
1188  // Restore current context from top and clear it in debug mode.
1189  ExternalReference context_address(Isolate::kContextAddress, isolate());
1190  if (restore_context) {
1191  mov(esi, Operand::StaticVariable(context_address));
1192  }
1193 #ifdef DEBUG
1194  mov(Operand::StaticVariable(context_address), Immediate(0));
1195 #endif
1196 
1197  // Clear the top frame.
1198  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
1199  isolate());
1200  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
1201 }
1202 
1203 
1204 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
1205  mov(esp, ebp);
1206  pop(ebp);
1207 
1208  LeaveExitFrameEpilogue(restore_context);
1209 }
1210 
1211 
1212 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1213  int handler_index) {
1214  // Adjust this code if not the case.
1215  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1216  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1217  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1218  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1219  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1220  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1221 
1222  // We will build up the handler from the bottom by pushing on the stack.
1223  // First push the frame pointer and context.
1224  if (kind == StackHandler::JS_ENTRY) {
1225  // The frame pointer does not point to a JS frame so we save NULL for
1226  // ebp. We expect the code throwing an exception to check ebp before
1227  // dereferencing it to restore the context.
1228  push(Immediate(0)); // NULL frame pointer.
1229  push(Immediate(Smi::FromInt(0))); // No context.
1230  } else {
1231  push(ebp);
1232  push(esi);
1233  }
1234  // Push the state and the code object.
1235  unsigned state =
1236  StackHandler::IndexField::encode(handler_index) |
1237  StackHandler::KindField::encode(kind);
1238  push(Immediate(state));
1239  Push(CodeObject());
1240 
1241  // Link the current handler as the next handler.
1242  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1243  push(Operand::StaticVariable(handler_address));
1244  // Set this new handler as the current one.
1245  mov(Operand::StaticVariable(handler_address), esp);
1246 }
1247 
1248 
1249 void MacroAssembler::PopTryHandler() {
1250  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1251  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1252  pop(Operand::StaticVariable(handler_address));
1253  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1254 }
1255 
1256 
1257 void MacroAssembler::JumpToHandlerEntry() {
1258  // Compute the handler entry address and jump to it. The handler table is
1259  // a fixed array of (smi-tagged) code offsets.
1260  // eax = exception, edi = code object, edx = state.
1261  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
1262  shr(edx, StackHandler::kKindWidth);
1263  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
1264  SmiUntag(edx);
1265  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
1266  jmp(edi);
1267 }
1268 
1269 
1270 void MacroAssembler::Throw(Register value) {
1271  // Adjust this code if not the case.
1272  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1273  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1274  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1275  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1276  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1277  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1278 
1279  // The exception is expected in eax.
1280  if (!value.is(eax)) {
1281  mov(eax, value);
1282  }
1283  // Drop the stack pointer to the top of the top handler.
1284  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1285  mov(esp, Operand::StaticVariable(handler_address));
1286  // Restore the next handler.
1287  pop(Operand::StaticVariable(handler_address));
1288 
1289  // Remove the code object and state, compute the handler address in edi.
1290  pop(edi); // Code object.
1291  pop(edx); // Index and state.
1292 
1293  // Restore the context and frame pointer.
1294  pop(esi); // Context.
1295  pop(ebp); // Frame pointer.
1296 
1297  // If the handler is a JS frame, restore the context to the frame.
1298  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
1299  // ebp or esi.
1300  Label skip;
1301  test(esi, esi);
1302  j(zero, &skip, Label::kNear);
1303  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
1304  bind(&skip);
1305 
1306  JumpToHandlerEntry();
1307 }
1308 
1309 
1310 void MacroAssembler::ThrowUncatchable(Register value) {
1311  // Adjust this code if not the case.
1312  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1313  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1314  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1315  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1316  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1317  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1318 
1319  // The exception is expected in eax.
1320  if (!value.is(eax)) {
1321  mov(eax, value);
1322  }
1323  // Drop the stack pointer to the top of the top stack handler.
1324  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1325  mov(esp, Operand::StaticVariable(handler_address));
1326 
1327  // Unwind the handlers until the top ENTRY handler is found.
1328  Label fetch_next, check_kind;
1329  jmp(&check_kind, Label::kNear);
1330  bind(&fetch_next);
1331  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
1332 
1333  bind(&check_kind);
1334  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1335  test(Operand(esp, StackHandlerConstants::kStateOffset),
1336  Immediate(StackHandler::KindField::kMask));
1337  j(not_zero, &fetch_next);
1338 
1339  // Set the top handler address to next handler past the top ENTRY handler.
1340  pop(Operand::StaticVariable(handler_address));
1341 
1342  // Remove the code object and state, compute the handler address in edi.
1343  pop(edi); // Code object.
1344  pop(edx); // Index and state.
1345 
1346  // Clear the context pointer and frame pointer (0 was saved in the handler).
1347  pop(esi);
1348  pop(ebp);
1349 
1350  JumpToHandlerEntry();
1351 }
1352 
1353 
1354 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1355  Register scratch1,
1356  Register scratch2,
1357  Label* miss) {
1358  Label same_contexts;
1359 
1360  ASSERT(!holder_reg.is(scratch1));
1361  ASSERT(!holder_reg.is(scratch2));
1362  ASSERT(!scratch1.is(scratch2));
1363 
1364  // Load current lexical context from the stack frame.
1365  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
1366 
1367  // When generating debug code, make sure the lexical context is set.
1368  if (emit_debug_code()) {
1369  cmp(scratch1, Immediate(0));
1370  Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
1371  }
1372  // Load the native context of the current context.
1373  int offset =
1374  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1375  mov(scratch1, FieldOperand(scratch1, offset));
1376  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
1377 
1378  // Check the context is a native context.
1379  if (emit_debug_code()) {
1380  // Read the first word and compare to native_context_map.
1381  cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
1382  isolate()->factory()->native_context_map());
1383  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1384  }
1385 
1386  // Check if both contexts are the same.
1387  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1388  j(equal, &same_contexts);
1389 
1390  // Compare security tokens, save holder_reg on the stack so we can use it
1391  // as a temporary register.
1392  //
1393  // Check that the security token in the calling global object is
1394  // compatible with the security token in the receiving global
1395  // object.
1396  mov(scratch2,
1397  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1398 
1399  // Check the context is a native context.
1400  if (emit_debug_code()) {
1401  cmp(scratch2, isolate()->factory()->null_value());
1402  Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
1403 
1404  // Read the first word and compare to native_context_map(),
1405  cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
1406  isolate()->factory()->native_context_map());
1407  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1408  }
1409 
1410  int token_offset = Context::kHeaderSize +
1411  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1412  mov(scratch1, FieldOperand(scratch1, token_offset));
1413  cmp(scratch1, FieldOperand(scratch2, token_offset));
1414  j(not_equal, miss);
1415 
1416  bind(&same_contexts);
1417 }
1418 
1419 
1420 // Compute the hash code from the untagged key. This must be kept in sync with
1421 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
1422 // code-stub-hydrogen.cc
1423 //
1424 // Note: r0 will contain hash code
1425 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
1426  // Xor original key with a seed.
1427  if (Serializer::enabled()) {
1428  ExternalReference roots_array_start =
1429  ExternalReference::roots_array_start(isolate());
1430  mov(scratch, Immediate(Heap::kHashSeedRootIndex));
1431  mov(scratch,
1432  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
1433  SmiUntag(scratch);
1434  xor_(r0, scratch);
1435  } else {
1436  int32_t seed = isolate()->heap()->HashSeed();
1437  xor_(r0, Immediate(seed));
1438  }
1439 
1440  // hash = ~hash + (hash << 15);
1441  mov(scratch, r0);
1442  not_(r0);
1443  shl(scratch, 15);
1444  add(r0, scratch);
1445  // hash = hash ^ (hash >> 12);
1446  mov(scratch, r0);
1447  shr(scratch, 12);
1448  xor_(r0, scratch);
1449  // hash = hash + (hash << 2);
1450  lea(r0, Operand(r0, r0, times_4, 0));
1451  // hash = hash ^ (hash >> 4);
1452  mov(scratch, r0);
1453  shr(scratch, 4);
1454  xor_(r0, scratch);
1455  // hash = hash * 2057;
1456  imul(r0, r0, 2057);
1457  // hash = hash ^ (hash >> 16);
1458  mov(scratch, r0);
1459  shr(scratch, 16);
1460  xor_(r0, scratch);
1461 }
1462 
1463 
1464 
1465 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1466  Register elements,
1467  Register key,
1468  Register r0,
1469  Register r1,
1470  Register r2,
1471  Register result) {
1472  // Register use:
1473  //
1474  // elements - holds the slow-case elements of the receiver and is unchanged.
1475  //
1476  // key - holds the smi key on entry and is unchanged.
1477  //
1478  // Scratch registers:
1479  //
1480  // r0 - holds the untagged key on entry and holds the hash once computed.
1481  //
1482  // r1 - used to hold the capacity mask of the dictionary
1483  //
1484  // r2 - used for the index into the dictionary.
1485  //
1486  // result - holds the result on exit if the load succeeds and we fall through.
1487 
1488  Label done;
1489 
1490  GetNumberHash(r0, r1);
1491 
1492  // Compute capacity mask.
1493  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
1494  shr(r1, kSmiTagSize); // convert smi to int
1495  dec(r1);
1496 
1497  // Generate an unrolled loop that performs a few probes before giving up.
1498  for (int i = 0; i < kNumberDictionaryProbes; i++) {
1499  // Use r2 for index calculations and keep the hash intact in r0.
1500  mov(r2, r0);
1501  // Compute the masked index: (hash + i + i * i) & mask.
1502  if (i > 0) {
1503  add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
1504  }
1505  and_(r2, r1);
1506 
1507  // Scale the index by multiplying by the entry size.
1508  ASSERT(SeededNumberDictionary::kEntrySize == 3);
1509  lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
1510 
1511  // Check if the key matches.
1512  cmp(key, FieldOperand(elements,
1513  r2,
1515  SeededNumberDictionary::kElementsStartOffset));
1516  if (i != (kNumberDictionaryProbes - 1)) {
1517  j(equal, &done);
1518  } else {
1519  j(not_equal, miss);
1520  }
1521  }
1522 
1523  bind(&done);
1524  // Check that the value is a normal propety.
1525  const int kDetailsOffset =
1526  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1527  ASSERT_EQ(NORMAL, 0);
1528  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
1529  Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
1530  j(not_zero, miss);
1531 
1532  // Get the value at the masked, scaled index.
1533  const int kValueOffset =
1534  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1535  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
1536 }
1537 
1538 
1539 void MacroAssembler::LoadAllocationTopHelper(Register result,
1540  Register scratch,
1542  ExternalReference allocation_top =
1543  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1544 
1545  // Just return if allocation top is already known.
1546  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1547  // No use of scratch if allocation top is provided.
1548  ASSERT(scratch.is(no_reg));
1549 #ifdef DEBUG
1550  // Assert that result actually contains top on entry.
1551  cmp(result, Operand::StaticVariable(allocation_top));
1552  Check(equal, kUnexpectedAllocationTop);
1553 #endif
1554  return;
1555  }
1556 
1557  // Move address of new object to result. Use scratch register if available.
1558  if (scratch.is(no_reg)) {
1559  mov(result, Operand::StaticVariable(allocation_top));
1560  } else {
1561  mov(scratch, Immediate(allocation_top));
1562  mov(result, Operand(scratch, 0));
1563  }
1564 }
1565 
1566 
1567 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1568  Register scratch,
1569  AllocationFlags flags) {
1570  if (emit_debug_code()) {
1571  test(result_end, Immediate(kObjectAlignmentMask));
1572  Check(zero, kUnalignedAllocationInNewSpace);
1573  }
1574 
1575  ExternalReference allocation_top =
1576  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1577 
1578  // Update new top. Use scratch if available.
1579  if (scratch.is(no_reg)) {
1580  mov(Operand::StaticVariable(allocation_top), result_end);
1581  } else {
1582  mov(Operand(scratch, 0), result_end);
1583  }
1584 }
1585 
1586 
1587 void MacroAssembler::Allocate(int object_size,
1588  Register result,
1589  Register result_end,
1590  Register scratch,
1591  Label* gc_required,
1592  AllocationFlags flags) {
1593  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1594  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1595  if (!FLAG_inline_new) {
1596  if (emit_debug_code()) {
1597  // Trash the registers to simulate an allocation failure.
1598  mov(result, Immediate(0x7091));
1599  if (result_end.is_valid()) {
1600  mov(result_end, Immediate(0x7191));
1601  }
1602  if (scratch.is_valid()) {
1603  mov(scratch, Immediate(0x7291));
1604  }
1605  }
1606  jmp(gc_required);
1607  return;
1608  }
1609  ASSERT(!result.is(result_end));
1610 
1611  // Load address of new object into result.
1612  LoadAllocationTopHelper(result, scratch, flags);
1613 
1614  ExternalReference allocation_limit =
1615  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1616 
1617  // Align the next allocation. Storing the filler map without checking top is
1618  // safe in new-space because the limit of the heap is aligned there.
1619  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1620  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1622  Label aligned;
1623  test(result, Immediate(kDoubleAlignmentMask));
1624  j(zero, &aligned, Label::kNear);
1625  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1626  cmp(result, Operand::StaticVariable(allocation_limit));
1627  j(above_equal, gc_required);
1628  }
1629  mov(Operand(result, 0),
1630  Immediate(isolate()->factory()->one_pointer_filler_map()));
1631  add(result, Immediate(kDoubleSize / 2));
1632  bind(&aligned);
1633  }
1634 
1635  // Calculate new top and bail out if space is exhausted.
1636  Register top_reg = result_end.is_valid() ? result_end : result;
1637  if (!top_reg.is(result)) {
1638  mov(top_reg, result);
1639  }
1640  add(top_reg, Immediate(object_size));
1641  j(carry, gc_required);
1642  cmp(top_reg, Operand::StaticVariable(allocation_limit));
1643  j(above, gc_required);
1644 
1645  // Update allocation top.
1646  UpdateAllocationTopHelper(top_reg, scratch, flags);
1647 
1648  // Tag result if requested.
1649  bool tag_result = (flags & TAG_OBJECT) != 0;
1650  if (top_reg.is(result)) {
1651  if (tag_result) {
1652  sub(result, Immediate(object_size - kHeapObjectTag));
1653  } else {
1654  sub(result, Immediate(object_size));
1655  }
1656  } else if (tag_result) {
1657  ASSERT(kHeapObjectTag == 1);
1658  inc(result);
1659  }
1660 }
1661 
1662 
1663 void MacroAssembler::Allocate(int header_size,
1664  ScaleFactor element_size,
1665  Register element_count,
1666  RegisterValueType element_count_type,
1667  Register result,
1668  Register result_end,
1669  Register scratch,
1670  Label* gc_required,
1671  AllocationFlags flags) {
1672  ASSERT((flags & SIZE_IN_WORDS) == 0);
1673  if (!FLAG_inline_new) {
1674  if (emit_debug_code()) {
1675  // Trash the registers to simulate an allocation failure.
1676  mov(result, Immediate(0x7091));
1677  mov(result_end, Immediate(0x7191));
1678  if (scratch.is_valid()) {
1679  mov(scratch, Immediate(0x7291));
1680  }
1681  // Register element_count is not modified by the function.
1682  }
1683  jmp(gc_required);
1684  return;
1685  }
1686  ASSERT(!result.is(result_end));
1687 
1688  // Load address of new object into result.
1689  LoadAllocationTopHelper(result, scratch, flags);
1690 
1691  ExternalReference allocation_limit =
1692  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1693 
1694  // Align the next allocation. Storing the filler map without checking top is
1695  // safe in new-space because the limit of the heap is aligned there.
1696  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1697  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1699  Label aligned;
1700  test(result, Immediate(kDoubleAlignmentMask));
1701  j(zero, &aligned, Label::kNear);
1702  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1703  cmp(result, Operand::StaticVariable(allocation_limit));
1704  j(above_equal, gc_required);
1705  }
1706  mov(Operand(result, 0),
1707  Immediate(isolate()->factory()->one_pointer_filler_map()));
1708  add(result, Immediate(kDoubleSize / 2));
1709  bind(&aligned);
1710  }
1711 
1712  // Calculate new top and bail out if space is exhausted.
1713  // We assume that element_count*element_size + header_size does not
1714  // overflow.
1715  if (element_count_type == REGISTER_VALUE_IS_SMI) {
1716  STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1717  STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1718  STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1719  ASSERT(element_size >= times_2);
1720  ASSERT(kSmiTagSize == 1);
1721  element_size = static_cast<ScaleFactor>(element_size - 1);
1722  } else {
1723  ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
1724  }
1725  lea(result_end, Operand(element_count, element_size, header_size));
1726  add(result_end, result);
1727  j(carry, gc_required);
1728  cmp(result_end, Operand::StaticVariable(allocation_limit));
1729  j(above, gc_required);
1730 
1731  if ((flags & TAG_OBJECT) != 0) {
1732  ASSERT(kHeapObjectTag == 1);
1733  inc(result);
1734  }
1735 
1736  // Update allocation top.
1737  UpdateAllocationTopHelper(result_end, scratch, flags);
1738 }
1739 
1740 
1741 void MacroAssembler::Allocate(Register object_size,
1742  Register result,
1743  Register result_end,
1744  Register scratch,
1745  Label* gc_required,
1746  AllocationFlags flags) {
1747  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1748  if (!FLAG_inline_new) {
1749  if (emit_debug_code()) {
1750  // Trash the registers to simulate an allocation failure.
1751  mov(result, Immediate(0x7091));
1752  mov(result_end, Immediate(0x7191));
1753  if (scratch.is_valid()) {
1754  mov(scratch, Immediate(0x7291));
1755  }
1756  // object_size is left unchanged by this function.
1757  }
1758  jmp(gc_required);
1759  return;
1760  }
1761  ASSERT(!result.is(result_end));
1762 
1763  // Load address of new object into result.
1764  LoadAllocationTopHelper(result, scratch, flags);
1765 
1766  ExternalReference allocation_limit =
1767  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1768 
1769  // Align the next allocation. Storing the filler map without checking top is
1770  // safe in new-space because the limit of the heap is aligned there.
1771  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1772  ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1774  Label aligned;
1775  test(result, Immediate(kDoubleAlignmentMask));
1776  j(zero, &aligned, Label::kNear);
1777  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1778  cmp(result, Operand::StaticVariable(allocation_limit));
1779  j(above_equal, gc_required);
1780  }
1781  mov(Operand(result, 0),
1782  Immediate(isolate()->factory()->one_pointer_filler_map()));
1783  add(result, Immediate(kDoubleSize / 2));
1784  bind(&aligned);
1785  }
1786 
1787  // Calculate new top and bail out if space is exhausted.
1788  if (!object_size.is(result_end)) {
1789  mov(result_end, object_size);
1790  }
1791  add(result_end, result);
1792  j(carry, gc_required);
1793  cmp(result_end, Operand::StaticVariable(allocation_limit));
1794  j(above, gc_required);
1795 
1796  // Tag result if requested.
1797  if ((flags & TAG_OBJECT) != 0) {
1798  ASSERT(kHeapObjectTag == 1);
1799  inc(result);
1800  }
1801 
1802  // Update allocation top.
1803  UpdateAllocationTopHelper(result_end, scratch, flags);
1804 }
1805 
1806 
1807 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
1808  ExternalReference new_space_allocation_top =
1809  ExternalReference::new_space_allocation_top_address(isolate());
1810 
1811  // Make sure the object has no tag before resetting top.
1812  and_(object, Immediate(~kHeapObjectTagMask));
1813 #ifdef DEBUG
1814  cmp(object, Operand::StaticVariable(new_space_allocation_top));
1815  Check(below, kUndoAllocationOfNonAllocatedMemory);
1816 #endif
1817  mov(Operand::StaticVariable(new_space_allocation_top), object);
1818 }
1819 
1820 
1821 void MacroAssembler::AllocateHeapNumber(Register result,
1822  Register scratch1,
1823  Register scratch2,
1824  Label* gc_required) {
1825  // Allocate heap number in new space.
1826  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
1827  TAG_OBJECT);
1828 
1829  // Set the map.
1830  mov(FieldOperand(result, HeapObject::kMapOffset),
1831  Immediate(isolate()->factory()->heap_number_map()));
1832 }
1833 
1834 
1835 void MacroAssembler::AllocateTwoByteString(Register result,
1836  Register length,
1837  Register scratch1,
1838  Register scratch2,
1839  Register scratch3,
1840  Label* gc_required) {
1841  // Calculate the number of bytes needed for the characters in the string while
1842  // observing object alignment.
1843  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1844  ASSERT(kShortSize == 2);
1845  // scratch1 = length * 2 + kObjectAlignmentMask.
1846  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1847  and_(scratch1, Immediate(~kObjectAlignmentMask));
1848 
1849  // Allocate two byte string in new space.
1850  Allocate(SeqTwoByteString::kHeaderSize,
1851  times_1,
1852  scratch1,
1854  result,
1855  scratch2,
1856  scratch3,
1857  gc_required,
1858  TAG_OBJECT);
1859 
1860  // Set the map, length and hash field.
1861  mov(FieldOperand(result, HeapObject::kMapOffset),
1862  Immediate(isolate()->factory()->string_map()));
1863  mov(scratch1, length);
1864  SmiTag(scratch1);
1865  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1866  mov(FieldOperand(result, String::kHashFieldOffset),
1867  Immediate(String::kEmptyHashField));
1868 }
1869 
1870 
1871 void MacroAssembler::AllocateAsciiString(Register result,
1872  Register length,
1873  Register scratch1,
1874  Register scratch2,
1875  Register scratch3,
1876  Label* gc_required) {
1877  // Calculate the number of bytes needed for the characters in the string while
1878  // observing object alignment.
1879  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1880  mov(scratch1, length);
1881  ASSERT(kCharSize == 1);
1882  add(scratch1, Immediate(kObjectAlignmentMask));
1883  and_(scratch1, Immediate(~kObjectAlignmentMask));
1884 
1885  // Allocate ASCII string in new space.
1886  Allocate(SeqOneByteString::kHeaderSize,
1887  times_1,
1888  scratch1,
1890  result,
1891  scratch2,
1892  scratch3,
1893  gc_required,
1894  TAG_OBJECT);
1895 
1896  // Set the map, length and hash field.
1897  mov(FieldOperand(result, HeapObject::kMapOffset),
1898  Immediate(isolate()->factory()->ascii_string_map()));
1899  mov(scratch1, length);
1900  SmiTag(scratch1);
1901  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1902  mov(FieldOperand(result, String::kHashFieldOffset),
1903  Immediate(String::kEmptyHashField));
1904 }
1905 
1906 
1907 void MacroAssembler::AllocateAsciiString(Register result,
1908  int length,
1909  Register scratch1,
1910  Register scratch2,
1911  Label* gc_required) {
1912  ASSERT(length > 0);
1913 
1914  // Allocate ASCII string in new space.
1915  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
1916  gc_required, TAG_OBJECT);
1917 
1918  // Set the map, length and hash field.
1919  mov(FieldOperand(result, HeapObject::kMapOffset),
1920  Immediate(isolate()->factory()->ascii_string_map()));
1921  mov(FieldOperand(result, String::kLengthOffset),
1922  Immediate(Smi::FromInt(length)));
1923  mov(FieldOperand(result, String::kHashFieldOffset),
1924  Immediate(String::kEmptyHashField));
1925 }
1926 
1927 
1928 void MacroAssembler::AllocateTwoByteConsString(Register result,
1929  Register scratch1,
1930  Register scratch2,
1931  Label* gc_required) {
1932  // Allocate heap number in new space.
1933  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1934  TAG_OBJECT);
1935 
1936  // Set the map. The other fields are left uninitialized.
1937  mov(FieldOperand(result, HeapObject::kMapOffset),
1938  Immediate(isolate()->factory()->cons_string_map()));
1939 }
1940 
1941 
1942 void MacroAssembler::AllocateAsciiConsString(Register result,
1943  Register scratch1,
1944  Register scratch2,
1945  Label* gc_required) {
1946  Label allocate_new_space, install_map;
1947  AllocationFlags flags = TAG_OBJECT;
1948 
1949  ExternalReference high_promotion_mode = ExternalReference::
1950  new_space_high_promotion_mode_active_address(isolate());
1951 
1952  test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
1953  j(zero, &allocate_new_space);
1954 
1955  Allocate(ConsString::kSize,
1956  result,
1957  scratch1,
1958  scratch2,
1959  gc_required,
1960  static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
1961  jmp(&install_map);
1962 
1963  bind(&allocate_new_space);
1964  Allocate(ConsString::kSize,
1965  result,
1966  scratch1,
1967  scratch2,
1968  gc_required,
1969  flags);
1970 
1971  bind(&install_map);
1972  // Set the map. The other fields are left uninitialized.
1973  mov(FieldOperand(result, HeapObject::kMapOffset),
1974  Immediate(isolate()->factory()->cons_ascii_string_map()));
1975 }
1976 
1977 
1978 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1979  Register scratch1,
1980  Register scratch2,
1981  Label* gc_required) {
1982  // Allocate heap number in new space.
1983  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1984  TAG_OBJECT);
1985 
1986  // Set the map. The other fields are left uninitialized.
1987  mov(FieldOperand(result, HeapObject::kMapOffset),
1988  Immediate(isolate()->factory()->sliced_string_map()));
1989 }
1990 
1991 
1992 void MacroAssembler::AllocateAsciiSlicedString(Register result,
1993  Register scratch1,
1994  Register scratch2,
1995  Label* gc_required) {
1996  // Allocate heap number in new space.
1997  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1998  TAG_OBJECT);
1999 
2000  // Set the map. The other fields are left uninitialized.
2001  mov(FieldOperand(result, HeapObject::kMapOffset),
2002  Immediate(isolate()->factory()->sliced_ascii_string_map()));
2003 }
2004 
2005 
2006 // Copy memory, byte-by-byte, from source to destination. Not optimized for
2007 // long or aligned copies. The contents of scratch and length are destroyed.
2008 // Source and destination are incremented by length.
2009 // Many variants of movsb, loop unrolling, word moves, and indexed operands
2010 // have been tried here already, and this is fastest.
2011 // A simpler loop is faster on small copies, but 30% slower on large ones.
2012 // The cld() instruction must have been emitted, to set the direction flag(),
2013 // before calling this function.
2014 void MacroAssembler::CopyBytes(Register source,
2015  Register destination,
2016  Register length,
2017  Register scratch) {
2018  Label short_loop, len4, len8, len12, done, short_string;
2019  ASSERT(source.is(esi));
2020  ASSERT(destination.is(edi));
2021  ASSERT(length.is(ecx));
2022  cmp(length, Immediate(4));
2023  j(below, &short_string, Label::kNear);
2024 
2025  // Because source is 4-byte aligned in our uses of this function,
2026  // we keep source aligned for the rep_movs call by copying the odd bytes
2027  // at the end of the ranges.
2028  mov(scratch, Operand(source, length, times_1, -4));
2029  mov(Operand(destination, length, times_1, -4), scratch);
2030 
2031  cmp(length, Immediate(8));
2032  j(below_equal, &len4, Label::kNear);
2033  cmp(length, Immediate(12));
2034  j(below_equal, &len8, Label::kNear);
2035  cmp(length, Immediate(16));
2036  j(below_equal, &len12, Label::kNear);
2037 
2038  mov(scratch, ecx);
2039  shr(ecx, 2);
2040  rep_movs();
2041  and_(scratch, Immediate(0x3));
2042  add(destination, scratch);
2043  jmp(&done, Label::kNear);
2044 
2045  bind(&len12);
2046  mov(scratch, Operand(source, 8));
2047  mov(Operand(destination, 8), scratch);
2048  bind(&len8);
2049  mov(scratch, Operand(source, 4));
2050  mov(Operand(destination, 4), scratch);
2051  bind(&len4);
2052  mov(scratch, Operand(source, 0));
2053  mov(Operand(destination, 0), scratch);
2054  add(destination, length);
2055  jmp(&done, Label::kNear);
2056 
2057  bind(&short_string);
2058  test(length, length);
2059  j(zero, &done, Label::kNear);
2060 
2061  bind(&short_loop);
2062  mov_b(scratch, Operand(source, 0));
2063  mov_b(Operand(destination, 0), scratch);
2064  inc(source);
2065  inc(destination);
2066  dec(length);
2067  j(not_zero, &short_loop);
2068 
2069  bind(&done);
2070 }
2071 
2072 
2073 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
2074  Register end_offset,
2075  Register filler) {
2076  Label loop, entry;
2077  jmp(&entry);
2078  bind(&loop);
2079  mov(Operand(start_offset, 0), filler);
2080  add(start_offset, Immediate(kPointerSize));
2081  bind(&entry);
2082  cmp(start_offset, end_offset);
2083  j(less, &loop);
2084 }
2085 
2086 
2087 void MacroAssembler::BooleanBitTest(Register object,
2088  int field_offset,
2089  int bit_index) {
2090  bit_index += kSmiTagSize + kSmiShiftSize;
2092  int byte_index = bit_index / kBitsPerByte;
2093  int byte_bit_index = bit_index & (kBitsPerByte - 1);
2094  test_b(FieldOperand(object, field_offset + byte_index),
2095  static_cast<byte>(1 << byte_bit_index));
2096 }
2097 
2098 
2099 
2100 void MacroAssembler::NegativeZeroTest(Register result,
2101  Register op,
2102  Label* then_label) {
2103  Label ok;
2104  test(result, result);
2105  j(not_zero, &ok);
2106  test(op, op);
2107  j(sign, then_label);
2108  bind(&ok);
2109 }
2110 
2111 
2112 void MacroAssembler::NegativeZeroTest(Register result,
2113  Register op1,
2114  Register op2,
2115  Register scratch,
2116  Label* then_label) {
2117  Label ok;
2118  test(result, result);
2119  j(not_zero, &ok);
2120  mov(scratch, op1);
2121  or_(scratch, op2);
2122  j(sign, then_label);
2123  bind(&ok);
2124 }
2125 
2126 
2127 void MacroAssembler::TryGetFunctionPrototype(Register function,
2128  Register result,
2129  Register scratch,
2130  Label* miss,
2131  bool miss_on_bound_function) {
2132  // Check that the receiver isn't a smi.
2133  JumpIfSmi(function, miss);
2134 
2135  // Check that the function really is a function.
2136  CmpObjectType(function, JS_FUNCTION_TYPE, result);
2137  j(not_equal, miss);
2138 
2139  if (miss_on_bound_function) {
2140  // If a bound function, go to miss label.
2141  mov(scratch,
2142  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2143  BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
2144  SharedFunctionInfo::kBoundFunction);
2145  j(not_zero, miss);
2146  }
2147 
2148  // Make sure that the function has an instance prototype.
2149  Label non_instance;
2150  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
2151  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
2152  j(not_zero, &non_instance);
2153 
2154  // Get the prototype or initial map from the function.
2155  mov(result,
2156  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2157 
2158  // If the prototype or initial map is the hole, don't return it and
2159  // simply miss the cache instead. This will allow us to allocate a
2160  // prototype object on-demand in the runtime system.
2161  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
2162  j(equal, miss);
2163 
2164  // If the function does not have an initial map, we're done.
2165  Label done;
2166  CmpObjectType(result, MAP_TYPE, scratch);
2167  j(not_equal, &done);
2168 
2169  // Get the prototype from the initial map.
2170  mov(result, FieldOperand(result, Map::kPrototypeOffset));
2171  jmp(&done);
2172 
2173  // Non-instance prototype: Fetch prototype from constructor field
2174  // in initial map.
2175  bind(&non_instance);
2176  mov(result, FieldOperand(result, Map::kConstructorOffset));
2177 
2178  // All done.
2179  bind(&done);
2180 }
2181 
2182 
2183 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
2184  ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
2185  call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
2186 }
2187 
2188 
2189 void MacroAssembler::TailCallStub(CodeStub* stub) {
2190  jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
2191 }
2192 
2193 
2194 void MacroAssembler::StubReturn(int argc) {
2195  ASSERT(argc >= 1 && generating_stub());
2196  ret((argc - 1) * kPointerSize);
2197 }
2198 
2199 
2200 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2201  return has_frame_ || !stub->SometimesSetsUpAFrame();
2202 }
2203 
2204 
2205 void MacroAssembler::IllegalOperation(int num_arguments) {
2206  if (num_arguments > 0) {
2207  add(esp, Immediate(num_arguments * kPointerSize));
2208  }
2209  mov(eax, Immediate(isolate()->factory()->undefined_value()));
2210 }
2211 
2212 
2213 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2214  // The assert checks that the constants for the maximum number of digits
2215  // for an array index cached in the hash field and the number of bits
2216  // reserved for it does not conflict.
2217  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2218  (1 << String::kArrayIndexValueBits));
2219  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2220  // the low kHashShift bits.
2221  and_(hash, String::kArrayIndexValueMask);
2222  STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
2223  if (String::kHashShift > kSmiTagSize) {
2224  shr(hash, String::kHashShift - kSmiTagSize);
2225  }
2226  if (!index.is(hash)) {
2227  mov(index, hash);
2228  }
2229 }
2230 
2231 
2232 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2233  int num_arguments,
2234  SaveFPRegsMode save_doubles) {
2235  // If the expected number of arguments of the runtime function is
2236  // constant, we check that the actual number of arguments match the
2237  // expectation.
2238  if (f->nargs >= 0 && f->nargs != num_arguments) {
2239  IllegalOperation(num_arguments);
2240  return;
2241  }
2242 
2243  // TODO(1236192): Most runtime routines don't need the number of
2244  // arguments passed in because it is constant. At some point we
2245  // should remove this need and make the runtime routine entry code
2246  // smarter.
2247  Move(eax, Immediate(num_arguments));
2248  mov(ebx, Immediate(ExternalReference(f, isolate())));
2249  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
2250  : kDontSaveFPRegs);
2251  CallStub(&ces);
2252 }
2253 
2254 
2255 void MacroAssembler::CallExternalReference(ExternalReference ref,
2256  int num_arguments) {
2257  mov(eax, Immediate(num_arguments));
2258  mov(ebx, Immediate(ref));
2259 
2260  CEntryStub stub(1);
2261  CallStub(&stub);
2262 }
2263 
2264 
2265 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2266  int num_arguments,
2267  int result_size) {
2268  // TODO(1236192): Most runtime routines don't need the number of
2269  // arguments passed in because it is constant. At some point we
2270  // should remove this need and make the runtime routine entry code
2271  // smarter.
2272  Move(eax, Immediate(num_arguments));
2273  JumpToExternalReference(ext);
2274 }
2275 
2276 
2277 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2278  int num_arguments,
2279  int result_size) {
2280  TailCallExternalReference(ExternalReference(fid, isolate()),
2281  num_arguments,
2282  result_size);
2283 }
2284 
2285 
2286 Operand ApiParameterOperand(int index) {
2287  return Operand(esp, index * kPointerSize);
2288 }
2289 
2290 
2291 void MacroAssembler::PrepareCallApiFunction(int argc) {
2292  EnterApiExitFrame(argc);
2293  if (emit_debug_code()) {
2294  mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
2295  }
2296 }
2297 
2298 
2299 void MacroAssembler::CallApiFunctionAndReturn(
2300  Register function_address,
2301  Address thunk_address,
2302  Operand thunk_last_arg,
2303  int stack_space,
2304  Operand return_value_operand,
2305  Operand* context_restore_operand) {
2306  ExternalReference next_address =
2307  ExternalReference::handle_scope_next_address(isolate());
2308  ExternalReference limit_address =
2309  ExternalReference::handle_scope_limit_address(isolate());
2310  ExternalReference level_address =
2311  ExternalReference::handle_scope_level_address(isolate());
2312 
2313  ASSERT(edx.is(function_address));
2314  // Allocate HandleScope in callee-save registers.
2315  mov(ebx, Operand::StaticVariable(next_address));
2316  mov(edi, Operand::StaticVariable(limit_address));
2317  add(Operand::StaticVariable(level_address), Immediate(1));
2318 
2319  if (FLAG_log_timer_events) {
2320  FrameScope frame(this, StackFrame::MANUAL);
2321  PushSafepointRegisters();
2322  PrepareCallCFunction(1, eax);
2323  mov(Operand(esp, 0),
2324  Immediate(ExternalReference::isolate_address(isolate())));
2325  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2326  PopSafepointRegisters();
2327  }
2328 
2329 
2330  Label profiler_disabled;
2331  Label end_profiler_check;
2332  bool* is_profiling_flag =
2333  isolate()->cpu_profiler()->is_profiling_address();
2334  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2335  mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
2336  cmpb(Operand(eax, 0), 0);
2337  j(zero, &profiler_disabled);
2338 
2339  // Additional parameter is the address of the actual getter function.
2340  mov(thunk_last_arg, function_address);
2341  // Call the api function.
2342  call(thunk_address, RelocInfo::RUNTIME_ENTRY);
2343  jmp(&end_profiler_check);
2344 
2345  bind(&profiler_disabled);
2346  // Call the api function.
2347  call(function_address);
2348  bind(&end_profiler_check);
2349 
2350  if (FLAG_log_timer_events) {
2351  FrameScope frame(this, StackFrame::MANUAL);
2352  PushSafepointRegisters();
2353  PrepareCallCFunction(1, eax);
2354  mov(Operand(esp, 0),
2355  Immediate(ExternalReference::isolate_address(isolate())));
2356  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2357  PopSafepointRegisters();
2358  }
2359 
2360  Label prologue;
2361  // Load the value from ReturnValue
2362  mov(eax, return_value_operand);
2363 
2364  Label promote_scheduled_exception;
2365  Label exception_handled;
2366  Label delete_allocated_handles;
2367  Label leave_exit_frame;
2368 
2369  bind(&prologue);
2370  // No more valid handles (the result handle was the last one). Restore
2371  // previous handle scope.
2372  mov(Operand::StaticVariable(next_address), ebx);
2373  sub(Operand::StaticVariable(level_address), Immediate(1));
2374  Assert(above_equal, kInvalidHandleScopeLevel);
2375  cmp(edi, Operand::StaticVariable(limit_address));
2376  j(not_equal, &delete_allocated_handles);
2377  bind(&leave_exit_frame);
2378 
2379  // Check if the function scheduled an exception.
2380  ExternalReference scheduled_exception_address =
2381  ExternalReference::scheduled_exception_address(isolate());
2382  cmp(Operand::StaticVariable(scheduled_exception_address),
2383  Immediate(isolate()->factory()->the_hole_value()));
2384  j(not_equal, &promote_scheduled_exception);
2385  bind(&exception_handled);
2386 
2387 #if ENABLE_EXTRA_CHECKS
2388  // Check if the function returned a valid JavaScript value.
2389  Label ok;
2390  Register return_value = eax;
2391  Register map = ecx;
2392 
2393  JumpIfSmi(return_value, &ok, Label::kNear);
2394  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
2395 
2396  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2397  j(below, &ok, Label::kNear);
2398 
2399  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2400  j(above_equal, &ok, Label::kNear);
2401 
2402  cmp(map, isolate()->factory()->heap_number_map());
2403  j(equal, &ok, Label::kNear);
2404 
2405  cmp(return_value, isolate()->factory()->undefined_value());
2406  j(equal, &ok, Label::kNear);
2407 
2408  cmp(return_value, isolate()->factory()->true_value());
2409  j(equal, &ok, Label::kNear);
2410 
2411  cmp(return_value, isolate()->factory()->false_value());
2412  j(equal, &ok, Label::kNear);
2413 
2414  cmp(return_value, isolate()->factory()->null_value());
2415  j(equal, &ok, Label::kNear);
2416 
2417  Abort(kAPICallReturnedInvalidObject);
2418 
2419  bind(&ok);
2420 #endif
2421 
2422  bool restore_context = context_restore_operand != NULL;
2423  if (restore_context) {
2424  mov(esi, *context_restore_operand);
2425  }
2426  LeaveApiExitFrame(!restore_context);
2427  ret(stack_space * kPointerSize);
2428 
2429  bind(&promote_scheduled_exception);
2430  {
2431  FrameScope frame(this, StackFrame::INTERNAL);
2432  CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
2433  }
2434  jmp(&exception_handled);
2435 
2436  // HandleScope limit has changed. Delete allocated extensions.
2437  ExternalReference delete_extensions =
2438  ExternalReference::delete_handle_scope_extensions(isolate());
2439  bind(&delete_allocated_handles);
2440  mov(Operand::StaticVariable(limit_address), edi);
2441  mov(edi, eax);
2442  mov(Operand(esp, 0),
2443  Immediate(ExternalReference::isolate_address(isolate())));
2444  mov(eax, Immediate(delete_extensions));
2445  call(eax);
2446  mov(eax, edi);
2447  jmp(&leave_exit_frame);
2448 }
2449 
2450 
2451 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
2452  // Set the entry point and jump to the C entry runtime stub.
2453  mov(ebx, Immediate(ext));
2454  CEntryStub ces(1);
2455  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
2456 }
2457 
2458 
2459 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2460  const ParameterCount& actual,
2461  Handle<Code> code_constant,
2462  const Operand& code_operand,
2463  Label* done,
2464  bool* definitely_mismatches,
2465  InvokeFlag flag,
2466  Label::Distance done_near,
2467  const CallWrapper& call_wrapper) {
2468  bool definitely_matches = false;
2469  *definitely_mismatches = false;
2470  Label invoke;
2471  if (expected.is_immediate()) {
2472  ASSERT(actual.is_immediate());
2473  if (expected.immediate() == actual.immediate()) {
2474  definitely_matches = true;
2475  } else {
2476  mov(eax, actual.immediate());
2477  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2478  if (expected.immediate() == sentinel) {
2479  // Don't worry about adapting arguments for builtins that
2480  // don't want that done. Skip adaption code by making it look
2481  // like we have a match between expected and actual number of
2482  // arguments.
2483  definitely_matches = true;
2484  } else {
2485  *definitely_mismatches = true;
2486  mov(ebx, expected.immediate());
2487  }
2488  }
2489  } else {
2490  if (actual.is_immediate()) {
2491  // Expected is in register, actual is immediate. This is the
2492  // case when we invoke function values without going through the
2493  // IC mechanism.
2494  cmp(expected.reg(), actual.immediate());
2495  j(equal, &invoke);
2496  ASSERT(expected.reg().is(ebx));
2497  mov(eax, actual.immediate());
2498  } else if (!expected.reg().is(actual.reg())) {
2499  // Both expected and actual are in (different) registers. This
2500  // is the case when we invoke functions using call and apply.
2501  cmp(expected.reg(), actual.reg());
2502  j(equal, &invoke);
2503  ASSERT(actual.reg().is(eax));
2504  ASSERT(expected.reg().is(ebx));
2505  }
2506  }
2507 
2508  if (!definitely_matches) {
2509  Handle<Code> adaptor =
2510  isolate()->builtins()->ArgumentsAdaptorTrampoline();
2511  if (!code_constant.is_null()) {
2512  mov(edx, Immediate(code_constant));
2513  add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2514  } else if (!code_operand.is_reg(edx)) {
2515  mov(edx, code_operand);
2516  }
2517 
2518  if (flag == CALL_FUNCTION) {
2519  call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
2520  call(adaptor, RelocInfo::CODE_TARGET);
2521  call_wrapper.AfterCall();
2522  if (!*definitely_mismatches) {
2523  jmp(done, done_near);
2524  }
2525  } else {
2526  jmp(adaptor, RelocInfo::CODE_TARGET);
2527  }
2528  bind(&invoke);
2529  }
2530 }
2531 
2532 
2533 void MacroAssembler::InvokeCode(const Operand& code,
2534  const ParameterCount& expected,
2535  const ParameterCount& actual,
2536  InvokeFlag flag,
2537  const CallWrapper& call_wrapper) {
2538  // You can't call a function without a valid frame.
2539  ASSERT(flag == JUMP_FUNCTION || has_frame());
2540 
2541  Label done;
2542  bool definitely_mismatches = false;
2543  InvokePrologue(expected, actual, Handle<Code>::null(), code,
2544  &done, &definitely_mismatches, flag, Label::kNear,
2545  call_wrapper);
2546  if (!definitely_mismatches) {
2547  if (flag == CALL_FUNCTION) {
2548  call_wrapper.BeforeCall(CallSize(code));
2549  call(code);
2550  call_wrapper.AfterCall();
2551  } else {
2552  ASSERT(flag == JUMP_FUNCTION);
2553  jmp(code);
2554  }
2555  bind(&done);
2556  }
2557 }
2558 
2559 
2560 void MacroAssembler::InvokeFunction(Register fun,
2561  const ParameterCount& actual,
2562  InvokeFlag flag,
2563  const CallWrapper& call_wrapper) {
2564  // You can't call a function without a valid frame.
2565  ASSERT(flag == JUMP_FUNCTION || has_frame());
2566 
2567  ASSERT(fun.is(edi));
2568  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2569  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2570  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2571  SmiUntag(ebx);
2572 
2573  ParameterCount expected(ebx);
2574  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2575  expected, actual, flag, call_wrapper);
2576 }
2577 
2578 
2579 void MacroAssembler::InvokeFunction(Register fun,
2580  const ParameterCount& expected,
2581  const ParameterCount& actual,
2582  InvokeFlag flag,
2583  const CallWrapper& call_wrapper) {
2584  // You can't call a function without a valid frame.
2585  ASSERT(flag == JUMP_FUNCTION || has_frame());
2586 
2587  ASSERT(fun.is(edi));
2588  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2589 
2590  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2591  expected, actual, flag, call_wrapper);
2592 }
2593 
2594 
2595 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2596  const ParameterCount& expected,
2597  const ParameterCount& actual,
2598  InvokeFlag flag,
2599  const CallWrapper& call_wrapper) {
2600  LoadHeapObject(edi, function);
2601  InvokeFunction(edi, expected, actual, flag, call_wrapper);
2602 }
2603 
2604 
2605 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2606  InvokeFlag flag,
2607  const CallWrapper& call_wrapper) {
2608  // You can't call a builtin without a valid frame.
2609  ASSERT(flag == JUMP_FUNCTION || has_frame());
2610 
2611  // Rely on the assertion to check that the number of provided
2612  // arguments match the expected number of arguments. Fake a
2613  // parameter count to avoid emitting code to do the check.
2614  ParameterCount expected(0);
2615  GetBuiltinFunction(edi, id);
2616  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2617  expected, expected, flag, call_wrapper);
2618 }
2619 
2620 
2621 void MacroAssembler::GetBuiltinFunction(Register target,
2622  Builtins::JavaScript id) {
2623  // Load the JavaScript builtin function from the builtins object.
2624  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2625  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
2626  mov(target, FieldOperand(target,
2627  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2628 }
2629 
2630 
2631 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2632  ASSERT(!target.is(edi));
2633  // Load the JavaScript builtin function from the builtins object.
2634  GetBuiltinFunction(edi, id);
2635  // Load the code entry point from the function into the target register.
2636  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
2637 }
2638 
2639 
2640 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2641  if (context_chain_length > 0) {
2642  // Move up the chain of contexts to the context containing the slot.
2643  mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2644  for (int i = 1; i < context_chain_length; i++) {
2645  mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2646  }
2647  } else {
2648  // Slot is in the current function context. Move it into the
2649  // destination register in case we store into it (the write barrier
2650  // cannot be allowed to destroy the context in esi).
2651  mov(dst, esi);
2652  }
2653 
2654  // We should not have found a with context by walking the context chain
2655  // (i.e., the static scope chain and runtime context chain do not agree).
2656  // A variable occurring in such a scope should have slot type LOOKUP and
2657  // not CONTEXT.
2658  if (emit_debug_code()) {
2659  cmp(FieldOperand(dst, HeapObject::kMapOffset),
2660  isolate()->factory()->with_context_map());
2661  Check(not_equal, kVariableResolvedToWithContext);
2662  }
2663 }
2664 
2665 
2666 void MacroAssembler::LoadTransitionedArrayMapConditional(
2667  ElementsKind expected_kind,
2668  ElementsKind transitioned_kind,
2669  Register map_in_out,
2670  Register scratch,
2671  Label* no_map_match) {
2672  // Load the global or builtins object from the current context.
2673  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2674  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
2675 
2676  // Check that the function's map is the same as the expected cached map.
2677  mov(scratch, Operand(scratch,
2678  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2679 
2680  size_t offset = expected_kind * kPointerSize +
2681  FixedArrayBase::kHeaderSize;
2682  cmp(map_in_out, FieldOperand(scratch, offset));
2683  j(not_equal, no_map_match);
2684 
2685  // Use the transitioned cached map.
2686  offset = transitioned_kind * kPointerSize +
2687  FixedArrayBase::kHeaderSize;
2688  mov(map_in_out, FieldOperand(scratch, offset));
2689 }
2690 
2691 
2692 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2693  // Load the global or builtins object from the current context.
2694  mov(function,
2695  Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2696  // Load the native context from the global or builtins object.
2697  mov(function,
2698  FieldOperand(function, GlobalObject::kNativeContextOffset));
2699  // Load the function from the native context.
2700  mov(function, Operand(function, Context::SlotOffset(index)));
2701 }
2702 
2703 
2704 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2705  Register map) {
2706  // Load the initial map. The global functions all have initial maps.
2707  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2708  if (emit_debug_code()) {
2709  Label ok, fail;
2710  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2711  jmp(&ok);
2712  bind(&fail);
2713  Abort(kGlobalFunctionsMustHaveInitialMap);
2714  bind(&ok);
2715  }
2716 }
2717 
2718 
2719 // Store the value in register src in the safepoint register stack
2720 // slot for register dst.
2721 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2722  mov(SafepointRegisterSlot(dst), src);
2723 }
2724 
2725 
2726 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2727  mov(SafepointRegisterSlot(dst), src);
2728 }
2729 
2730 
2731 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2732  mov(dst, SafepointRegisterSlot(src));
2733 }
2734 
2735 
2736 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2737  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2738 }
2739 
2740 
2741 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2742  // The registers are pushed starting with the lowest encoding,
2743  // which means that lowest encodings are furthest away from
2744  // the stack pointer.
2745  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2746  return kNumSafepointRegisters - reg_code - 1;
2747 }
2748 
2749 
2750 void MacroAssembler::LoadHeapObject(Register result,
2751  Handle<HeapObject> object) {
2752  AllowDeferredHandleDereference embedding_raw_address;
2753  if (isolate()->heap()->InNewSpace(*object)) {
2754  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2755  mov(result, Operand::ForCell(cell));
2756  } else {
2757  mov(result, object);
2758  }
2759 }
2760 
2761 
2762 void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
2763  AllowDeferredHandleDereference using_raw_address;
2764  if (isolate()->heap()->InNewSpace(*object)) {
2765  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2766  cmp(reg, Operand::ForCell(cell));
2767  } else {
2768  cmp(reg, object);
2769  }
2770 }
2771 
2772 
2773 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2774  AllowDeferredHandleDereference using_raw_address;
2775  if (isolate()->heap()->InNewSpace(*object)) {
2776  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2777  push(Operand::ForCell(cell));
2778  } else {
2779  Push(object);
2780  }
2781 }
2782 
2783 
2784 void MacroAssembler::Ret() {
2785  ret(0);
2786 }
2787 
2788 
2789 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2790  if (is_uint16(bytes_dropped)) {
2791  ret(bytes_dropped);
2792  } else {
2793  pop(scratch);
2794  add(esp, Immediate(bytes_dropped));
2795  push(scratch);
2796  ret(0);
2797  }
2798 }
2799 
2800 
2801 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
2802  // Make sure the floating point stack is either empty or has depth items.
2803  ASSERT(depth <= 7);
2804  // This is very expensive.
2805  ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
2806 
2807  // The top-of-stack (tos) is 7 if there is one item pushed.
2808  int tos = (8 - depth) % 8;
2809  const int kTopMask = 0x3800;
2810  push(eax);
2811  fwait();
2812  fnstsw_ax();
2813  and_(eax, kTopMask);
2814  shr(eax, 11);
2815  cmp(eax, Immediate(tos));
2816  Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
2817  fnclex();
2818  pop(eax);
2819 }
2820 
2821 
2822 void MacroAssembler::Drop(int stack_elements) {
2823  if (stack_elements > 0) {
2824  add(esp, Immediate(stack_elements * kPointerSize));
2825  }
2826 }
2827 
2828 
2829 void MacroAssembler::Move(Register dst, Register src) {
2830  if (!dst.is(src)) {
2831  mov(dst, src);
2832  }
2833 }
2834 
2835 
2836 void MacroAssembler::Move(Register dst, const Immediate& x) {
2837  if (x.is_zero()) {
2838  xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
2839  } else {
2840  mov(dst, x);
2841  }
2842 }
2843 
2844 
2845 void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
2846  mov(dst, x);
2847 }
2848 
2849 
2850 void MacroAssembler::Move(XMMRegister dst, double val) {
2851  // TODO(titzer): recognize double constants with ExternalReferences.
2852  CpuFeatureScope scope(this, SSE2);
2853  uint64_t int_val = BitCast<uint64_t, double>(val);
2854  if (int_val == 0) {
2855  xorps(dst, dst);
2856  } else {
2857  int32_t lower = static_cast<int32_t>(int_val);
2858  int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
2859  push(Immediate(upper));
2860  push(Immediate(lower));
2861  movsd(dst, Operand(esp, 0));
2862  add(esp, Immediate(kDoubleSize));
2863  }
2864 }
2865 
2866 
2867 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2868  if (FLAG_native_code_counters && counter->Enabled()) {
2869  mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2870  }
2871 }
2872 
2873 
2874 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2875  ASSERT(value > 0);
2876  if (FLAG_native_code_counters && counter->Enabled()) {
2877  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2878  if (value == 1) {
2879  inc(operand);
2880  } else {
2881  add(operand, Immediate(value));
2882  }
2883  }
2884 }
2885 
2886 
2887 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2888  ASSERT(value > 0);
2889  if (FLAG_native_code_counters && counter->Enabled()) {
2890  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2891  if (value == 1) {
2892  dec(operand);
2893  } else {
2894  sub(operand, Immediate(value));
2895  }
2896  }
2897 }
2898 
2899 
2900 void MacroAssembler::IncrementCounter(Condition cc,
2901  StatsCounter* counter,
2902  int value) {
2903  ASSERT(value > 0);
2904  if (FLAG_native_code_counters && counter->Enabled()) {
2905  Label skip;
2906  j(NegateCondition(cc), &skip);
2907  pushfd();
2908  IncrementCounter(counter, value);
2909  popfd();
2910  bind(&skip);
2911  }
2912 }
2913 
2914 
2915 void MacroAssembler::DecrementCounter(Condition cc,
2916  StatsCounter* counter,
2917  int value) {
2918  ASSERT(value > 0);
2919  if (FLAG_native_code_counters && counter->Enabled()) {
2920  Label skip;
2921  j(NegateCondition(cc), &skip);
2922  pushfd();
2923  DecrementCounter(counter, value);
2924  popfd();
2925  bind(&skip);
2926  }
2927 }
2928 
2929 
2930 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
2931  if (emit_debug_code()) Check(cc, reason);
2932 }
2933 
2934 
2935 void MacroAssembler::AssertFastElements(Register elements) {
2936  if (emit_debug_code()) {
2937  Factory* factory = isolate()->factory();
2938  Label ok;
2939  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2940  Immediate(factory->fixed_array_map()));
2941  j(equal, &ok);
2942  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2943  Immediate(factory->fixed_double_array_map()));
2944  j(equal, &ok);
2945  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2946  Immediate(factory->fixed_cow_array_map()));
2947  j(equal, &ok);
2948  Abort(kJSObjectWithFastElementsMapHasSlowElements);
2949  bind(&ok);
2950  }
2951 }
2952 
2953 
2954 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
2955  Label L;
2956  j(cc, &L);
2957  Abort(reason);
2958  // will not return here
2959  bind(&L);
2960 }
2961 
2962 
2963 void MacroAssembler::CheckStackAlignment() {
2964  int frame_alignment = OS::ActivationFrameAlignment();
2965  int frame_alignment_mask = frame_alignment - 1;
2966  if (frame_alignment > kPointerSize) {
2967  ASSERT(IsPowerOf2(frame_alignment));
2968  Label alignment_as_expected;
2969  test(esp, Immediate(frame_alignment_mask));
2970  j(zero, &alignment_as_expected);
2971  // Abort if stack is not aligned.
2972  int3();
2973  bind(&alignment_as_expected);
2974  }
2975 }
2976 
2977 
2978 void MacroAssembler::Abort(BailoutReason reason) {
2979 #ifdef DEBUG
2980  const char* msg = GetBailoutReason(reason);
2981  if (msg != NULL) {
2982  RecordComment("Abort message: ");
2983  RecordComment(msg);
2984  }
2985 
2986  if (FLAG_trap_on_abort) {
2987  int3();
2988  return;
2989  }
2990 #endif
2991 
2992  push(eax);
2993  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
2994  // Disable stub call restrictions to always allow calls to abort.
2995  if (!has_frame_) {
2996  // We don't actually want to generate a pile of code for this, so just
2997  // claim there is a stack frame, without generating one.
2998  FrameScope scope(this, StackFrame::NONE);
2999  CallRuntime(Runtime::kAbort, 1);
3000  } else {
3001  CallRuntime(Runtime::kAbort, 1);
3002  }
3003  // will not return here
3004  int3();
3005 }
3006 
3007 
3008 void MacroAssembler::Throw(BailoutReason reason) {
3009 #ifdef DEBUG
3010  const char* msg = GetBailoutReason(reason);
3011  if (msg != NULL) {
3012  RecordComment("Throw message: ");
3013  RecordComment(msg);
3014  }
3015 #endif
3016 
3017  push(eax);
3018  push(Immediate(Smi::FromInt(reason)));
3019  // Disable stub call restrictions to always allow calls to throw.
3020  if (!has_frame_) {
3021  // We don't actually want to generate a pile of code for this, so just
3022  // claim there is a stack frame, without generating one.
3023  FrameScope scope(this, StackFrame::NONE);
3024  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3025  } else {
3026  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3027  }
3028  // will not return here
3029  int3();
3030 }
3031 
3032 
3033 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3034  Label L;
3035  j(NegateCondition(cc), &L);
3036  Throw(reason);
3037  // will not return here
3038  bind(&L);
3039 }
3040 
3041 
3042 void MacroAssembler::LoadInstanceDescriptors(Register map,
3043  Register descriptors) {
3044  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3045 }
3046 
3047 
3048 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3049  mov(dst, FieldOperand(map, Map::kBitField3Offset));
3050  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3051 }
3052 
3053 
3054 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
3055  Register scratch,
3056  int power) {
3057  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
3058  HeapNumber::kExponentBits));
3059  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
3060  movd(dst, scratch);
3061  psllq(dst, HeapNumber::kMantissaBits);
3062 }
3063 
3064 
3065 void MacroAssembler::LookupNumberStringCache(Register object,
3066  Register result,
3067  Register scratch1,
3068  Register scratch2,
3069  Label* not_found) {
3070  // Use of registers. Register result is used as a temporary.
3071  Register number_string_cache = result;
3072  Register mask = scratch1;
3073  Register scratch = scratch2;
3074 
3075  // Load the number string cache.
3076  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3077  // Make the hash mask from the length of the number string cache. It
3078  // contains two elements (number and string) for each cache entry.
3079  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
3080  shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
3081  sub(mask, Immediate(1)); // Make mask.
3082 
3083  // Calculate the entry in the number string cache. The hash value in the
3084  // number string cache for smis is just the smi value, and the hash for
3085  // doubles is the xor of the upper and lower words. See
3086  // Heap::GetNumberStringCache.
3087  Label smi_hash_calculated;
3088  Label load_result_from_cache;
3089  Label not_smi;
3090  STATIC_ASSERT(kSmiTag == 0);
3091  JumpIfNotSmi(object, &not_smi, Label::kNear);
3092  mov(scratch, object);
3093  SmiUntag(scratch);
3094  jmp(&smi_hash_calculated, Label::kNear);
3095  bind(&not_smi);
3096  cmp(FieldOperand(object, HeapObject::kMapOffset),
3097  isolate()->factory()->heap_number_map());
3098  j(not_equal, not_found);
3099  STATIC_ASSERT(8 == kDoubleSize);
3100  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
3101  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
3102  // Object is heap number and hash is now in scratch. Calculate cache index.
3103  and_(scratch, mask);
3104  Register index = scratch;
3105  Register probe = mask;
3106  mov(probe,
3107  FieldOperand(number_string_cache,
3108  index,
3110  FixedArray::kHeaderSize));
3111  JumpIfSmi(probe, not_found);
3112  if (CpuFeatures::IsSupported(SSE2)) {
3113  CpuFeatureScope fscope(this, SSE2);
3114  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3115  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
3116  } else {
3117  fld_d(FieldOperand(object, HeapNumber::kValueOffset));
3118  fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
3119  FCmp();
3120  }
3121  j(parity_even, not_found); // Bail out if NaN is involved.
3122  j(not_equal, not_found); // The cache did not contain this value.
3123  jmp(&load_result_from_cache, Label::kNear);
3124 
3125  bind(&smi_hash_calculated);
3126  // Object is smi and hash is now in scratch. Calculate cache index.
3127  and_(scratch, mask);
3128  // Check if the entry is the smi we are looking for.
3129  cmp(object,
3130  FieldOperand(number_string_cache,
3131  index,
3133  FixedArray::kHeaderSize));
3134  j(not_equal, not_found);
3135 
3136  // Get the result from the cache.
3137  bind(&load_result_from_cache);
3138  mov(result,
3139  FieldOperand(number_string_cache,
3140  index,
3142  FixedArray::kHeaderSize + kPointerSize));
3143  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
3144 }
3145 
3146 
3147 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
3148  Register instance_type,
3149  Register scratch,
3150  Label* failure) {
3151  if (!scratch.is(instance_type)) {
3152  mov(scratch, instance_type);
3153  }
3154  and_(scratch,
3156  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
3157  j(not_equal, failure);
3158 }
3159 
3160 
3161 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
3162  Register object2,
3163  Register scratch1,
3164  Register scratch2,
3165  Label* failure) {
3166  // Check that both objects are not smis.
3167  STATIC_ASSERT(kSmiTag == 0);
3168  mov(scratch1, object1);
3169  and_(scratch1, object2);
3170  JumpIfSmi(scratch1, failure);
3171 
3172  // Load instance type for both strings.
3173  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
3174  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
3175  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
3176  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
3177 
3178  // Check that both are flat ASCII strings.
3179  const int kFlatAsciiStringMask =
3181  const int kFlatAsciiStringTag =
3183  // Interleave bits from both instance types and compare them in one check.
3184  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
3185  and_(scratch1, kFlatAsciiStringMask);
3186  and_(scratch2, kFlatAsciiStringMask);
3187  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
3188  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
3189  j(not_equal, failure);
3190 }
3191 
3192 
3193 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
3194  Label* not_unique_name,
3195  Label::Distance distance) {
3197  Label succeed;
3198  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3199  j(zero, &succeed);
3200  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
3201  j(not_equal, not_unique_name, distance);
3202 
3203  bind(&succeed);
3204 }
3205 
3206 
3207 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3208  Register index,
3209  Register value,
3210  uint32_t encoding_mask) {
3211  Label is_object;
3212  JumpIfNotSmi(string, &is_object, Label::kNear);
3213  Abort(kNonObject);
3214  bind(&is_object);
3215 
3216  push(value);
3217  mov(value, FieldOperand(string, HeapObject::kMapOffset));
3218  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
3219 
3220  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
3221  cmp(value, Immediate(encoding_mask));
3222  pop(value);
3223  Check(equal, kUnexpectedStringType);
3224 
3225  // The index is assumed to be untagged coming in, tag it to compare with the
3226  // string length without using a temp register, it is restored at the end of
3227  // this function.
3228  SmiTag(index);
3229  Check(no_overflow, kIndexIsTooLarge);
3230 
3231  cmp(index, FieldOperand(string, String::kLengthOffset));
3232  Check(less, kIndexIsTooLarge);
3233 
3234  cmp(index, Immediate(Smi::FromInt(0)));
3235  Check(greater_equal, kIndexIsNegative);
3236 
3237  // Restore the index
3238  SmiUntag(index);
3239 }
3240 
3241 
3242 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3243  int frame_alignment = OS::ActivationFrameAlignment();
3244  if (frame_alignment != 0) {
3245  // Make stack end at alignment and make room for num_arguments words
3246  // and the original value of esp.
3247  mov(scratch, esp);
3248  sub(esp, Immediate((num_arguments + 1) * kPointerSize));
3249  ASSERT(IsPowerOf2(frame_alignment));
3250  and_(esp, -frame_alignment);
3251  mov(Operand(esp, num_arguments * kPointerSize), scratch);
3252  } else {
3253  sub(esp, Immediate(num_arguments * kPointerSize));
3254  }
3255 }
3256 
3257 
3258 void MacroAssembler::CallCFunction(ExternalReference function,
3259  int num_arguments) {
3260  // Trashing eax is ok as it will be the return value.
3261  mov(eax, Immediate(function));
3262  CallCFunction(eax, num_arguments);
3263 }
3264 
3265 
3266 void MacroAssembler::CallCFunction(Register function,
3267  int num_arguments) {
3268  ASSERT(has_frame());
3269  // Check stack alignment.
3270  if (emit_debug_code()) {
3271  CheckStackAlignment();
3272  }
3273 
3274  call(function);
3275  if (OS::ActivationFrameAlignment() != 0) {
3276  mov(esp, Operand(esp, num_arguments * kPointerSize));
3277  } else {
3278  add(esp, Immediate(num_arguments * kPointerSize));
3279  }
3280 }
3281 
3282 
3283 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
3284  if (r1.is(r2)) return true;
3285  if (r1.is(r3)) return true;
3286  if (r1.is(r4)) return true;
3287  if (r2.is(r3)) return true;
3288  if (r2.is(r4)) return true;
3289  if (r3.is(r4)) return true;
3290  return false;
3291 }
3292 
3293 
3294 CodePatcher::CodePatcher(byte* address, int size)
3295  : address_(address),
3296  size_(size),
3297  masm_(NULL, address, size + Assembler::kGap) {
3298  // Create a new macro assembler pointing to the address of the code to patch.
3299  // The size is adjusted with kGap on order for the assembler to generate size
3300  // bytes of instructions without failing with buffer size constraints.
3301  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3302 }
3303 
3304 
3305 CodePatcher::~CodePatcher() {
3306  // Indicate that code has changed.
3307  CPU::FlushICache(address_, size_);
3308 
3309  // Check that the code was patched as expected.
3310  ASSERT(masm_.pc_ == address_ + size_);
3311  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3312 }
3313 
3314 
3315 void MacroAssembler::CheckPageFlag(
3316  Register object,
3317  Register scratch,
3318  int mask,
3319  Condition cc,
3320  Label* condition_met,
3321  Label::Distance condition_met_distance) {
3322  ASSERT(cc == zero || cc == not_zero);
3323  if (scratch.is(object)) {
3324  and_(scratch, Immediate(~Page::kPageAlignmentMask));
3325  } else {
3326  mov(scratch, Immediate(~Page::kPageAlignmentMask));
3327  and_(scratch, object);
3328  }
3329  if (mask < (1 << kBitsPerByte)) {
3330  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
3331  static_cast<uint8_t>(mask));
3332  } else {
3333  test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
3334  }
3335  j(cc, condition_met, condition_met_distance);
3336 }
3337 
3338 
3339 void MacroAssembler::CheckPageFlagForMap(
3340  Handle<Map> map,
3341  int mask,
3342  Condition cc,
3343  Label* condition_met,
3344  Label::Distance condition_met_distance) {
3345  ASSERT(cc == zero || cc == not_zero);
3346  Page* page = Page::FromAddress(map->address());
3347  ExternalReference reference(ExternalReference::page_flags(page));
3348  // The inlined static address check of the page's flags relies
3349  // on maps never being compacted.
3350  ASSERT(!isolate()->heap()->mark_compact_collector()->
3351  IsOnEvacuationCandidate(*map));
3352  if (mask < (1 << kBitsPerByte)) {
3353  test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
3354  } else {
3355  test(Operand::StaticVariable(reference), Immediate(mask));
3356  }
3357  j(cc, condition_met, condition_met_distance);
3358 }
3359 
3360 
3361 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3362  Register scratch,
3363  Label* if_deprecated) {
3364  if (map->CanBeDeprecated()) {
3365  mov(scratch, map);
3366  mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
3367  and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
3368  j(not_zero, if_deprecated);
3369  }
3370 }
3371 
3372 
3373 void MacroAssembler::JumpIfBlack(Register object,
3374  Register scratch0,
3375  Register scratch1,
3376  Label* on_black,
3377  Label::Distance on_black_near) {
3378  HasColor(object, scratch0, scratch1,
3379  on_black, on_black_near,
3380  1, 0); // kBlackBitPattern.
3381  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3382 }
3383 
3384 
3385 void MacroAssembler::HasColor(Register object,
3386  Register bitmap_scratch,
3387  Register mask_scratch,
3388  Label* has_color,
3389  Label::Distance has_color_distance,
3390  int first_bit,
3391  int second_bit) {
3392  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
3393 
3394  GetMarkBits(object, bitmap_scratch, mask_scratch);
3395 
3396  Label other_color, word_boundary;
3397  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3398  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
3399  add(mask_scratch, mask_scratch); // Shift left 1 by adding.
3400  j(zero, &word_boundary, Label::kNear);
3401  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3402  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3403  jmp(&other_color, Label::kNear);
3404 
3405  bind(&word_boundary);
3406  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
3407 
3408  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3409  bind(&other_color);
3410 }
3411 
3412 
3413 void MacroAssembler::GetMarkBits(Register addr_reg,
3414  Register bitmap_reg,
3415  Register mask_reg) {
3416  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
3417  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
3418  and_(bitmap_reg, addr_reg);
3419  mov(ecx, addr_reg);
3420  int shift =
3421  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
3422  shr(ecx, shift);
3423  and_(ecx,
3424  (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
3425 
3426  add(bitmap_reg, ecx);
3427  mov(ecx, addr_reg);
3428  shr(ecx, kPointerSizeLog2);
3429  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
3430  mov(mask_reg, Immediate(1));
3431  shl_cl(mask_reg);
3432 }
3433 
3434 
3435 void MacroAssembler::EnsureNotWhite(
3436  Register value,
3437  Register bitmap_scratch,
3438  Register mask_scratch,
3439  Label* value_is_white_and_not_data,
3440  Label::Distance distance) {
3441  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
3442  GetMarkBits(value, bitmap_scratch, mask_scratch);
3443 
3444  // If the value is black or grey we don't need to do anything.
3445  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3446  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3447  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3448  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3449 
3450  Label done;
3451 
3452  // Since both black and grey have a 1 in the first position and white does
3453  // not have a 1 there we only need to check one bit.
3454  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3455  j(not_zero, &done, Label::kNear);
3456 
3457  if (emit_debug_code()) {
3458  // Check for impossible bit pattern.
3459  Label ok;
3460  push(mask_scratch);
3461  // shl. May overflow making the check conservative.
3462  add(mask_scratch, mask_scratch);
3463  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3464  j(zero, &ok, Label::kNear);
3465  int3();
3466  bind(&ok);
3467  pop(mask_scratch);
3468  }
3469 
3470  // Value is white. We check whether it is data that doesn't need scanning.
3471  // Currently only checks for HeapNumber and non-cons strings.
3472  Register map = ecx; // Holds map while checking type.
3473  Register length = ecx; // Holds length of object after checking type.
3474  Label not_heap_number;
3475  Label is_data_object;
3476 
3477  // Check for heap-number
3478  mov(map, FieldOperand(value, HeapObject::kMapOffset));
3479  cmp(map, isolate()->factory()->heap_number_map());
3480  j(not_equal, &not_heap_number, Label::kNear);
3481  mov(length, Immediate(HeapNumber::kSize));
3482  jmp(&is_data_object, Label::kNear);
3483 
3484  bind(&not_heap_number);
3485  // Check for strings.
3487  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3488  // If it's a string and it's not a cons string then it's an object containing
3489  // no GC pointers.
3490  Register instance_type = ecx;
3491  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3492  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
3493  j(not_zero, value_is_white_and_not_data);
3494  // It's a non-indirect (non-cons and non-slice) string.
3495  // If it's external, the length is just ExternalString::kSize.
3496  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3497  Label not_external;
3498  // External strings are the only ones with the kExternalStringTag bit
3499  // set.
3502  test_b(instance_type, kExternalStringTag);
3503  j(zero, &not_external, Label::kNear);
3504  mov(length, Immediate(ExternalString::kSize));
3505  jmp(&is_data_object, Label::kNear);
3506 
3507  bind(&not_external);
3508  // Sequential string, either ASCII or UC16.
3509  ASSERT(kOneByteStringTag == 0x04);
3510  and_(length, Immediate(kStringEncodingMask));
3511  xor_(length, Immediate(kStringEncodingMask));
3512  add(length, Immediate(0x04));
3513  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
3514  // by 2. If we multiply the string length as smi by this, it still
3515  // won't overflow a 32-bit value.
3516  ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
3517  ASSERT(SeqOneByteString::kMaxSize <=
3518  static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
3519  imul(length, FieldOperand(value, String::kLengthOffset));
3520  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
3521  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
3522  and_(length, Immediate(~kObjectAlignmentMask));
3523 
3524  bind(&is_data_object);
3525  // Value is a data object, and it is white. Mark it black. Since we know
3526  // that the object is white we can make it black by flipping one bit.
3527  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
3528 
3529  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
3530  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
3531  length);
3532  if (emit_debug_code()) {
3533  mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3534  cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
3535  Check(less_equal, kLiveBytesCountOverflowChunkSize);
3536  }
3537 
3538  bind(&done);
3539 }
3540 
3541 
3542 void MacroAssembler::EnumLength(Register dst, Register map) {
3543  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3544  mov(dst, FieldOperand(map, Map::kBitField3Offset));
3545  and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
3546 }
3547 
3548 
3549 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3550  Label next, start;
3551  mov(ecx, eax);
3552 
3553  // Check if the enum length field is properly initialized, indicating that
3554  // there is an enum cache.
3555  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3556 
3557  EnumLength(edx, ebx);
3558  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
3559  j(equal, call_runtime);
3560 
3561  jmp(&start);
3562 
3563  bind(&next);
3564  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3565 
3566  // For all objects but the receiver, check that the cache is empty.
3567  EnumLength(edx, ebx);
3568  cmp(edx, Immediate(Smi::FromInt(0)));
3569  j(not_equal, call_runtime);
3570 
3571  bind(&start);
3572 
3573  // Check that there are no elements. Register rcx contains the current JS
3574  // object we've reached through the prototype chain.
3575  Label no_elements;
3576  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
3577  cmp(ecx, isolate()->factory()->empty_fixed_array());
3578  j(equal, &no_elements);
3579 
3580  // Second chance, the object may be using the empty slow element dictionary.
3581  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
3582  j(not_equal, call_runtime);
3583 
3584  bind(&no_elements);
3585  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
3586  cmp(ecx, isolate()->factory()->null_value());
3587  j(not_equal, &next);
3588 }
3589 
3590 
3591 void MacroAssembler::TestJSArrayForAllocationMemento(
3592  Register receiver_reg,
3593  Register scratch_reg,
3594  Label* no_memento_found) {
3595  ExternalReference new_space_start =
3596  ExternalReference::new_space_start(isolate());
3597  ExternalReference new_space_allocation_top =
3598  ExternalReference::new_space_allocation_top_address(isolate());
3599 
3600  lea(scratch_reg, Operand(receiver_reg,
3601  JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3602  cmp(scratch_reg, Immediate(new_space_start));
3603  j(less, no_memento_found);
3604  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
3605  j(greater, no_memento_found);
3606  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
3607  Immediate(isolate()->factory()->allocation_memento_map()));
3608 }
3609 
3610 
3611 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3612  Register object,
3613  Register scratch0,
3614  Register scratch1,
3615  Label* found) {
3616  ASSERT(!scratch1.is(scratch0));
3617  Factory* factory = isolate()->factory();
3618  Register current = scratch0;
3619  Label loop_again;
3620 
3621  // scratch contained elements pointer.
3622  mov(current, object);
3623 
3624  // Loop based on the map going up the prototype chain.
3625  bind(&loop_again);
3626  mov(current, FieldOperand(current, HeapObject::kMapOffset));
3627  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
3628  and_(scratch1, Map::kElementsKindMask);
3629  shr(scratch1, Map::kElementsKindShift);
3630  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
3631  j(equal, found);
3632  mov(current, FieldOperand(current, Map::kPrototypeOffset));
3633  cmp(current, Immediate(factory->null_value()));
3634  j(not_equal, &loop_again);
3635 }
3636 
3637 
3638 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
3639  ASSERT(!dividend.is(eax));
3640  ASSERT(!dividend.is(edx));
3641  MultiplierAndShift ms(divisor);
3642  mov(eax, Immediate(ms.multiplier()));
3643  imul(dividend);
3644  if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
3645  if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
3646  if (ms.shift() > 0) sar(edx, ms.shift());
3647  mov(eax, dividend);
3648  shr(eax, 31);
3649  add(edx, eax);
3650 }
3651 
3652 
3653 } } // namespace v8::internal
3654 
3655 #endif // V8_TARGET_ARCH_IA32
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
unsigned char byte
Definition: disasm.h:33
const intptr_t kSmiTagMask
Definition: v8.h:5480
const intptr_t kDoubleAlignmentMask
Definition: v8globals.h:53
const uint32_t kNaNOrInfinityLowerBoundUpper32
Definition: v8globals.h:456
const Register r3
const int kNumRegisters
Definition: constants-arm.h:57
const Register esp
TypeImpl< ZoneTypeConfig > Type
int int32_t
Definition: unicode.cc:47
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
#define ASSERT(condition)
Definition: checks.h:329
const int kPointerSizeLog2
Definition: globals.h:281
const uint32_t kStringRepresentationMask
Definition: objects.h:615
const Register r2
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:234
const bool FLAG_enable_slow_asserts
Definition: checks.h:307
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
const intptr_t kHeapObjectTagMask
Definition: v8.h:5475
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
bool is_intn(int64_t x, unsigned n)
Definition: utils.h:1102
const Register edi
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
const uint32_t kNotStringTag
Definition: objects.h:599
const Register ebp
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
const Register eax
const int kDoubleSize
Definition: globals.h:266
PrologueFrameMode
Definition: frames.h:957
const uint32_t kIsIndirectStringMask
Definition: objects.h:622
const int kPointerSize
Definition: globals.h:268
Operand FieldOperand(Register object, int offset)
const Register ecx
const Address kZapValue
Definition: v8globals.h:82
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
const int kHeapObjectTag
Definition: v8.h:5473
bool IsAligned(T value, U alignment)
Definition: utils.h:211
const uint32_t kHoleNanLower32
Definition: v8globals.h:455
const int kBitsPerByte
Definition: globals.h:287
const Register r0
bool IsPowerOf2(T x)
Definition: utils.h:51
int TenToThe(int exponent)
Definition: utils.h:880
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:598
const int kBitsPerInt
Definition: globals.h:290
const uint32_t kInternalizedTag
Definition: objects.h:605
InvokeFlag
AllocationFlags
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
Definition: shell.cc:171
bool is(Register reg) const
const uint32_t kIsNotStringMask
Definition: objects.h:597
const Register r1
const char * GetBailoutReason(BailoutReason reason)
Definition: objects.cc:16437
const Register ebx
const int kNumSafepointRegisters
Definition: frames-arm.h:92
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
const intptr_t kPointerAlignment
Definition: v8globals.h:48
const int kSmiShiftSize
Definition: v8.h:5539
Operand ApiParameterOperand(int index)
const int kSmiTagSize
Definition: v8.h:5479
void CopyBytes(uint8_t *target, uint8_t *source)
Definition: runtime.cc:1309
#define no_xmm_reg
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
const Register esi
const int kShortSize
Definition: globals.h:262
const uint32_t kOneByteStringTag
Definition: objects.h:611
const int kSmiTag
Definition: v8.h:5478
const uint32_t kIsIndirectStringTag
Definition: objects.h:623
HeapObject * obj
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
#define STATIC_ASSERT(test)
Definition: checks.h:341
const Register edx
#define xmm0
const intptr_t kDoubleAlignment
Definition: v8globals.h:52
const int kCharSize
Definition: globals.h:261
const uint32_t kStringEncodingMask
Definition: objects.h:609
bool is_uintn(int64_t x, unsigned n)
Definition: utils.h:1108
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register r4