v8 3.25.30 (node 0.11.13)
codegen-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_IA32
31 
32 #include "codegen.h"
33 #include "heap.h"
34 #include "macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 // -------------------------------------------------------------------------
41 // Platform-specific RuntimeCallHelper functions.
42 
43 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
44  masm->EnterFrame(StackFrame::INTERNAL);
45  ASSERT(!masm->has_frame());
46  masm->set_has_frame(true);
47 }
48 
49 
50 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
51  masm->LeaveFrame(StackFrame::INTERNAL);
52  ASSERT(masm->has_frame());
53  masm->set_has_frame(false);
54 }
55 
56 
57 #define __ masm.
58 
59 
60 UnaryMathFunction CreateExpFunction() {
61  if (!CpuFeatures::IsSupported(SSE2)) return &std::exp;
62  if (!FLAG_fast_math) return &std::exp;
63  size_t actual_size;
64  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
65  if (buffer == NULL) return &std::exp;
66  ExternalReference::InitializeMathExpData();
67 
68  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
69  // esp[1 * kPointerSize]: raw double input
70  // esp[0 * kPointerSize]: return address
71  {
72  CpuFeatureScope use_sse2(&masm, SSE2);
73  XMMRegister input = xmm1;
74  XMMRegister result = xmm2;
75  __ movsd(input, Operand(esp, 1 * kPointerSize));
76  __ push(eax);
77  __ push(ebx);
78 
79  MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
80 
81  __ pop(ebx);
82  __ pop(eax);
83  __ movsd(Operand(esp, 1 * kPointerSize), result);
84  __ fld_d(Operand(esp, 1 * kPointerSize));
85  __ Ret();
86  }
87 
88  CodeDesc desc;
89  masm.GetCode(&desc);
90  ASSERT(!RelocInfo::RequiresRelocation(desc));
91 
92  CPU::FlushICache(buffer, actual_size);
93  OS::ProtectCode(buffer, actual_size);
94  return FUNCTION_CAST<UnaryMathFunction>(buffer);
95 }
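The generated buffer follows the ia32 cdecl convention for double(double): the argument is read from the stack and the result is returned in x87 st(0) via fld_d, so it can be called through a plain function pointer. A minimal caller sketch, assuming only the declarations above (UnaryMathFunction is double (*)(double), per codegen.h):

    // Illustrative caller, not part of the original file.
    static double CallFastExp() {
      UnaryMathFunction fast_exp = CreateExpFunction();
      return fast_exp(1.0);  // ~2.71828 via the SSE2 stub, or std::exp as fallback
    }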
96 
97 
98 UnaryMathFunction CreateSqrtFunction() {
99  size_t actual_size;
100  // Allocate buffer in executable space.
101  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
102  &actual_size,
103  true));
104  // If SSE2 is not available, we can use libc's implementation to ensure
105  // consistency, since code generated by fullcodegen calls into the runtime in that case.
106  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &std::sqrt;
107  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
108  // esp[1 * kPointerSize]: raw double input
109  // esp[0 * kPointerSize]: return address
110  // Move double input into registers.
111  {
112  CpuFeatureScope use_sse2(&masm, SSE2);
113  __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
114  __ sqrtsd(xmm0, xmm0);
115  __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
116  // Load result into floating point register as return value.
117  __ fld_d(Operand(esp, 1 * kPointerSize));
118  __ Ret();
119  }
120 
121  CodeDesc desc;
122  masm.GetCode(&desc);
123  ASSERT(!RelocInfo::RequiresRelocation(desc));
124 
125  CPU::FlushICache(buffer, actual_size);
126  OS::ProtectCode(buffer, actual_size);
127  return FUNCTION_CAST<UnaryMathFunction>(buffer);
128 }
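The sqrt stub uses the same convention: compute with sqrtsd, store the result back to the argument slot, then fld_d it so the value comes back in st(0). A quick sanity-check sketch (illustrative only; assumes <cassert>):

    static void CheckFastSqrt() {
      UnaryMathFunction fast_sqrt = CreateSqrtFunction();
      assert(fast_sqrt(4.0) == 2.0);                         // sqrtsd is exact for perfect squares
      assert(fast_sqrt(2.0) > 1.41 && fast_sqrt(2.0) < 1.42);
    }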
129 
130 
131 // Helper functions for CreateMemMoveFunction.
132 #undef __
133 #define __ ACCESS_MASM(masm)
134 
135 enum Direction { FORWARD, BACKWARD };
136 enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
137 
138 // Expects registers:
139 // esi - source, aligned if alignment == ALIGNED
140 // edi - destination, always aligned
141 // ecx - count (copy size in bytes)
142 // edx - loop count (number of 64 byte chunks)
143 void MemMoveEmitMainLoop(MacroAssembler* masm,
144  Label* move_last_15,
145  Direction direction,
146  Alignment alignment) {
147  Register src = esi;
148  Register dst = edi;
149  Register count = ecx;
150  Register loop_count = edx;
151  Label loop, move_last_31, move_last_63;
152  __ cmp(loop_count, 0);
153  __ j(equal, &move_last_63);
154  __ bind(&loop);
155  // Main loop. Copy in 64 byte chunks.
156  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
157  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
158  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
159  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
160  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
161  if (direction == FORWARD) __ add(src, Immediate(0x40));
162  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
163  __ movdqa(Operand(dst, 0x00), xmm0);
164  __ movdqa(Operand(dst, 0x10), xmm1);
165  __ movdqa(Operand(dst, 0x20), xmm2);
166  __ movdqa(Operand(dst, 0x30), xmm3);
167  if (direction == FORWARD) __ add(dst, Immediate(0x40));
168  __ dec(loop_count);
169  __ j(not_zero, &loop);
170  // At most 63 bytes left to copy.
171  __ bind(&move_last_63);
172  __ test(count, Immediate(0x20));
173  __ j(zero, &move_last_31);
174  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
175  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
176  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
177  if (direction == FORWARD) __ add(src, Immediate(0x20));
178  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
179  __ movdqa(Operand(dst, 0x00), xmm0);
180  __ movdqa(Operand(dst, 0x10), xmm1);
181  if (direction == FORWARD) __ add(dst, Immediate(0x20));
182  // At most 31 bytes left to copy.
183  __ bind(&move_last_31);
184  __ test(count, Immediate(0x10));
185  __ j(zero, move_last_15);
186  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
187  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
188  if (direction == FORWARD) __ add(src, Immediate(0x10));
189  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
190  __ movdqa(Operand(dst, 0), xmm0);
191  if (direction == FORWARD) __ add(dst, Immediate(0x10));
192 }
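A rough scalar equivalent of the forward variant of this emitter, to make the roles of count and loop_count concrete (a sketch only; the real code uses four 16-byte SSE2 moves per iteration with aligned stores, and leaves the final 15 or fewer bytes to the caller's move_last_15 label):

    #include <cstring>
    static void MainLoopForward(char* dst, const char* src, size_t count) {
      size_t loop_count = count >> 6;                    // number of 64-byte chunks (edx)
      while (loop_count--) {                             // main loop
        std::memcpy(dst, src, 64); src += 64; dst += 64;
      }
      if (count & 0x20) { std::memcpy(dst, src, 32); src += 32; dst += 32; }  // move_last_63
      if (count & 0x10) { std::memcpy(dst, src, 16); src += 16; dst += 16; }  // move_last_31
      // At most 15 bytes remain; the caller finishes them at move_last_15.
    }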
193 
194 
195 void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
196  __ pop(esi);
197  __ pop(edi);
198  __ ret(0);
199 }
200 
201 
202 #undef __
203 #define __ masm.
204 
205 
206 class LabelConverter {
207  public:
208  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
209  int32_t address(Label* l) const {
210  return reinterpret_cast<int32_t>(buffer_) + l->pos();
211  }
212  private:
213  byte* buffer_;
214 };
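The generated buffer never moves and lives outside the V8 heap, so a bound label's offset can be turned into an absolute 32-bit address simply by adding the buffer base; those absolute addresses are what __ dd(conv.address(...)) plants in the dispatch tables below. An illustrative reading of such a table entry:

    #include <stdint.h>
    // Illustrative only: fetch the absolute handler address stored at table[index].
    static void* TableEntry(const int32_t* table, int index) {
      return reinterpret_cast<void*>(table[index]);
    }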
215 
216 
217 OS::MemMoveFunction CreateMemMoveFunction() {
218  size_t actual_size;
219  // Allocate buffer in executable space.
220  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
221  if (buffer == NULL) return NULL;
222  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
223  LabelConverter conv(buffer);
224 
225  // Generated code is put into a fixed, unmovable buffer, and not into
226  // the V8 heap. We can't, and don't, refer to any relocatable addresses
227  // (e.g. the JavaScript nan-object).
228 
229  // 32-bit cdecl function calls pass their arguments on the stack.
230 
231  // Stack layout:
232  // esp[12]: Third argument, size.
233  // esp[8]: Second argument, source pointer.
234  // esp[4]: First argument, destination pointer.
235  // esp[0]: return address
236 
237  const int kDestinationOffset = 1 * kPointerSize;
238  const int kSourceOffset = 2 * kPointerSize;
239  const int kSizeOffset = 3 * kPointerSize;
240 
241  // When copying up to this many bytes, use special "small" handlers.
242  const size_t kSmallCopySize = 8;
243  // When copying up to this many bytes, use special "medium" handlers.
244  const size_t kMediumCopySize = 63;
245  // When the non-overlapping region of src and dst is smaller than this,
246  // use a more careful implementation (slightly slower).
247  const size_t kMinMoveDistance = 16;
248  // Note that these values are dictated by the implementation below;
249  // do not just change them and hope things will work!
250 
251  int stack_offset = 0; // Update if we change the stack height.
252 
253  Label backward, backward_much_overlap;
254  Label forward_much_overlap, small_size, medium_size, pop_and_return;
255  __ push(edi);
256  __ push(esi);
257  stack_offset += 2 * kPointerSize;
258  Register dst = edi;
259  Register src = esi;
260  Register count = ecx;
261  Register loop_count = edx;
262  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
263  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
264  __ mov(count, Operand(esp, stack_offset + kSizeOffset));
265 
266  __ cmp(dst, src);
267  __ j(equal, &pop_and_return);
268 
269  if (CpuFeatures::IsSupported(SSE2)) {
270  CpuFeatureScope sse2_scope(&masm, SSE2);
271  __ prefetch(Operand(src, 0), 1);
272  __ cmp(count, kSmallCopySize);
273  __ j(below_equal, &small_size);
274  __ cmp(count, kMediumCopySize);
275  __ j(below_equal, &medium_size);
276  __ cmp(dst, src);
277  __ j(above, &backward);
278 
279  {
280  // |dst| is a lower address than |src|. Copy front-to-back.
281  Label unaligned_source, move_last_15, skip_last_move;
282  __ mov(eax, src);
283  __ sub(eax, dst);
284  __ cmp(eax, kMinMoveDistance);
285  __ j(below, &forward_much_overlap);
286  // Copy first 16 bytes.
287  __ movdqu(xmm0, Operand(src, 0));
288  __ movdqu(Operand(dst, 0), xmm0);
289  // Determine distance to alignment: 16 - (dst & 0xF).
290  __ mov(edx, dst);
291  __ and_(edx, 0xF);
292  __ neg(edx);
293  __ add(edx, Immediate(16));
294  __ add(dst, edx);
295  __ add(src, edx);
296  __ sub(count, edx);
297  // dst is now aligned. Main copy loop.
298  __ mov(loop_count, count);
299  __ shr(loop_count, 6);
300  // Check if src is also aligned.
301  __ test(src, Immediate(0xF));
302  __ j(not_zero, &unaligned_source);
303  // Copy loop for aligned source and destination.
304  MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
305  // At most 15 bytes to copy. Copy 16 bytes at the end of the region.
306  __ bind(&move_last_15);
307  __ and_(count, 0xF);
308  __ j(zero, &skip_last_move, Label::kNear);
309  __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
310  __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
311  __ bind(&skip_last_move);
312  MemMoveEmitPopAndReturn(&masm);
313 
314  // Copy loop for unaligned source and aligned destination.
315  __ bind(&unaligned_source);
316  MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
317  __ jmp(&move_last_15);
318 
319  // Less than kMinMoveDistance offset between dst and src.
320  Label loop_until_aligned, last_15_much_overlap;
321  __ bind(&loop_until_aligned);
322  __ mov_b(eax, Operand(src, 0));
323  __ inc(src);
324  __ mov_b(Operand(dst, 0), eax);
325  __ inc(dst);
326  __ dec(count);
327  __ bind(&forward_much_overlap); // Entry point into this block.
328  __ test(dst, Immediate(0xF));
329  __ j(not_zero, &loop_until_aligned);
330  // dst is now aligned, src can't be. Main copy loop.
331  __ mov(loop_count, count);
332  __ shr(loop_count, 6);
333  MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
334  FORWARD, MOVE_UNALIGNED);
335  __ bind(&last_15_much_overlap);
336  __ and_(count, 0xF);
337  __ j(zero, &pop_and_return);
338  __ cmp(count, kSmallCopySize);
339  __ j(below_equal, &small_size);
340  __ jmp(&medium_size);
341  }
342 
343  {
344  // |dst| is a higher address than |src|. Copy backwards.
345  Label unaligned_source, move_first_15, skip_last_move;
346  __ bind(&backward);
347  // |dst| and |src| always point to the end of what's left to copy.
348  __ add(dst, count);
349  __ add(src, count);
350  __ mov(eax, dst);
351  __ sub(eax, src);
352  __ cmp(eax, kMinMoveDistance);
353  __ j(below, &backward_much_overlap);
354  // Copy last 16 bytes.
355  __ movdqu(xmm0, Operand(src, -0x10));
356  __ movdqu(Operand(dst, -0x10), xmm0);
357  // Find distance to alignment: dst & 0xF
358  __ mov(edx, dst);
359  __ and_(edx, 0xF);
360  __ sub(dst, edx);
361  __ sub(src, edx);
362  __ sub(count, edx);
363  // dst is now aligned. Main copy loop.
364  __ mov(loop_count, count);
365  __ shr(loop_count, 6);
366  // Check if src is also aligned.
367  __ test(src, Immediate(0xF));
368  __ j(not_zero, &unaligned_source);
369  // Copy loop for aligned source and destination.
370  MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
371  // At most 15 bytes to copy. Copy 16 bytes at the beginning of the region.
372  __ bind(&move_first_15);
373  __ and_(count, 0xF);
374  __ j(zero, &skip_last_move, Label::kNear);
375  __ sub(src, count);
376  __ sub(dst, count);
377  __ movdqu(xmm0, Operand(src, 0));
378  __ movdqu(Operand(dst, 0), xmm0);
379  __ bind(&skip_last_move);
380  MemMoveEmitPopAndReturn(&masm);
381 
382  // Copy loop for unaligned source and aligned destination.
383  __ bind(&unaligned_source);
384  MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
385  __ jmp(&move_first_15);
386 
387  // Less than kMinMoveDistance offset between dst and src.
388  Label loop_until_aligned, first_15_much_overlap;
389  __ bind(&loop_until_aligned);
390  __ dec(src);
391  __ dec(dst);
392  __ mov_b(eax, Operand(src, 0));
393  __ mov_b(Operand(dst, 0), eax);
394  __ dec(count);
395  __ bind(&backward_much_overlap); // Entry point into this block.
396  __ test(dst, Immediate(0xF));
397  __ j(not_zero, &loop_until_aligned);
398  // dst is now aligned, src can't be. Main copy loop.
399  __ mov(loop_count, count);
400  __ shr(loop_count, 6);
401  MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
402  BACKWARD, MOVE_UNALIGNED);
403  __ bind(&first_15_much_overlap);
404  __ and_(count, 0xF);
405  __ j(zero, &pop_and_return);
406  // Small/medium handlers expect dst/src to point to the beginning.
407  __ sub(dst, count);
408  __ sub(src, count);
409  __ cmp(count, kSmallCopySize);
410  __ j(below_equal, &small_size);
411  __ jmp(&medium_size);
412  }
413  {
414  // Special handlers for 9 <= copy_size < 64. No assumptions about
415  // alignment or move distance, so all reads must be unaligned and
416  // must happen before any writes.
417  Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
418 
419  __ bind(&f9_16);
420  __ movsd(xmm0, Operand(src, 0));
421  __ movsd(xmm1, Operand(src, count, times_1, -8));
422  __ movsd(Operand(dst, 0), xmm0);
423  __ movsd(Operand(dst, count, times_1, -8), xmm1);
424  MemMoveEmitPopAndReturn(&masm);
425 
426  __ bind(&f17_32);
427  __ movdqu(xmm0, Operand(src, 0));
428  __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
429  __ movdqu(Operand(dst, 0x00), xmm0);
430  __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
431  MemMoveEmitPopAndReturn(&masm);
432 
433  __ bind(&f33_48);
434  __ movdqu(xmm0, Operand(src, 0x00));
435  __ movdqu(xmm1, Operand(src, 0x10));
436  __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
437  __ movdqu(Operand(dst, 0x00), xmm0);
438  __ movdqu(Operand(dst, 0x10), xmm1);
439  __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
440  MemMoveEmitPopAndReturn(&masm);
441 
442  __ bind(&f49_63);
443  __ movdqu(xmm0, Operand(src, 0x00));
444  __ movdqu(xmm1, Operand(src, 0x10));
445  __ movdqu(xmm2, Operand(src, 0x20));
446  __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
447  __ movdqu(Operand(dst, 0x00), xmm0);
448  __ movdqu(Operand(dst, 0x10), xmm1);
449  __ movdqu(Operand(dst, 0x20), xmm2);
450  __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
451  MemMoveEmitPopAndReturn(&masm);
452 
453  __ bind(&medium_handlers);
454  __ dd(conv.address(&f9_16));
455  __ dd(conv.address(&f17_32));
456  __ dd(conv.address(&f33_48));
457  __ dd(conv.address(&f49_63));
458 
459  __ bind(&medium_size); // Entry point into this block.
460  __ mov(eax, count);
461  __ dec(eax);
462  __ shr(eax, 4);
463  if (FLAG_debug_code) {
464  Label ok;
465  __ cmp(eax, 3);
466  __ j(below_equal, &ok);
467  __ int3();
468  __ bind(&ok);
469  }
470  __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
471  __ jmp(eax);
472  }
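  // The dispatch above maps count onto a handler via (count - 1) >> 4:
  //   count  9..16 -> index 0 (f9_16)
  //   count 17..32 -> index 1 (f17_32)
  //   count 33..48 -> index 2 (f33_48)
  //   count 49..63 -> index 3 (f49_63)
  // e.g. count == 48 gives (47 >> 4) == 2, while count == 49 gives (48 >> 4) == 3.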
473  {
474  // Specialized copiers for copy_size <= 8 bytes.
475  Label small_handlers, f0, f1, f2, f3, f4, f5_8;
476  __ bind(&f0);
477  MemMoveEmitPopAndReturn(&masm);
478 
479  __ bind(&f1);
480  __ mov_b(eax, Operand(src, 0));
481  __ mov_b(Operand(dst, 0), eax);
482  MemMoveEmitPopAndReturn(&masm);
483 
484  __ bind(&f2);
485  __ mov_w(eax, Operand(src, 0));
486  __ mov_w(Operand(dst, 0), eax);
487  MemMoveEmitPopAndReturn(&masm);
488 
489  __ bind(&f3);
490  __ mov_w(eax, Operand(src, 0));
491  __ mov_b(edx, Operand(src, 2));
492  __ mov_w(Operand(dst, 0), eax);
493  __ mov_b(Operand(dst, 2), edx);
494  MemMoveEmitPopAndReturn(&masm);
495 
496  __ bind(&f4);
497  __ mov(eax, Operand(src, 0));
498  __ mov(Operand(dst, 0), eax);
499  MemMoveEmitPopAndReturn(&masm);
500 
501  __ bind(&f5_8);
502  __ mov(eax, Operand(src, 0));
503  __ mov(edx, Operand(src, count, times_1, -4));
504  __ mov(Operand(dst, 0), eax);
505  __ mov(Operand(dst, count, times_1, -4), edx);
506  MemMoveEmitPopAndReturn(&masm);
507 
508  __ bind(&small_handlers);
509  __ dd(conv.address(&f0));
510  __ dd(conv.address(&f1));
511  __ dd(conv.address(&f2));
512  __ dd(conv.address(&f3));
513  __ dd(conv.address(&f4));
514  __ dd(conv.address(&f5_8));
515  __ dd(conv.address(&f5_8));
516  __ dd(conv.address(&f5_8));
517  __ dd(conv.address(&f5_8));
518 
519  __ bind(&small_size); // Entry point into this block.
520  if (FLAG_debug_code) {
521  Label ok;
522  __ cmp(count, 8);
523  __ j(below_equal, &ok);
524  __ int3();
525  __ bind(&ok);
526  }
527  __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
528  __ jmp(eax);
529  }
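  // The small-size dispatch indexes the table with count itself (0..8); entries
  // 5 through 8 all point at f5_8, which loads both 4-byte words before storing
  // either, so overlapping dst/src regions stay correct. E.g. for count == 6 it
  // reads bytes [0,4) and [2,6) of src, then writes the same ranges of dst.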
530  } else {
531  // No SSE2.
532  Label forward;
533  __ cmp(count, 0);
534  __ j(equal, &pop_and_return);
535  __ cmp(dst, src);
536  __ j(above, &backward);
537  __ jmp(&forward);
538  {
539  // Simple forward copier.
540  Label forward_loop_1byte, forward_loop_4byte;
541  __ bind(&forward_loop_4byte);
542  __ mov(eax, Operand(src, 0));
543  __ sub(count, Immediate(4));
544  __ add(src, Immediate(4));
545  __ mov(Operand(dst, 0), eax);
546  __ add(dst, Immediate(4));
547  __ bind(&forward); // Entry point.
548  __ cmp(count, 3);
549  __ j(above, &forward_loop_4byte);
550  __ bind(&forward_loop_1byte);
551  __ cmp(count, 0);
552  __ j(below_equal, &pop_and_return);
553  __ mov_b(eax, Operand(src, 0));
554  __ dec(count);
555  __ inc(src);
556  __ mov_b(Operand(dst, 0), eax);
557  __ inc(dst);
558  __ jmp(&forward_loop_1byte);
559  }
560  {
561  // Simple backward copier.
562  Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
563  __ bind(&backward);
564  __ add(src, count);
565  __ add(dst, count);
566  __ cmp(count, 3);
567  __ j(below_equal, &entry_shortcut);
568 
569  __ bind(&backward_loop_4byte);
570  __ sub(src, Immediate(4));
571  __ sub(count, Immediate(4));
572  __ mov(eax, Operand(src, 0));
573  __ sub(dst, Immediate(4));
574  __ mov(Operand(dst, 0), eax);
575  __ cmp(count, 3);
576  __ j(above, &backward_loop_4byte);
577  __ bind(&backward_loop_1byte);
578  __ cmp(count, 0);
579  __ j(below_equal, &pop_and_return);
580  __ bind(&entry_shortcut);
581  __ dec(src);
582  __ dec(count);
583  __ mov_b(eax, Operand(src, 0));
584  __ dec(dst);
585  __ mov_b(Operand(dst, 0), eax);
586  __ jmp(&backward_loop_1byte);
587  }
588  }
589 
590  __ bind(&pop_and_return);
591  MemMoveEmitPopAndReturn(&masm);
592 
593  CodeDesc desc;
594  masm.GetCode(&desc);
595  ASSERT(!RelocInfo::RequiresRelocation(desc));
596  CPU::FlushICache(buffer, actual_size);
597  OS::ProtectCode(buffer, actual_size);
598  // TODO(jkummerow): It would be nice to register this code creation event
599  // with the PROFILE / GDBJIT system.
600  return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
601 }
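The returned pointer is called through OS::MemMoveFunction, i.e. with the usual memmove-style (destination, source, size) signature, so usage looks roughly like this (a sketch only; the NULL check mirrors the allocation-failure path above, and the real engine caches the pointer once at startup):

    #include <cstring>
    static void MoveBytes(void* dest, const void* src, size_t size) {
      static OS::MemMoveFunction fast_move = CreateMemMoveFunction();
      if (fast_move != NULL) {
        fast_move(dest, src, size);    // handles overlapping regions, like memmove
      } else {
        std::memmove(dest, src, size); // fallback when the buffer could not be allocated
      }
    }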
602 
603 
604 #undef __
605 
606 // -------------------------------------------------------------------------
607 // Code generators
608 
609 #define __ ACCESS_MASM(masm)
610 
611 
612 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
613  MacroAssembler* masm, AllocationSiteMode mode,
614  Label* allocation_memento_found) {
615  // ----------- S t a t e -------------
616  // -- eax : value
617  // -- ebx : target map
618  // -- ecx : key
619  // -- edx : receiver
620  // -- esp[0] : return address
621  // -----------------------------------
622  if (mode == TRACK_ALLOCATION_SITE) {
623  ASSERT(allocation_memento_found != NULL);
624  __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
625  }
626 
627  // Set transitioned map.
628  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
629  __ RecordWriteField(edx,
630  HeapObject::kMapOffset,
631  ebx,
632  edi,
633  kDontSaveFPRegs,
634  EMIT_REMEMBERED_SET,
635  OMIT_SMI_CHECK);
636 }
637 
638 
639 void ElementsTransitionGenerator::GenerateSmiToDouble(
640  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
641  // ----------- S t a t e -------------
642  // -- eax : value
643  // -- ebx : target map
644  // -- ecx : key
645  // -- edx : receiver
646  // -- esp[0] : return address
647  // -----------------------------------
648  Label loop, entry, convert_hole, gc_required, only_change_map;
649 
650  if (mode == TRACK_ALLOCATION_SITE) {
651  __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
652  }
653 
654  // Check for empty arrays, which only require a map transition and no changes
655  // to the backing store.
657  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
658  __ j(equal, &only_change_map);
659 
660  __ push(eax);
661  __ push(ebx);
662 
664 
665  // Allocate new FixedDoubleArray.
666  // edx: receiver
667  // edi: length of source FixedArray (smi-tagged)
671  REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
672 
673  // eax: destination FixedDoubleArray
674  // edi: number of elements
675  // edx: receiver
677  Immediate(masm->isolate()->factory()->fixed_double_array_map()));
680  // Replace receiver's backing store with newly created FixedDoubleArray.
682  __ mov(ebx, eax);
683  __ RecordWriteField(edx,
685  ebx,
686  edi,
690 
692 
693  // Prepare for conversion loop.
694  ExternalReference canonical_the_hole_nan_reference =
695  ExternalReference::address_of_the_hole_nan();
696  XMMRegister the_hole_nan = xmm1;
698  CpuFeatureScope use_sse2(masm, SSE2);
699  __ movsd(the_hole_nan,
700  Operand::StaticVariable(canonical_the_hole_nan_reference));
701  }
702  __ jmp(&entry);
703 
704  // Call into runtime if GC is required.
705  __ bind(&gc_required);
706  // Restore registers before jumping into runtime.
708  __ pop(ebx);
709  __ pop(eax);
710  __ jmp(fail);
711 
712  // Convert and copy elements
713  // esi: source FixedArray
714  __ bind(&loop);
716  // ebx: current element from source
717  // edi: index of current element
718  __ JumpIfNotSmi(ebx, &convert_hole);
719 
720  // Normal smi, convert it to double and store.
721  __ SmiUntag(ebx);
723  CpuFeatureScope fscope(masm, SSE2);
724  __ Cvtsi2sd(xmm0, ebx);
726  xmm0);
727  } else {
728  __ push(ebx);
729  __ fild_s(Operand(esp, 0));
730  __ pop(ebx);
732  }
733  __ jmp(&entry);
734 
735  // Found hole, store hole_nan_as_double instead.
736  __ bind(&convert_hole);
737 
738  if (FLAG_debug_code) {
739  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
740  __ Assert(equal, kObjectFoundInSmiOnlyArray);
741  }
742 
744  CpuFeatureScope use_sse2(masm, SSE2);
746  the_hole_nan);
747  } else {
748  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
750  }
751 
752  __ bind(&entry);
753  __ sub(edi, Immediate(Smi::FromInt(1)));
754  __ j(not_sign, &loop);
755 
756  __ pop(ebx);
757  __ pop(eax);
758 
759  // Restore esi.
761 
762  __ bind(&only_change_map);
763  // eax: value
764  // ebx: target map
765  // Set transitioned map.
767  __ RecordWriteField(edx,
769  ebx,
770  edi,
774 }
775 
776 
777 void ElementsTransitionGenerator::GenerateDoubleToObject(
778  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
779  // ----------- S t a t e -------------
780  // -- eax : value
781  // -- ebx : target map
782  // -- ecx : key
783  // -- edx : receiver
784  // -- esp[0] : return address
785  // -----------------------------------
786  Label loop, entry, convert_hole, gc_required, only_change_map, success;
787 
788  if (mode == TRACK_ALLOCATION_SITE) {
789  __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
790  }
791 
792  // Check for empty arrays, which only require a map transition and no changes
793  // to the backing store.
795  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
796  __ j(equal, &only_change_map);
797 
798  __ push(eax);
799  __ push(edx);
800  __ push(ebx);
801 
803 
804  // Allocate new FixedArray.
805  // ebx: length of source FixedDoubleArray (smi-tagged)
806  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
807  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
808 
809  // eax: destination FixedArray
810  // ebx: number of elements
812  Immediate(masm->isolate()->factory()->fixed_array_map()));
815 
816  __ jmp(&entry);
817 
818  // ebx: target map
819  // edx: receiver
820  // Set transitioned map.
821  __ bind(&only_change_map);
823  __ RecordWriteField(edx,
825  ebx,
826  edi,
830  __ jmp(&success);
831 
832  // Call into runtime if GC is required.
833  __ bind(&gc_required);
835  __ pop(ebx);
836  __ pop(edx);
837  __ pop(eax);
838  __ jmp(fail);
839 
840  // Box doubles into heap numbers.
841  // edi: source FixedDoubleArray
842  // eax: destination FixedArray
843  __ bind(&loop);
844  // ebx: index of current element (smi-tagged)
845  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
846  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
847  __ j(equal, &convert_hole);
848 
849  // Non-hole double, copy value into a heap number.
850  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
851  // edx: new heap number
853  CpuFeatureScope fscope(masm, SSE2);
854  __ movsd(xmm0,
857  } else {
860  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
862  }
864  __ mov(esi, ebx);
865  __ RecordWriteArray(eax,
866  edx,
867  esi,
871  __ jmp(&entry, Label::kNear);
872 
873  // Replace the-hole NaN with the-hole pointer.
874  __ bind(&convert_hole);
876  masm->isolate()->factory()->the_hole_value());
877 
878  __ bind(&entry);
879  __ sub(ebx, Immediate(Smi::FromInt(1)));
880  __ j(not_sign, &loop);
881 
882  __ pop(ebx);
883  __ pop(edx);
884  // ebx: target map
885  // edx: receiver
886  // Set transitioned map.
888  __ RecordWriteField(edx,
890  ebx,
891  edi,
895  // Replace receiver's backing store with newly created and filled FixedArray.
897  __ RecordWriteField(edx,
899  eax,
900  edi,
904 
905  // Restore registers.
906  __ pop(eax);
908 
909  __ bind(&success);
910 }
911 
912 
913 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
914  Factory* factory,
915  Register string,
916  Register index,
917  Register result,
918  Label* call_runtime) {
919  // Fetch the instance type of the receiver into result register.
920  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
921  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
922 
923  // We need special handling for indirect strings.
924  Label check_sequential;
925  __ test(result, Immediate(kIsIndirectStringMask));
926  __ j(zero, &check_sequential, Label::kNear);
927 
928  // Dispatch on the indirect string shape: slice or cons.
929  Label cons_string;
930  __ test(result, Immediate(kSlicedNotConsMask));
931  __ j(zero, &cons_string, Label::kNear);
932 
933  // Handle slices.
934  Label indirect_string_loaded;
935  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
936  __ SmiUntag(result);
937  __ add(index, result);
938  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
939  __ jmp(&indirect_string_loaded, Label::kNear);
940 
941  // Handle cons strings.
942  // Check whether the right hand side is the empty string (i.e. if
943  // this is really a flat string in a cons string). If that is not
944  // the case we would rather go to the runtime system now to flatten
945  // the string.
946  __ bind(&cons_string);
947  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
948  Immediate(factory->empty_string()));
949  __ j(not_equal, call_runtime);
950  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
951 
952  __ bind(&indirect_string_loaded);
953  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
954  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
955 
956  // Distinguish sequential and external strings. Only these two string
957  // representations can reach here (slices and flat cons strings have been
958  // reduced to the underlying sequential or external string).
959  Label seq_string;
960  __ bind(&check_sequential);
962  __ test(result, Immediate(kStringRepresentationMask));
963  __ j(zero, &seq_string, Label::kNear);
964 
965  // Handle external strings.
966  Label ascii_external, done;
967  if (FLAG_debug_code) {
968  // Assert that we do not have a cons or slice (indirect strings) here.
969  // Sequential strings have already been ruled out.
970  __ test(result, Immediate(kIsIndirectStringMask));
971  __ Assert(zero, kExternalStringExpectedButNotFound);
972  }
973  // Rule out short external strings.
975  __ test_b(result, kShortExternalStringMask);
976  __ j(not_zero, call_runtime);
977  // Check encoding.
979  __ test_b(result, kStringEncodingMask);
981  __ j(not_equal, &ascii_external, Label::kNear);
982  // Two-byte string.
983  __ movzx_w(result, Operand(result, index, times_2, 0));
984  __ jmp(&done, Label::kNear);
985  __ bind(&ascii_external);
986  // Ascii string.
987  __ movzx_b(result, Operand(result, index, times_1, 0));
988  __ jmp(&done, Label::kNear);
989 
990  // Dispatch on the encoding: ASCII or two-byte.
991  Label ascii;
992  __ bind(&seq_string);
995  __ test(result, Immediate(kStringEncodingMask));
996  __ j(not_zero, &ascii, Label::kNear);
997 
998  // Two-byte string.
999  // Load the two-byte character code into the result register.
1000  __ movzx_w(result, FieldOperand(string,
1001  index,
1002  times_2,
1003  SeqTwoByteString::kHeaderSize));
1004  __ jmp(&done, Label::kNear);
1005 
1006  // Ascii string.
1007  // Load the byte into the result register.
1008  __ bind(&ascii);
1009  __ movzx_b(result, FieldOperand(string,
1010  index,
1011  times_1,
1012  SeqOneByteString::kHeaderSize));
1013  __ bind(&done);
1014 }
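A compact scalar outline of the dispatch this generator emits, with stand-in types (illustrative only; the real code works on map instance-type bits, and short external strings always defer to the runtime):

    enum StringShape { SEQUENTIAL, EXTERNAL, SHORT_EXTERNAL, CONS, SLICED };

    // Returns true when the character can be loaded without calling the runtime.
    static bool CanLoadDirectly(StringShape shape, bool cons_second_is_empty) {
      switch (shape) {
        case SLICED:         return true;                  // add the slice offset, use the parent
        case CONS:           return cons_second_is_empty;  // otherwise the runtime flattens it
        case SHORT_EXTERNAL: return false;                 // no addressable resource data
        case EXTERNAL:       return true;                  // read via the resource data pointer
        case SEQUENTIAL:     return true;                  // read the in-object payload
      }
      return false;
    }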
1015 
1016 
1017 static Operand ExpConstant(int index) {
1018  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
1019 }
1020 
1021 
1022 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
1023  XMMRegister input,
1024  XMMRegister result,
1025  XMMRegister double_scratch,
1026  Register temp1,
1027  Register temp2) {
1028  ASSERT(!input.is(double_scratch));
1029  ASSERT(!input.is(result));
1030  ASSERT(!result.is(double_scratch));
1031  ASSERT(!temp1.is(temp2));
1032  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
1033 
1034  Label done;
1035 
1036  __ movsd(double_scratch, ExpConstant(0));
1037  __ xorpd(result, result);
1038  __ ucomisd(double_scratch, input);
1039  __ j(above_equal, &done);
1040  __ ucomisd(input, ExpConstant(1));
1041  __ movsd(result, ExpConstant(2));
1042  __ j(above_equal, &done);
1043  __ movsd(double_scratch, ExpConstant(3));
1044  __ movsd(result, ExpConstant(4));
1045  __ mulsd(double_scratch, input);
1046  __ addsd(double_scratch, result);
1047  __ movd(temp2, double_scratch);
1048  __ subsd(double_scratch, result);
1049  __ movsd(result, ExpConstant(6));
1050  __ mulsd(double_scratch, ExpConstant(5));
1051  __ subsd(double_scratch, input);
1052  __ subsd(result, double_scratch);
1053  __ movsd(input, double_scratch);
1054  __ mulsd(input, double_scratch);
1055  __ mulsd(result, input);
1056  __ mov(temp1, temp2);
1057  __ mulsd(result, ExpConstant(7));
1058  __ subsd(result, double_scratch);
1059  __ add(temp1, Immediate(0x1ff800));
1060  __ addsd(result, ExpConstant(8));
1061  __ and_(temp2, Immediate(0x7ff));
1062  __ shr(temp1, 11);
1063  __ shl(temp1, 20);
1064  __ movd(input, temp1);
1065  __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
1066  __ movsd(double_scratch, Operand::StaticArray(
1067  temp2, times_8, ExternalReference::math_exp_log_table()));
1068  __ orps(input, double_scratch);
1069  __ mulsd(result, input);
1070  __ bind(&done);
1071 }
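The integer manipulation of temp1/temp2 near the end assembles a power of two directly in the bit pattern of a double: the biased exponent is shifted into the upper 32-bit half (hence shl(temp1, 20) and the pshufd shuffle into the high lane), while low-order bits come from the precomputed log table. The same trick in scalar form, assuming standard IEEE-754 doubles:

    #include <stdint.h>
    #include <cstring>
    // Build 2^k by writing the IEEE-754 exponent field directly.
    static double TwoToThe(int k) {
      uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;  // biased exponent
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;                                          // TwoToThe(10) == 1024.0
    }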
1072 
1073 #undef __
1074 
1075 
1076 static byte* GetNoCodeAgeSequence(uint32_t* length) {
1077  static bool initialized = false;
1078  static byte sequence[kNoCodeAgeSequenceLength];
1079  *length = kNoCodeAgeSequenceLength;
1080  if (!initialized) {
1081  // The sequence of instructions that is patched out for aging code is the
1082  // following boilerplate stack-building prologue that is found both in
1083  // FUNCTION and OPTIMIZED_FUNCTION code:
1084  CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
1085  patcher.masm()->push(ebp);
1086  patcher.masm()->mov(ebp, esp);
1087  patcher.masm()->push(esi);
1088  patcher.masm()->push(edi);
1089  initialized = true;
1090  }
1091  return sequence;
1092 }
1093 
1094 
1095 bool Code::IsYoungSequence(byte* sequence) {
1096  uint32_t young_length;
1097  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1098  bool result = (!memcmp(sequence, young_sequence, young_length));
1099  ASSERT(result || *sequence == kCallOpcode);
1100  return result;
1101 }
1102 
1103 
1104 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
1105  MarkingParity* parity) {
1106  if (IsYoungSequence(sequence)) {
1107  *age = kNoAgeCodeAge;
1108  *parity = NO_MARKING_PARITY;
1109  } else {
1110  sequence++; // Skip the kCallOpcode byte
1111  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
1112  Assembler::kCallTargetAddressOffset;
1113  Code* stub = GetCodeFromTargetAddress(target_address);
1114  GetCodeAgeAndParity(stub, age, parity);
1115  }
1116 }
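Decoding the target of the aging call is plain rel32 arithmetic: the displacement stored after the 0xE8 opcode is relative to the end of the 5-byte instruction, which is what the Assembler::kCallTargetAddressOffset term accounts for. An equivalent scalar sketch (illustrative only):

    #include <stdint.h>
    #include <cstring>
    // Absolute target of a 5-byte "call rel32" whose first byte is at pc.
    static uint8_t* CallTarget(uint8_t* pc) {
      int32_t rel32;
      std::memcpy(&rel32, pc + 1, sizeof(rel32));  // displacement follows the opcode
      return pc + 5 + rel32;                       // relative to the next instruction
    }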
1117 
1118 
1119 void Code::PatchPlatformCodeAge(Isolate* isolate,
1120  byte* sequence,
1121  Code::Age age,
1122  MarkingParity parity) {
1123  uint32_t young_length;
1124  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1125  if (age == kNoAgeCodeAge) {
1126  CopyBytes(sequence, young_sequence, young_length);
1127  CPU::FlushICache(sequence, young_length);
1128  } else {
1129  Code* stub = GetCodeAgeStub(isolate, age, parity);
1130  CodePatcher patcher(sequence, young_length);
1131  patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
1132  }
1133 }
1134 
1135 
1136 } } // namespace v8::internal
1137 
1138 #endif // V8_TARGET_ARCH_IA32