V8 3.11.10 (as bundled with Node.js 0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
codegen-ia32.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "codegen.h"
33 #include "heap.h"
34 #include "macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 // -------------------------------------------------------------------------
41 // Platform-specific RuntimeCallHelper functions.
42 
43 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
44  masm->EnterFrame(StackFrame::INTERNAL);
45  ASSERT(!masm->has_frame());
46  masm->set_has_frame(true);
47 }
48 
49 
50 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
51  masm->LeaveFrame(StackFrame::INTERNAL);
52  ASSERT(masm->has_frame());
53  masm->set_has_frame(false);
54 }
55 
56 
57 #define __ masm.
58 
59 
61  size_t actual_size;
62  // Allocate buffer in executable space.
63  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
64  &actual_size,
65  true));
66  if (buffer == NULL) {
67  // Fallback to library function if function cannot be created.
68  switch (type) {
69  case TranscendentalCache::SIN: return &sin;
70  case TranscendentalCache::COS: return &cos;
71  case TranscendentalCache::TAN: return &tan;
72  case TranscendentalCache::LOG: return &log;
73  default: UNIMPLEMENTED();
74  }
75  }
76 
77  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
78  // esp[1 * kPointerSize]: raw double input
79  // esp[0 * kPointerSize]: return address
80  // Move double input into registers.
81 
82  __ push(ebx);
83  __ push(edx);
84  __ push(edi);
85  __ fld_d(Operand(esp, 4 * kPointerSize));
86  __ mov(ebx, Operand(esp, 4 * kPointerSize));
87  __ mov(edx, Operand(esp, 5 * kPointerSize));
89  // The return value is expected to be on ST(0) of the FPU stack.
90  __ pop(edi);
91  __ pop(edx);
92  __ pop(ebx);
93  __ Ret();
94 
95  CodeDesc desc;
96  masm.GetCode(&desc);
97  ASSERT(desc.reloc_size == 0);
98 
99  CPU::FlushICache(buffer, actual_size);
100  OS::ProtectCode(buffer, actual_size);
101  return FUNCTION_CAST<UnaryMathFunction>(buffer);
102 }
103 
104 
106  size_t actual_size;
107  // Allocate buffer in executable space.
108  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
109  &actual_size,
110  true));
111  // If SSE2 is not available, we can use libc's implementation to ensure
112  // consistency since code by fullcodegen's calls into runtime in that case.
113  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
114  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
115  // esp[1 * kPointerSize]: raw double input
116  // esp[0 * kPointerSize]: return address
117  // Move double input into registers.
118  {
119  CpuFeatures::Scope use_sse2(SSE2);
120  __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
121  __ sqrtsd(xmm0, xmm0);
122  __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
123  // Load result into floating point register as return value.
124  __ fld_d(Operand(esp, 1 * kPointerSize));
125  __ Ret();
126  }
127 
128  CodeDesc desc;
129  masm.GetCode(&desc);
130  ASSERT(desc.reloc_size == 0);
131 
132  CPU::FlushICache(buffer, actual_size);
133  OS::ProtectCode(buffer, actual_size);
134  return FUNCTION_CAST<UnaryMathFunction>(buffer);
135 }
136 
137 
// Plain libc fallback used by CreateMemCopyFunction when an optimized copy
// stub cannot be created (e.g. executable memory allocation fails).
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
141 
142 
143 OS::MemCopyFunction CreateMemCopyFunction() {
144  size_t actual_size;
145  // Allocate buffer in executable space.
146  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
147  &actual_size,
148  true));
149  if (buffer == NULL) return &MemCopyWrapper;
150  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
151 
152  // Generated code is put into a fixed, unmovable, buffer, and not into
153  // the V8 heap. We can't, and don't, refer to any relocatable addresses
154  // (e.g. the JavaScript nan-object).
155 
156  // 32-bit C declaration function calls pass arguments on stack.
157 
158  // Stack layout:
159  // esp[12]: Third argument, size.
160  // esp[8]: Second argument, source pointer.
161  // esp[4]: First argument, destination pointer.
162  // esp[0]: return address
163 
164  const int kDestinationOffset = 1 * kPointerSize;
165  const int kSourceOffset = 2 * kPointerSize;
166  const int kSizeOffset = 3 * kPointerSize;
167 
168  int stack_offset = 0; // Update if we change the stack height.
169 
170  if (FLAG_debug_code) {
171  __ cmp(Operand(esp, kSizeOffset + stack_offset),
172  Immediate(OS::kMinComplexMemCopy));
173  Label ok;
174  __ j(greater_equal, &ok);
175  __ int3();
176  __ bind(&ok);
177  }
179  CpuFeatures::Scope enable(SSE2);
180  __ push(edi);
181  __ push(esi);
182  stack_offset += 2 * kPointerSize;
183  Register dst = edi;
184  Register src = esi;
185  Register count = ecx;
186  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
187  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
188  __ mov(count, Operand(esp, stack_offset + kSizeOffset));
189 
190 
191  __ movdqu(xmm0, Operand(src, 0));
192  __ movdqu(Operand(dst, 0), xmm0);
193  __ mov(edx, dst);
194  __ and_(edx, 0xF);
195  __ neg(edx);
196  __ add(edx, Immediate(16));
197  __ add(dst, edx);
198  __ add(src, edx);
199  __ sub(count, edx);
200 
201  // edi is now aligned. Check if esi is also aligned.
202  Label unaligned_source;
203  __ test(src, Immediate(0x0F));
204  __ j(not_zero, &unaligned_source);
205  {
206  // Copy loop for aligned source and destination.
207  __ mov(edx, count);
208  Register loop_count = ecx;
209  Register count = edx;
210  __ shr(loop_count, 5);
211  {
212  // Main copy loop.
213  Label loop;
214  __ bind(&loop);
215  __ prefetch(Operand(src, 0x20), 1);
216  __ movdqa(xmm0, Operand(src, 0x00));
217  __ movdqa(xmm1, Operand(src, 0x10));
218  __ add(src, Immediate(0x20));
219 
220  __ movdqa(Operand(dst, 0x00), xmm0);
221  __ movdqa(Operand(dst, 0x10), xmm1);
222  __ add(dst, Immediate(0x20));
223 
224  __ dec(loop_count);
225  __ j(not_zero, &loop);
226  }
227 
228  // At most 31 bytes to copy.
229  Label move_less_16;
230  __ test(count, Immediate(0x10));
231  __ j(zero, &move_less_16);
232  __ movdqa(xmm0, Operand(src, 0));
233  __ add(src, Immediate(0x10));
234  __ movdqa(Operand(dst, 0), xmm0);
235  __ add(dst, Immediate(0x10));
236  __ bind(&move_less_16);
237 
238  // At most 15 bytes to copy. Copy 16 bytes at end of string.
239  __ and_(count, 0xF);
240  __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
241  __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
242 
243  __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
244  __ pop(esi);
245  __ pop(edi);
246  __ ret(0);
247  }
248  __ Align(16);
249  {
250  // Copy loop for unaligned source and aligned destination.
251  // If source is not aligned, we can't read it as efficiently.
252  __ bind(&unaligned_source);
253  __ mov(edx, ecx);
254  Register loop_count = ecx;
255  Register count = edx;
256  __ shr(loop_count, 5);
257  {
258  // Main copy loop
259  Label loop;
260  __ bind(&loop);
261  __ prefetch(Operand(src, 0x20), 1);
262  __ movdqu(xmm0, Operand(src, 0x00));
263  __ movdqu(xmm1, Operand(src, 0x10));
264  __ add(src, Immediate(0x20));
265 
266  __ movdqa(Operand(dst, 0x00), xmm0);
267  __ movdqa(Operand(dst, 0x10), xmm1);
268  __ add(dst, Immediate(0x20));
269 
270  __ dec(loop_count);
271  __ j(not_zero, &loop);
272  }
273 
274  // At most 31 bytes to copy.
275  Label move_less_16;
276  __ test(count, Immediate(0x10));
277  __ j(zero, &move_less_16);
278  __ movdqu(xmm0, Operand(src, 0));
279  __ add(src, Immediate(0x10));
280  __ movdqa(Operand(dst, 0), xmm0);
281  __ add(dst, Immediate(0x10));
282  __ bind(&move_less_16);
283 
284  // At most 15 bytes to copy. Copy 16 bytes at end of string.
285  __ and_(count, 0x0F);
286  __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
287  __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
288 
289  __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
290  __ pop(esi);
291  __ pop(edi);
292  __ ret(0);
293  }
294 
295  } else {
296  // SSE2 not supported. Unlikely to happen in practice.
297  __ push(edi);
298  __ push(esi);
299  stack_offset += 2 * kPointerSize;
300  __ cld();
301  Register dst = edi;
302  Register src = esi;
303  Register count = ecx;
304  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
305  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
306  __ mov(count, Operand(esp, stack_offset + kSizeOffset));
307 
308  // Copy the first word.
309  __ mov(eax, Operand(src, 0));
310  __ mov(Operand(dst, 0), eax);
311 
312  // Increment src,dstso that dst is aligned.
313  __ mov(edx, dst);
314  __ and_(edx, 0x03);
315  __ neg(edx);
316  __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
317  __ add(dst, edx);
318  __ add(src, edx);
319  __ sub(count, edx);
320  // edi is now aligned, ecx holds number of remaning bytes to copy.
321 
322  __ mov(edx, count);
323  count = edx;
324  __ shr(ecx, 2); // Make word count instead of byte count.
325  __ rep_movs();
326 
327  // At most 3 bytes left to copy. Copy 4 bytes at end of string.
328  __ and_(count, 3);
329  __ mov(eax, Operand(src, count, times_1, -4));
330  __ mov(Operand(dst, count, times_1, -4), eax);
331 
332  __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
333  __ pop(esi);
334  __ pop(edi);
335  __ ret(0);
336  }
337 
338  CodeDesc desc;
339  masm.GetCode(&desc);
340  ASSERT(desc.reloc_size == 0);
341 
342  CPU::FlushICache(buffer, actual_size);
343  OS::ProtectCode(buffer, actual_size);
344  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
345 }
346 
347 #undef __
348 
349 // -------------------------------------------------------------------------
350 // Code generators
351 
352 #define __ ACCESS_MASM(masm)
353 
355  MacroAssembler* masm) {
356  // ----------- S t a t e -------------
357  // -- eax : value
358  // -- ebx : target map
359  // -- ecx : key
360  // -- edx : receiver
361  // -- esp[0] : return address
362  // -----------------------------------
363  // Set transitioned map.
365  __ RecordWriteField(edx,
367  ebx,
368  edi,
372 }
373 
374 
376  MacroAssembler* masm, Label* fail) {
377  // ----------- S t a t e -------------
378  // -- eax : value
379  // -- ebx : target map
380  // -- ecx : key
381  // -- edx : receiver
382  // -- esp[0] : return address
383  // -----------------------------------
384  Label loop, entry, convert_hole, gc_required, only_change_map;
385 
386  // Check for empty arrays, which only require a map transition and no changes
387  // to the backing store.
389  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
390  __ j(equal, &only_change_map);
391 
392  __ push(eax);
393  __ push(ebx);
394 
396 
397  // Allocate new FixedDoubleArray.
398  // edx: receiver
399  // edi: length of source FixedArray (smi-tagged)
400  __ lea(esi, Operand(edi,
401  times_4,
403  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
404 
405  Label aligned, aligned_done;
406  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
407  __ j(zero, &aligned, Label::kNear);
408  __ mov(FieldOperand(eax, 0),
409  Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
410  __ add(eax, Immediate(kPointerSize));
411  __ jmp(&aligned_done);
412 
413  __ bind(&aligned);
414  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
415  Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
416 
417  __ bind(&aligned_done);
418 
419  // eax: destination FixedDoubleArray
420  // edi: number of elements
421  // edx: receiver
423  Immediate(masm->isolate()->factory()->fixed_double_array_map()));
426  // Replace receiver's backing store with newly created FixedDoubleArray.
428  __ mov(ebx, eax);
429  __ RecordWriteField(edx,
431  ebx,
432  edi,
436 
438 
439  // Prepare for conversion loop.
440  ExternalReference canonical_the_hole_nan_reference =
441  ExternalReference::address_of_the_hole_nan();
442  XMMRegister the_hole_nan = xmm1;
444  CpuFeatures::Scope use_sse2(SSE2);
445  __ movdbl(the_hole_nan,
446  Operand::StaticVariable(canonical_the_hole_nan_reference));
447  }
448  __ jmp(&entry);
449 
450  // Call into runtime if GC is required.
451  __ bind(&gc_required);
452  // Restore registers before jumping into runtime.
454  __ pop(ebx);
455  __ pop(eax);
456  __ jmp(fail);
457 
458  // Convert and copy elements
459  // esi: source FixedArray
460  __ bind(&loop);
462  // ebx: current element from source
463  // edi: index of current element
464  __ JumpIfNotSmi(ebx, &convert_hole);
465 
466  // Normal smi, convert it to double and store.
467  __ SmiUntag(ebx);
469  CpuFeatures::Scope fscope(SSE2);
470  __ cvtsi2sd(xmm0, ebx);
472  xmm0);
473  } else {
474  __ push(ebx);
475  __ fild_s(Operand(esp, 0));
476  __ pop(ebx);
478  }
479  __ jmp(&entry);
480 
481  // Found hole, store hole_nan_as_double instead.
482  __ bind(&convert_hole);
483 
484  if (FLAG_debug_code) {
485  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
486  __ Assert(equal, "object found in smi-only array");
487  }
488 
490  CpuFeatures::Scope use_sse2(SSE2);
492  the_hole_nan);
493  } else {
494  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
496  }
497 
498  __ bind(&entry);
499  __ sub(edi, Immediate(Smi::FromInt(1)));
500  __ j(not_sign, &loop);
501 
502  __ pop(ebx);
503  __ pop(eax);
504 
505  // Restore esi.
507 
508  __ bind(&only_change_map);
509  // eax: value
510  // ebx: target map
511  // Set transitioned map.
513  __ RecordWriteField(edx,
515  ebx,
516  edi,
520 }
521 
522 
524  MacroAssembler* masm, Label* fail) {
525  // ----------- S t a t e -------------
526  // -- eax : value
527  // -- ebx : target map
528  // -- ecx : key
529  // -- edx : receiver
530  // -- esp[0] : return address
531  // -----------------------------------
532  Label loop, entry, convert_hole, gc_required, only_change_map, success;
533 
534  // Check for empty arrays, which only require a map transition and no changes
535  // to the backing store.
537  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
538  __ j(equal, &only_change_map);
539 
540  __ push(eax);
541  __ push(edx);
542  __ push(ebx);
543 
545 
546  // Allocate new FixedArray.
547  // ebx: length of source FixedDoubleArray (smi-tagged)
548  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
549  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
550 
551  // eax: destination FixedArray
552  // ebx: number of elements
554  Immediate(masm->isolate()->factory()->fixed_array_map()));
557 
558  __ jmp(&entry);
559 
560  // ebx: target map
561  // edx: receiver
562  // Set transitioned map.
563  __ bind(&only_change_map);
565  __ RecordWriteField(edx,
567  ebx,
568  edi,
572  __ jmp(&success);
573 
574  // Call into runtime if GC is required.
575  __ bind(&gc_required);
577  __ pop(ebx);
578  __ pop(edx);
579  __ pop(eax);
580  __ jmp(fail);
581 
582  // Box doubles into heap numbers.
583  // edi: source FixedDoubleArray
584  // eax: destination FixedArray
585  __ bind(&loop);
586  // ebx: index of current element (smi-tagged)
587  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
588  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
589  __ j(equal, &convert_hole);
590 
591  // Non-hole double, copy value into a heap number.
592  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
593  // edx: new heap number
595  CpuFeatures::Scope fscope(SSE2);
596  __ movdbl(xmm0,
599  } else {
602  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
604  }
606  __ mov(esi, ebx);
607  __ RecordWriteArray(eax,
608  edx,
609  esi,
613  __ jmp(&entry, Label::kNear);
614 
615  // Replace the-hole NaN with the-hole pointer.
616  __ bind(&convert_hole);
618  masm->isolate()->factory()->the_hole_value());
619 
620  __ bind(&entry);
621  __ sub(ebx, Immediate(Smi::FromInt(1)));
622  __ j(not_sign, &loop);
623 
624  __ pop(ebx);
625  __ pop(edx);
626  // ebx: target map
627  // edx: receiver
628  // Set transitioned map.
630  __ RecordWriteField(edx,
632  ebx,
633  edi,
637  // Replace receiver's backing store with newly created and filled FixedArray.
639  __ RecordWriteField(edx,
641  eax,
642  edi,
646 
647  // Restore registers.
648  __ pop(eax);
650 
651  __ bind(&success);
652 }
653 
654 
655 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
656  Factory* factory,
657  Register string,
658  Register index,
659  Register result,
660  Label* call_runtime) {
661  // Fetch the instance type of the receiver into result register.
662  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
663  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
664 
665  // We need special handling for indirect strings.
666  Label check_sequential;
667  __ test(result, Immediate(kIsIndirectStringMask));
668  __ j(zero, &check_sequential, Label::kNear);
669 
670  // Dispatch on the indirect string shape: slice or cons.
671  Label cons_string;
672  __ test(result, Immediate(kSlicedNotConsMask));
673  __ j(zero, &cons_string, Label::kNear);
674 
675  // Handle slices.
676  Label indirect_string_loaded;
677  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
678  __ SmiUntag(result);
679  __ add(index, result);
680  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
681  __ jmp(&indirect_string_loaded, Label::kNear);
682 
683  // Handle cons strings.
684  // Check whether the right hand side is the empty string (i.e. if
685  // this is really a flat string in a cons string). If that is not
686  // the case we would rather go to the runtime system now to flatten
687  // the string.
688  __ bind(&cons_string);
690  Immediate(factory->empty_string()));
691  __ j(not_equal, call_runtime);
692  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
693 
694  __ bind(&indirect_string_loaded);
695  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
696  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
697 
698  // Distinguish sequential and external strings. Only these two string
699  // representations can reach here (slices and flat cons strings have been
700  // reduced to the underlying sequential or external string).
701  Label seq_string;
702  __ bind(&check_sequential);
704  __ test(result, Immediate(kStringRepresentationMask));
705  __ j(zero, &seq_string, Label::kNear);
706 
707  // Handle external strings.
708  Label ascii_external, done;
709  if (FLAG_debug_code) {
710  // Assert that we do not have a cons or slice (indirect strings) here.
711  // Sequential strings have already been ruled out.
712  __ test(result, Immediate(kIsIndirectStringMask));
713  __ Assert(zero, "external string expected, but not found");
714  }
715  // Rule out short external strings.
717  __ test_b(result, kShortExternalStringMask);
718  __ j(not_zero, call_runtime);
719  // Check encoding.
721  __ test_b(result, kStringEncodingMask);
723  __ j(not_equal, &ascii_external, Label::kNear);
724  // Two-byte string.
725  __ movzx_w(result, Operand(result, index, times_2, 0));
726  __ jmp(&done, Label::kNear);
727  __ bind(&ascii_external);
728  // Ascii string.
729  __ movzx_b(result, Operand(result, index, times_1, 0));
730  __ jmp(&done, Label::kNear);
731 
732  // Dispatch on the encoding: ASCII or two-byte.
733  Label ascii;
734  __ bind(&seq_string);
737  __ test(result, Immediate(kStringEncodingMask));
738  __ j(not_zero, &ascii, Label::kNear);
739 
740  // Two-byte string.
741  // Load the two-byte character code into the result register.
742  __ movzx_w(result, FieldOperand(string,
743  index,
744  times_2,
746  __ jmp(&done, Label::kNear);
747 
748  // Ascii string.
749  // Load the byte into the result register.
750  __ bind(&ascii);
751  __ movzx_b(result, FieldOperand(string,
752  index,
753  times_1,
755  __ bind(&done);
756 }
757 
758 #undef __
759 
760 } } // namespace v8::internal
761 
762 #endif // V8_TARGET_ARCH_IA32
static const int kResourceDataOffset
Definition: objects.h:7517
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
const intptr_t kDoubleAlignmentMask
Definition: v8globals.h:53
const uint32_t kTwoByteStringTag
Definition: objects.h:450
static Smi * FromInt(int value)
Definition: objects-inl.h:973
const int KB
Definition: globals.h:221
const Register esp
static bool IsSupported(CpuFeature f)
#define ASSERT(condition)
Definition: checks.h:270
virtual void AfterCall(MacroAssembler *masm) const
double(* UnaryMathFunction)(double x)
Definition: codegen.h:90
const uint32_t kStringRepresentationMask
Definition: objects.h:455
static void GenerateOperation(MacroAssembler *masm, TranscendentalCache::Type type)
const uint32_t kShortExternalStringMask
Definition: objects.h:483
static const int kMinComplexMemCopy
Definition: platform.h:317
const Register edi
uint8_t byte
Definition: globals.h:171
static const int kFirstOffset
Definition: objects.h:7420
static const int kParentOffset
Definition: objects.h:7473
const Register ebp
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
const Register eax
static const int kValueOffset
Definition: objects.h:1307
const uint32_t kHoleNanUpper32
Definition: v8globals.h:476
const XMMRegister xmm1
const uint32_t kIsIndirectStringMask
Definition: objects.h:462
static void ProtectCode(void *address, const size_t size)
const int kPointerSize
Definition: globals.h:234
Operand FieldOperand(Register object, int offset)
const Register ecx
const int kHeapObjectTag
Definition: v8.h:3848
const uint32_t kHoleNanLower32
Definition: v8globals.h:477
#define __
UnaryMathFunction CreateSqrtFunction()
static void GenerateSmiToDouble(MacroAssembler *masm, Label *fail)
const uint32_t kShortExternalStringTag
Definition: objects.h:484
static void GenerateMapChangeElementsTransition(MacroAssembler *masm)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kHeaderSize
Definition: objects.h:7282
static const int kElementsOffset
Definition: objects.h:2114
static const int kOffsetOffset
Definition: objects.h:7474
static const int kHeaderSize
Definition: objects.h:2233
static const int kMapOffset
Definition: objects.h:1219
static void GenerateDoubleToObject(MacroAssembler *masm, Label *fail)
const uint32_t kSlicedNotConsMask
Definition: objects.h:473
static const int kLengthOffset
Definition: objects.h:2232
const Register ebx
static const int kSecondOffset
Definition: objects.h:7421
#define UNIMPLEMENTED()
Definition: checks.h:48
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
const Register esi
const Register no_reg
static void FlushICache(void *start, size_t size)
virtual void BeforeCall(MacroAssembler *masm) const
const uint32_t kAsciiStringTag
Definition: objects.h:451
const Register edx
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type)
FlagType type() const
Definition: flags.cc:1358
const uint32_t kStringEncodingMask
Definition: objects.h:449
static const int kInstanceTypeOffset
Definition: objects.h:4992
const XMMRegister xmm0