#if V8_TARGET_ARCH_IA32

// ...
// RuntimeCallHelper book-keeping: track whether a frame exists around calls.
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
// ...
  masm->set_has_frame(false);
// ...

// In CreateExpFunction():
  if (!FLAG_fast_math) return &std::exp;
// ...
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();
// ...
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// ...
    CpuFeatureScope use_sse2(&masm, SSE2);
    XMMRegister input = xmm1;
    XMMRegister result = xmm2;
// ...
  ASSERT(!RelocInfo::RequiresRelocation(desc));
// ...
  CPU::FlushICache(buffer, actual_size);
// ...
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
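
// The finished stub behaves like a plain C function. A minimal usage sketch
// (hypothetical caller; CreateExpFunction() and UnaryMathFunction are the
// names used in this file):
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // Falls back to std::exp when FLAG_fast_math
//                              // is off or buffer allocation fails.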
 
// In CreateSqrtFunction(), following the same allocate/assemble/flush/cast
// pattern:
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// ...
    CpuFeatureScope use_sse2(&masm, SSE2);
// ...
  ASSERT(!RelocInfo::RequiresRelocation(desc));
// ...
  CPU::FlushICache(buffer, actual_size);
// ...
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
 
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
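
// MemMoveEmitMainLoop() below is emitted four ways (FORWARD/BACKWARD crossed
// with MOVE_ALIGNED/MOVE_UNALIGNED), so one 64-bytes-per-iteration SSE2 loop
// serves all four specialized paths in CreateMemMoveFunction():
//
//   MemMoveEmitMainLoop(&masm, &move_last_15,  FORWARD,  MOVE_ALIGNED);
//   MemMoveEmitMainLoop(&masm, &move_last_15,  FORWARD,  MOVE_UNALIGNED);
//   MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
//   MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);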
 
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
 
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
// ...
  // Main loop: move 64 bytes per iteration.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
// ...
  // At most 63 bytes left: move 32 of them if bit 5 of count is set.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
// ...
  // At most 31 bytes left: move 16 of them if bit 4 of count is set.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
// ...
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
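
// (movdq(aligned, ...) is a MacroAssembler helper that, judging by its use
// above, emits movdqa when its first argument is true and movdqu otherwise;
// the destination stores can always use movdqa because dst is aligned first.)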
 
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
 
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};
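
// Labels are buffer-relative while the jump tables emitted with __ dd(...)
// below need absolute addresses, so LabelConverter rebases a bound label's
// position onto the start address of the stub's buffer.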
 
OS::MemMoveFunction CreateMemMoveFunction() {
// ...
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);
 
  // Copies of up to kSmallCopySize bytes go through the byte/word handlers.
  const size_t kSmallCopySize = 8;
  // Copies of up to kMediumCopySize bytes go through the XMM handlers.
  const size_t kMediumCopySize = 63;
  // Larger copies use the main loops; buffers that overlap by fewer than
  // kMinMoveDistance bytes take the byte-wise "much overlap" paths instead.
  const size_t kMinMoveDistance = 16;
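
  // Overall dispatch, as emitted below: count <= kSmallCopySize jumps to
  // small_size, count <= kMediumCopySize jumps to medium_size, and larger
  // counts take the forward or backward main-loop path depending on the
  // relative order of dst and src, so overlapping regions are copied safely.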
 
  int stack_offset = 0;  // Bumped by pushes so operand offsets stay correct.
// ...
  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
// ...
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));
// ...
    CpuFeatureScope sse2_scope(&masm, SSE2);
 
    __ prefetch(Operand(src, 0), 1);
    __ cmp(count, kSmallCopySize);
// ...
    __ cmp(count, kMediumCopySize);
 
      // Forward copy: dst is below src.
      Label unaligned_source, move_last_15, skip_last_move;
// ...
      // Buffers closer together than kMinMoveDistance take the byte path.
      __ cmp(eax, kMinMoveDistance);
      __ j(below, &forward_much_overlap);
// ...
      // Move the first 16 bytes unconditionally, then align dst upward.
      __ movdqu(xmm0, Operand(src, 0));
      __ movdqu(Operand(dst, 0), xmm0);
// ...
      __ add(edx, Immediate(16));
// ...
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
// ...
      // If src did not end up 16-byte aligned too, use unaligned loads.
      __ test(src, Immediate(0xF));
// ...
      MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// ...
      __ bind(&move_last_15);
// ...
      __ j(zero, &skip_last_move, Label::kNear);
// ...
      __ bind(&skip_last_move);
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ bind(&unaligned_source);
      MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
      __ jmp(&move_last_15);
 
      // Forward copy with heavily overlapping buffers: move one byte at a
      // time until dst is 16-byte aligned, then run the unaligned main loop.
      Label loop_until_aligned, last_15_much_overlap;
      __ bind(&loop_until_aligned);
      __ mov_b(eax, Operand(src, 0));
// ...
      __ mov_b(Operand(dst, 0), eax);
// ...
      __ bind(&forward_much_overlap);  // Entered from the distance check above.
      __ test(dst, Immediate(0xF));
// ...
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                          FORWARD, MOVE_UNALIGNED);
      __ bind(&last_15_much_overlap);
// ...
      __ j(zero, &pop_and_return);
      __ cmp(count, kSmallCopySize);
// ...
      __ jmp(&medium_size);
 
      // Backward copy: dst is above src.
      Label unaligned_source, move_first_15, skip_last_move;
// ...
      __ cmp(eax, kMinMoveDistance);
      __ j(below, &backward_much_overlap);
// ...
      // Move the last 16 bytes unconditionally, then align dst downward.
      __ movdqu(xmm0, Operand(src, -0x10));
      __ movdqu(Operand(dst, -0x10), xmm0);
// ...
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
// ...
      __ test(src, Immediate(0xF));
// ...
      MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
// ...
      __ bind(&move_first_15);
// ...
      __ j(zero, &skip_last_move, Label::kNear);
// ...
      __ movdqu(xmm0, Operand(src, 0));
      __ movdqu(Operand(dst, 0), xmm0);
      __ bind(&skip_last_move);
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ bind(&unaligned_source);
      MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
      __ jmp(&move_first_15);
 
      // Backward copy with heavily overlapping buffers.
      Label loop_until_aligned, first_15_much_overlap;
      __ bind(&loop_until_aligned);
// ...
      __ mov_b(eax, Operand(src, 0));
      __ mov_b(Operand(dst, 0), eax);
// ...
      __ bind(&backward_much_overlap);  // Entered from the distance check above.
      __ test(dst, Immediate(0xF));
// ...
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                          BACKWARD, MOVE_UNALIGNED);
      __ bind(&first_15_much_overlap);
// ...
      __ j(zero, &pop_and_return);
// ...
      __ cmp(count, kSmallCopySize);
// ...
      __ jmp(&medium_size);
 
      // Medium copies (9..63 bytes): each size class is handled by a short
      // fixed sequence of XMM moves; entry points are collected in the
      // medium_handlers table below.
      Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
// ...
      __ movsd(xmm0, Operand(src, 0));
// ...
      __ movsd(Operand(dst, 0), xmm0);
// ...
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ movdqu(xmm0, Operand(src, 0));
// ...
      __ movdqu(Operand(dst, 0x00), xmm0);
// ...
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ movdqu(xmm0, Operand(src, 0x00));
      __ movdqu(xmm1, Operand(src, 0x10));
// ...
      __ movdqu(Operand(dst, 0x00), xmm0);
      __ movdqu(Operand(dst, 0x10), xmm1);
// ...
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ movdqu(xmm0, Operand(src, 0x00));
      __ movdqu(xmm1, Operand(src, 0x10));
      __ movdqu(xmm2, Operand(src, 0x20));
// ...
      __ movdqu(Operand(dst, 0x00), xmm0);
      __ movdqu(Operand(dst, 0x10), xmm1);
      __ movdqu(Operand(dst, 0x20), xmm2);
// ...
      MemMoveEmitPopAndReturn(&masm);
// ...
      __ bind(&medium_handlers);
      __ dd(conv.address(&f9_16));
      __ dd(conv.address(&f17_32));
      __ dd(conv.address(&f33_48));
      __ dd(conv.address(&f49_63));
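
      // The (partially elided) medium_size dispatch below indexes this table
      // by size class and jumps through it; LabelConverter supplies the
      // absolute handler addresses that __ dd(...) plants in the stream.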
 
      __ bind(&medium_size);  // 9..63 bytes: dispatch through medium_handlers.
// ...
      if (FLAG_debug_code) {
// ...
      // Small copies (0..8 bytes): one handler per count, 5..8 shared.
      Label small_handlers, f0, f1, f2, f3, f4, f5_8;
 
      // f0: nothing to move.
      MemMoveEmitPopAndReturn(&masm);
// ...
      // f1: one byte.
      __ mov_b(eax, Operand(src, 0));
      __ mov_b(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);
// ...
      // f2: one 16-bit word.
      __ mov_w(eax, Operand(src, 0));
      __ mov_w(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);
// ...
      // f3: a 16-bit word plus a byte.
      __ mov_w(eax, Operand(src, 0));
      __ mov_b(edx, Operand(src, 2));
      __ mov_w(Operand(dst, 0), eax);
      __ mov_b(Operand(dst, 2), edx);
      MemMoveEmitPopAndReturn(&masm);
// ...
      // f4: one 32-bit word.
      __ mov(eax, Operand(src, 0));
      __ mov(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);
// ...
      // f5_8: two 32-bit words, overlapping as needed; the second pair of
      // moves is elided.
      __ mov(eax, Operand(src, 0));
// ...
      __ mov(Operand(dst, 0), eax);
// ...
      MemMoveEmitPopAndReturn(&masm);
// ...
      // Handler table indexed directly by count; counts 5..8 share f5_8.
      __ bind(&small_handlers);
      __ dd(conv.address(&f0));
      __ dd(conv.address(&f1));
      __ dd(conv.address(&f2));
      __ dd(conv.address(&f3));
      __ dd(conv.address(&f4));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));
 
      __ bind(&small_size);  // 0..8 bytes: dispatch through small_handlers.
      if (FLAG_debug_code) {
// ...
      // Computed jump: load the handler address for this count and jump to it
      // (the jmp itself is elided).
      __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
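
      // In C terms the small-size dispatch is roughly (illustrative only):
      //
      //   typedef void (*Handler)(void);
      //   static Handler small_handlers[9];  // built by the __ dd()s above
      //   small_handlers[count]();           // count is 0..8 here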
 
      // Fallback path (emitted when SSE2 is unavailable): plain 4-byte, then
      // 1-byte, copy loops.
      Label forward_loop_1byte, forward_loop_4byte;
      __ bind(&forward_loop_4byte);
      __ mov(eax, Operand(src, 0));
      __ sub(count, Immediate(4));
      __ add(src, Immediate(4));
      __ mov(Operand(dst, 0), eax);
      __ add(dst, Immediate(4));
// ...
      __ j(above, &forward_loop_4byte);
      __ bind(&forward_loop_1byte);
// ...
      __ mov_b(eax, Operand(src, 0));
// ...
      __ mov_b(Operand(dst, 0), eax);
// ...
      __ jmp(&forward_loop_1byte);
 
      Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
// ...
      __ bind(&backward_loop_4byte);
      __ sub(src, Immediate(4));
      __ sub(count, Immediate(4));
      __ mov(eax, Operand(src, 0));
      __ sub(dst, Immediate(4));
      __ mov(Operand(dst, 0), eax);
// ...
      __ j(above, &backward_loop_4byte);
      __ bind(&backward_loop_1byte);
// ...
      __ bind(&entry_shortcut);
// ...
      __ mov_b(eax, Operand(src, 0));
// ...
      __ mov_b(Operand(dst, 0), eax);
      __ jmp(&backward_loop_1byte);
 
  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);
// ...
  ASSERT(!RelocInfo::RequiresRelocation(desc));
  CPU::FlushICache(buffer, actual_size);
// ...
  return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
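
// As with the math stubs above, the buffer is cast to a plain function
// pointer. A hypothetical caller, assuming the (dest, src, size) argument
// order implied by the kDestinationOffset/kSourceOffset/kSizeOffset loads at
// the top of the stub:
//
//   OS::MemMoveFunction memmove_fn = CreateMemMoveFunction();
//   if (memmove_fn != NULL) memmove_fn(dest_ptr, src_ptr, num_bytes);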
 
#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
// ...
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
  }
 
// ...
  // Write barrier for the map slot (remaining arguments elided).
  __ RecordWriteField(edx,
// ...
 
// In ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, fail):
  Label loop, entry, convert_hole, gc_required, only_change_map;
// ...
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }
// ...
  // Empty arrays need only a map transition, not a new backing store.
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
 
// ...
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
// ...
  // Write barrier (remaining arguments elided).
  __ RecordWriteField(edx,
// ...
 
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
// ...
    CpuFeatureScope use_sse2(masm, SSE2);
    __ movsd(the_hole_nan,
             Operand::StaticVariable(canonical_the_hole_nan_reference));
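
  // The "hole" in a FixedDoubleArray is a specific NaN bit pattern
  // (kHoleNanUpper32/kHoleNanLower32); loading it from one canonical
  // external reference keeps every written hole bit-identical, so holes
  // stay distinguishable from ordinary computed NaNs.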
 
  __ bind(&gc_required);
// ...
  // Per element: non-smis must be the hole; smis get converted to doubles.
  __ JumpIfNotSmi(ebx, &convert_hole);
// ...
    CpuFeatureScope fscope(masm, SSE2);
// ...
    __ fild_s(Operand(esp, 0));
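
  // fild_s sits in the non-SSE2 (x87) branch: it loads the untagged integer
  // that was pushed onto the stack and converts it to a double in one
  // instruction, mirroring what the SSE2 branch does in the elided lines.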
 
  __ bind(&convert_hole);
// ...
  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }
// ...
    CpuFeatureScope use_sse2(masm, SSE2);
// ...
    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
 
  __ bind(&only_change_map);
// ...
  __ RecordWriteField(edx,
// ...
 
// In ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, fail):
  Label loop, entry, convert_hole, gc_required, only_change_map, success;
// ...
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }
// ...
  // Empty arrays need only a map transition, not a new backing store.
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
// ...
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
 
  __ bind(&only_change_map);
// ...
  __ RecordWriteField(edx,
// ...
  __ bind(&gc_required);
// ...
    CpuFeatureScope fscope(masm, SSE2);
// ...
  // Write barrier for the heap number just stored into the new array.
  __ RecordWriteArray(eax,
// ...
  __ jmp(&entry, Label::kNear);
// ...
  // Holes are replaced by the canonical the_hole_value.
  __ bind(&convert_hole);
// ...
         masm->isolate()->factory()->the_hole_value());
// ...
  __ RecordWriteField(edx,
// ...
  __ RecordWriteField(edx,
// ...
 
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
// ...
  Label check_sequential;
// ...
  __ j(zero, &check_sequential, Label::kNear);
// ...
  // Indirect strings: dispatch on slice vs. cons.
  __ j(zero, &cons_string, Label::kNear);
// ...
  // Sliced string: add the slice offset to the index.
  Label indirect_string_loaded;
// ...
  __ add(index, result);
// ...
  __ jmp(&indirect_string_loaded, Label::kNear);
// ...
  // Cons string: only flattened cons strings, whose second part is the empty
  // string, are handled in line; everything else goes to the runtime.
  __ bind(&cons_string);
// ...
         Immediate(factory->empty_string()));
 
  __ bind(&indirect_string_loaded);
// ...
  // Distinguish sequential from external strings.
  __ bind(&check_sequential);
// ...
  __ j(zero, &seq_string, Label::kNear);
// ...
  // External string: fetch the character from the backing resource.
  Label ascii_external, done;
  if (FLAG_debug_code) {
// ...
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
// ...
  // Two-byte external string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
// ...
  // One-byte external string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);
// ...
  __ bind(&seq_string);
// ...
  __ jmp(&done, Label::kNear);
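
// Character width is the only difference between the two external paths:
// two-byte strings scale the index by times_2 and load 16 bits (movzx_w),
// one-byte strings scale by times_1 and load 8 bits (movzx_b).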
 
static Operand ExpConstant(int index) {
  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}
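
// The exp() coefficients live in a static table set up by
// ExternalReference::InitializeMathExpData() (called from CreateExpFunction
// above), so the generated code can reach them through absolute operands.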
 
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(double_scratch));
  ASSERT(!input.is(result));
  ASSERT(!result.is(double_scratch));
  ASSERT(!temp1.is(temp2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
 
  __ movsd(double_scratch, ExpConstant(0));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
// ...
  __ ucomisd(input, ExpConstant(1));
  __ movsd(result, ExpConstant(2));
// ...
  __ movsd(double_scratch, ExpConstant(3));
  __ movsd(result, ExpConstant(4));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movd(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, ExpConstant(6));
  __ mulsd(double_scratch, ExpConstant(5));
  __ subsd(double_scratch, input);
  __ subsd(result, double_scratch);
  __ movsd(input, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ mov(temp1, temp2);
  __ mulsd(result, ExpConstant(7));
  __ subsd(result, double_scratch);
  __ add(temp1, Immediate(0x1ff800));
  __ addsd(result, ExpConstant(8));
  __ and_(temp2, Immediate(0x7ff));
// ...
  __ movd(input, temp1);
  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Swap low dwords.
  __ movsd(double_scratch, Operand::StaticArray(
      temp2, times_8, ExternalReference::math_exp_log_table()));
  __ orps(input, double_scratch);
  __ mulsd(result, input);
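
// Table-driven exp(), roughly: after range checks against ExpConstant(0) and
// ExpConstant(1), the scaled input is split through the temp registers; the
// high bits land in the double's exponent field (movd/pshufd above), the low
// 11 bits (and_(temp2, 0x7ff)) index the 2048-entry math_exp_log_table, and
// the mulsd/addsd chain evaluates a small polynomial correction that the
// final mulsd combines with the exponent and table factors.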
 
static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static byte sequence[kNoCodeAgeSequenceLength];
  *length = kNoCodeAgeSequenceLength;
// ...
    // The young sequence is the standard frame-building prologue, assembled
    // once into the static buffer above and reused for comparison/patching.
    CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->push(ebp);
    patcher.masm()->mov(ebp, esp);
    patcher.masm()->push(esi);
    patcher.masm()->push(edi);
// ...
 
// In Code::IsYoungSequence(sequence):
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = (!memcmp(sequence, young_sequence, young_length));
  ASSERT(result || *sequence == kCallOpcode);
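
// A code object is young exactly when its prologue still byte-matches the
// unpatched sequence; anything aged must instead begin with a call
// (kCallOpcode) to one of the code-age stubs, which the ASSERT checks.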
 
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
// ...
    // Decode the relative call emitted by PatchPlatformCodeAge() to find the
    // code-age stub, then recover age and parity from the stub itself.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
 
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}
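
// Making code "old" overwrites the young prologue with a call to the age
// stub; restoring youth copies the cached young sequence back and flushes
// the instruction cache so the patched bytes take effect.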
 
#endif  // V8_TARGET_ARCH_IA32