30 #if V8_TARGET_ARCH_IA32
43 #ifndef V8_INTERPRETED_REGEXP
101 #define __ ACCESS_MASM(masm_)
// NOTE(review): This file is a lossy extraction of V8's IA-32 Irregexp
// macro-assembler; the leading number on most lines is the ORIGINAL source
// line number, and many lines of every definition are missing. Comments
// describe only what the visible fragments show — do not treat the code
// here as compilable.
//
// Constructor fragment: takes the number of registers to save, forwards
// the zone to NativeRegExpMacroAssembler, and allocates the MacroAssembler
// (growable buffer of kRegExpCodeSize) used to emit the matcher code.
105 int registers_to_save,
107 : NativeRegExpMacroAssembler(zone),
108 masm_(new MacroAssembler(zone->isolate(),
NULL, kRegExpCodeSize)),
110 num_registers_(registers_to_save),
111 num_saved_registers_(registers_to_save),
// Emit a jump over the (not yet generated) body to the entry code, then
// bind start_label_ where a match attempt (re)starts.
118 __ jmp(&entry_label_);
119 __ bind(&start_label_);
// Destructor: Unuse() every label so destroying the assembler with
// unresolved branches does not trip internal asserts.
123 RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
126 entry_label_.Unuse();
127 start_label_.Unuse();
128 success_label_.Unuse();
129 backtrack_label_.Unuse();
131 check_preempt_label_.Unuse();
132 stack_overflow_label_.Unuse();
// How far the backtrack stack pointer may drift past its limit before an
// explicit overflow check is required; delegated to RegExpStack.
136 int RegExpMacroAssemblerIA32::stack_limit_slack() {
137 return RegExpStack::kStackLimitSlack;
// Advance the current input position (held in edi, in bytes) by `by`
// characters; char_size() scales for ASCII vs UC16 mode.
141 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(
int by) {
143 __ add(
edi, Immediate(by * char_size()));
// Add `by` to the value stored in register slot `reg` (an ebp-relative
// frame slot — see register_location()).
148 void RegExpMacroAssemblerIA32::AdvanceRegister(
int reg,
int by) {
150 ASSERT(reg < num_registers_);
152 __ add(register_location(reg), Immediate(by));
// Backtrack fragment: only the rebase of the popped code-relative offset
// onto the code object (into ebx) is visible; the pop and indirect jump
// from the original body are missing from this extraction.
157 void RegExpMacroAssemblerIA32::Backtrack() {
161 __ add(
ebx, Immediate(masm_->CodeObject()));
// Bind `label` at the current emission point (body not visible here).
166 void RegExpMacroAssemblerIA32::Bind(Label* label) {
// Branch (or backtrack when on_equal is NULL) if the current character
// equals c.
171 void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
172 __ cmp(current_character(), c);
173 BranchOrBacktrack(
equal, on_equal);
// Branch if the current character is strictly greater than `limit`.
177 void RegExpMacroAssemblerIA32::CheckCharacterGT(
uc16 limit, Label* on_greater) {
178 __ cmp(current_character(), limit);
179 BranchOrBacktrack(
greater, on_greater);
// Branch to on_at_start if the current position is the start of input:
// kStartIndex must be 0 and the position must equal kInputStart.
// NOTE(review): "¬_at_start" below is mis-decoded "&not_at_start"
// (HTML-entity corruption, &not; -> ¬) — restore before compiling.
183 void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
186 __ cmp(Operand(
ebp, kStartIndex), Immediate(0));
187 BranchOrBacktrack(
not_equal, ¬_at_start);
190 __ cmp(
eax, Operand(
ebp, kInputStart));
191 BranchOrBacktrack(
equal, on_at_start);
192 __ bind(¬_at_start);
// Inverse of CheckAtStart: branch to on_not_at_start when either the
// start index is non-zero or the position is past kInputStart.
196 void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
198 __ cmp(Operand(
ebp, kStartIndex), Immediate(0));
199 BranchOrBacktrack(
not_equal, on_not_at_start);
202 __ cmp(
eax, Operand(
ebp, kInputStart));
203 BranchOrBacktrack(
not_equal, on_not_at_start);
// Branch if the current character is strictly less than `limit`.
207 void RegExpMacroAssemblerIA32::CheckCharacterLT(
uc16 limit, Label* on_less) {
208 __ cmp(current_character(), limit);
209 BranchOrBacktrack(
less, on_less);
// Greedy-loop optimization: compare the current position (edi) against
// the value on top of the backtrack stack (the branch on the result is
// missing from this extraction).
213 void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
215 __ cmp(
edi, Operand(backtrack_stackpointer(), 0));
219 __ bind(&fallthrough);
// Case-insensitive back-reference check. Loads the capture's start (edx)
// and end (ebx) offsets from register slots start_reg / start_reg+1; the
// visible BranchOrBacktrack calls bail to on_no_match when the capture is
// empty/invalid or the remaining input is too short (exact conditions are
// on lines missing from this extraction).
223 void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
225 Label* on_no_match) {
227 __ mov(
edx, register_location(start_reg));
228 __ mov(
ebx, register_location(start_reg + 1));
234 BranchOrBacktrack(
less, on_no_match);
243 BranchOrBacktrack(
greater, on_no_match);
// ASCII path: byte-wise compare loop with inline Latin-1 case folding.
245 if (mode_ == ASCII) {
248 Label loop_increment;
// ecx is clobbered below, so the backtrack stack pointer (which the
// original keeps in ecx) is saved around the loop.
251 __ push(backtrack_stackpointer());
261 __ cmpb_al(Operand(
edx, 0));
// Lower-case letter range check ('a'..'z' after the bias applied on a
// missing line), then the 224..254 (Latin-1 letters, excluding 247 '÷')
// range for the extended case-folding path.
267 __ cmp(
ecx, static_cast<int32_t>(
'z' -
'a'));
268 Label convert_capture;
271 __ sub(
ecx, Immediate(224 -
'a'));
272 __ cmp(
ecx, Immediate(254 - 224));
274 __ cmp(
ecx, Immediate(247 - 224));
276 __ bind(&convert_capture);
284 __ bind(&loop_increment);
// Advance both the capture cursor (edx) and input cursor (edi) one byte.
286 __ add(
edx, Immediate(1));
287 __ add(
edi, Immediate(1));
// Restore the saved backtrack stack pointer on both the fail and success
// exits of the byte-compare loop.
295 __ pop(backtrack_stackpointer());
301 __ pop(backtrack_stackpointer());
// UC16 path: call the C helper re_case_insensitive_compare_uc16 with
// 4 arguments (capture address, current position, length, isolate).
311 __ push(backtrack_stackpointer());
314 static const int argument_count = 4;
315 __ PrepareCallCFunction(argument_count,
ecx);
325 Immediate(ExternalReference::isolate_address(isolate())));
339 AllowExternalCallThatCantCauseGC scope(masm_);
340 ExternalReference compare =
341 ExternalReference::re_case_insensitive_compare_uc16(isolate());
342 __ CallCFunction(compare, argument_count);
346 __ pop(backtrack_stackpointer());
// Helper returns zero on mismatch.
352 BranchOrBacktrack(
zero, on_no_match);
356 __ bind(&fallthrough);
// Case-sensitive back-reference check: loads capture bounds, then runs an
// inline compare loop using cmpb_al (ASCII) or cmpw_ax (UC16), advancing
// by char_size() per iteration.
360 void RegExpMacroAssemblerIA32::CheckNotBackReference(
362 Label* on_no_match) {
368 __ mov(
edx, register_location(start_reg));
369 __ mov(
eax, register_location(start_reg + 1));
372 BranchOrBacktrack(
less, on_no_match);
379 BranchOrBacktrack(
greater, on_no_match);
382 __ push(backtrack_stackpointer());
391 if (mode_ == ASCII) {
393 __ cmpb_al(Operand(
ebx, 0));
397 __ cmpw_ax(Operand(
ebx, 0));
401 __ add(
edx, Immediate(char_size()));
402 __ add(
ebx, Immediate(char_size()));
// Restore backtrack stack pointer on both exits.
410 __ pop(backtrack_stackpointer());
418 __ pop(backtrack_stackpointer());
420 __ bind(&fallthrough);
// Branch to on_not_equal if the current character differs from c.
424 void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
425 Label* on_not_equal) {
426 __ cmp(current_character(), c);
427 BranchOrBacktrack(
not_equal, on_not_equal);
// Branch to on_equal if (current_character() & mask) == c. The visible
// code shows both a test-based fast path and an and_ into eax; the
// selection between them is on lines missing from this extraction.
431 void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
435 __ test(current_character(), Immediate(mask));
438 __ and_(
eax, current_character());
441 BranchOrBacktrack(
equal, on_equal);
// Inverse of the above: branch when (current_character() & mask) != c.
445 void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
447 Label* on_not_equal) {
449 __ test(current_character(), Immediate(mask));
452 __ and_(
eax, current_character());
455 BranchOrBacktrack(
not_equal, on_not_equal);
// Branch when ((current_character() - minus) & mask) != c; used for
// checks like case-insensitive ranges. lea computes the subtraction
// without touching flags.
459 void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
463 Label* on_not_equal) {
464 ASSERT(minus < String::kMaxUtf16CodeUnit);
465 __ lea(
eax, Operand(current_character(), -minus));
467 __ test(
eax, Immediate(mask));
472 BranchOrBacktrack(
not_equal, on_not_equal);
// Range check via the classic unsigned-compare trick: (c - from) is
// compared against (to - from), so one branch covers from <= c <= to.
476 void RegExpMacroAssemblerIA32::CheckCharacterInRange(
479 Label* on_in_range) {
480 __ lea(
eax, Operand(current_character(), -from));
481 __ cmp(
eax, to - from);
// Same trick, branching (unsigned `above`) when OUTSIDE the range.
486 void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
489 Label* on_not_in_range) {
490 __ lea(
eax, Operand(current_character(), -from));
491 __ cmp(
eax, to - from);
492 BranchOrBacktrack(
above, on_not_in_range);
// Branch to on_bit_set if the bit for the current character is set in
// `table` (a ByteArray used as a character-class bitmap). When the
// character may exceed the table size, it is masked with kTableSize-1
// into ebx first.
496 void RegExpMacroAssemblerIA32::CheckBitInTable(
497 Handle<ByteArray> table,
499 __ mov(
eax, Immediate(table));
500 Register index = current_character();
501 if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
502 __ mov(
ebx, kTableSize - 1);
503 __ and_(
ebx, current_character());
507 BranchOrBacktrack(
not_equal, on_bit_set);
// Emits a specialized check for a character-class escape (`type` is the
// escape character, e.g. 's', 'd', 'w', '.'); returns true when it emitted
// code, false when the caller must fall back to the generic path. The
// switch on `type` and most case labels are missing from this extraction;
// the fragments below are recognizable per-class bodies.
511 bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(
uc16 type,
512 Label* on_no_match) {
// '\s' (ASCII): accept ' ' directly, then fold '\t'..'\r' into one
// unsigned range compare; 0x00a0 (NBSP) is checked as well.
518 if (mode_ == ASCII) {
521 __ cmp(current_character(),
' ');
522 __ j(
equal, &success, Label::kNear);
524 __ lea(
eax, Operand(current_character(), -
'\t'));
525 __ cmp(
eax,
'\r' -
'\t');
528 __ cmp(
eax, 0x00a0 -
'\t');
529 BranchOrBacktrack(
not_equal, on_no_match);
// '\d': match '0'..'9' with the subtract-and-unsigned-compare trick.
539 __ lea(
eax, Operand(current_character(), -
'0'));
540 __ cmp(
eax,
'9' -
'0');
541 BranchOrBacktrack(
above, on_no_match);
// '\D': same range computation, opposite branch sense (branch missing).
545 __ lea(
eax, Operand(current_character(), -
'0'));
546 __ cmp(
eax,
'9' -
'0');
// '.': reject line terminators. XOR with 0x01 maps \n(0x0a)/\r(0x0d)
// so that 0x0b/0x0c-based range compares cover both, then (non-ASCII)
// the 0x2028/0x2029 Unicode line separators are excluded the same way.
551 __ mov(
eax, current_character());
552 __ xor_(
eax, Immediate(0x01));
554 __ sub(
eax, Immediate(0x0b));
555 __ cmp(
eax, 0x0c - 0x0b);
561 __ sub(
eax, Immediate(0x2028 - 0x0b));
562 __ cmp(
eax, 0x2029 - 0x2028);
// '\w': characters above 'z' can't be word characters (UC16 mode), then
// a lookup in the precomputed re_word_character_map bitmap; zero bit
// means no match.
568 if (mode_ != ASCII) {
570 __ cmp(current_character(), Immediate(
'z'));
571 BranchOrBacktrack(
above, on_no_match);
574 ExternalReference word_map = ExternalReference::re_word_character_map();
575 __ test_b(current_character(),
576 Operand::StaticArray(current_character(),
times_1, word_map));
577 BranchOrBacktrack(
zero, on_no_match);
// '\W': same table lookup with inverted sense (not_zero -> no match);
// characters above 'z' match trivially in UC16 mode.
582 if (mode_ != ASCII) {
584 __ cmp(current_character(), Immediate(
'z'));
588 ExternalReference word_map = ExternalReference::re_word_character_map();
589 __ test_b(current_character(),
590 Operand::StaticArray(current_character(),
times_1, word_map));
591 BranchOrBacktrack(
not_zero, on_no_match);
592 if (mode_ != ASCII) {
// '\n' class (line terminators): same xor/sub range trick as '.' above,
// with the ASCII path branching straight to on_no_match and the UC16
// path also accepting 0x2028/0x2029.
604 __ mov(
eax, current_character());
605 __ xor_(
eax, Immediate(0x01));
607 __ sub(
eax, Immediate(0x0b));
608 __ cmp(
eax, 0x0c - 0x0b);
609 if (mode_ == ASCII) {
610 BranchOrBacktrack(
above, on_no_match);
618 __ sub(
eax, Immediate(0x2028 - 0x0b));
620 BranchOrBacktrack(
above, on_no_match);
// Fail() fragment (signature missing in this extraction): load the
// FAILURE sentinel into eax and jump to the common exit.
635 __ Move(
eax, Immediate(FAILURE));
637 __ jmp(&exit_label_);
// Finalizes code generation: emits the entry stub (frame setup, stack
// checks, register initialization), the success/exit/backtrack epilogues,
// and the preemption / stack-overflow handlers, then packages the buffer
// into a Code object. Heavily decimated — the prologue push/pop sequence,
// several branches, and loop headers are missing; comments mark the stages
// that remain visible.
641 Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Entry point targeted by the jmp emitted in the constructor. The frame
// is built manually (no frame pointer convention from FrameScope).
647 __ bind(&entry_label_);
651 FrameScope scope(masm_, StackFrame::MANUAL);
// Two zero slots pushed as part of frame setup (the surrounding pushes
// are missing from this extraction).
661 __ push(Immediate(0));
662 __ push(Immediate(0));
// Check the C stack against the isolate's stack limit before running;
// bail with EXCEPTION or call the stack-guard helper on failure.
665 Label stack_limit_hit;
668 ExternalReference stack_limit =
669 ExternalReference::address_of_stack_limit(isolate());
671 __ sub(
ecx, Operand::StaticVariable(stack_limit));
680 __ mov(
eax, EXCEPTION);
683 __ bind(&stack_limit_hit);
684 CallCheckStackGuardState(
ebx);
// Load start index (ebx), input end (esi) and input start (edi) from the
// frame; the position register is then made relative to the end.
691 __ mov(
ebx, Operand(
ebp, kStartIndex));
696 __ mov(
esi, Operand(
ebp, kInputEnd));
698 __ mov(
edi, Operand(
ebp, kInputStart));
// Store the "position -1" sentinel used to initialize capture registers.
712 __ mov(Operand(
ebp, kInputStartMinusOne),
eax);
// Touch the register-area stack pages one page apart so the OS commits
// them before they are written out of order.
717 const int kPageSize = 4096;
719 for (
int i = num_saved_registers_ + kRegistersPerPage - 1;
721 i += kRegistersPerPage) {
722 __ mov(register_location(i),
eax);
// Load the previous character: '\n' when at the very start of input
// (so ^ and \b behave as if preceded by a newline), otherwise the real
// character at position -1.
726 Label load_char_start_regexp, start_regexp;
728 __ cmp(Operand(
ebp, kStartIndex), Immediate(0));
729 __ j(
not_equal, &load_char_start_regexp, Label::kNear);
730 __ mov(current_character(),
'\n');
731 __ jmp(&start_regexp, Label::kNear);
734 __ bind(&load_char_start_regexp);
736 LoadCurrentCharacterUnchecked(-1, 1);
737 __ bind(&start_regexp);
// Initialize all capture registers to the -1 sentinel; a counted loop
// (ecx from kRegisterZero) is used when there are many of them.
740 if (num_saved_registers_ > 0) {
744 if (num_saved_registers_ > 8) {
745 __ mov(
ecx, kRegisterZero);
753 for (
int i = 0; i < num_saved_registers_; i++) {
754 __ mov(register_location(i),
eax);
// Initialize the backtrack stack pointer and jump to the regexp body.
760 __ mov(backtrack_stackpointer(), Operand(
ebp, kStackHighEnd));
762 __ jmp(&start_label_);
// Success epilogue: convert capture offsets (relative to input end) into
// character indices and store them into the caller's output array.
765 if (success_label_.is_linked()) {
767 __ bind(&success_label_);
768 if (num_saved_registers_ > 0) {
770 __ mov(
ebx, Operand(
ebp, kRegisterOutput));
771 __ mov(
ecx, Operand(
ebp, kInputEnd));
772 __ mov(
edx, Operand(
ebp, kStartIndex));
773 __ sub(
ecx, Operand(
ebp, kInputStart));
779 for (
int i = 0; i < num_saved_registers_; i++) {
780 __ mov(
eax, register_location(i));
781 if (i == 0 && global_with_zero_length_check()) {
// Global-mode restart: count the match, check whether the output buffer
// has room for another full capture set, advance the output pointer, and
// prepare for the next attempt.
797 __ inc(Operand(
ebp, kSuccessfulCaptures));
800 __ mov(
ecx, Operand(
ebp, kNumOutputRegisters));
801 __ sub(
ecx, Immediate(num_saved_registers_));
803 __ cmp(
ecx, Immediate(num_saved_registers_));
806 __ mov(Operand(
ebp, kNumOutputRegisters),
ecx);
808 __ add(Operand(
ebp, kRegisterOutput),
// Re-load the -1 sentinel; for zero-length-match regexps, exit when the
// previous match was empty and at the end, otherwise advance by one
// character (2 bytes visible here — UC16 branch of the original).
812 __ mov(
eax, Operand(
ebp, kInputStartMinusOne));
814 if (global_with_zero_length_check()) {
822 __ j(
zero, &exit_label_, Label::kNear);
825 __ add(
edi, Immediate(2));
831 __ jmp(&load_char_start_regexp);
// Non-global success: return SUCCESS.
833 __ mov(
eax, Immediate(SUCCESS));
// Common exit: in global mode the return value is the number of
// successful captures; restore callee-saved state and return.
837 __ bind(&exit_label_);
840 __ mov(
eax, Operand(
ebp, kSuccessfulCaptures));
843 __ bind(&return_eax);
845 __ lea(
esp, Operand(
ebp, kBackup_ebx));
// Backtrack target: shared code for popping a backtrack entry.
855 if (backtrack_label_.is_linked()) {
856 __ bind(&backtrack_label_);
860 Label exit_with_exception;
// Preemption handler (reached via SafeCall from CheckPreemption): save
// the backtrack stack pointer, run the stack-guard C helper, restore
// state and the string pointer (esi) which may have moved.
863 if (check_preempt_label_.is_linked()) {
864 SafeCallTarget(&check_preempt_label_);
866 __ push(backtrack_stackpointer());
869 CallCheckStackGuardState(
ebx);
876 __ pop(backtrack_stackpointer());
878 __ mov(
esi, Operand(
ebp, kInputEnd));
// Backtrack-stack overflow handler: call re_grow_stack(3 args incl.
// isolate); a NULL result means the stack cannot grow -> exception,
// otherwise adopt the reallocated stack pointer.
883 if (stack_overflow_label_.is_linked()) {
884 SafeCallTarget(&stack_overflow_label_);
893 static const int num_arguments = 3;
894 __ PrepareCallCFunction(num_arguments,
ebx);
896 Immediate(ExternalReference::isolate_address(isolate())));
897 __ lea(
eax, Operand(
ebp, kStackHighEnd));
900 ExternalReference grow_stack =
901 ExternalReference::re_grow_stack(isolate());
902 __ CallCFunction(grow_stack, num_arguments);
906 __ j(
equal, &exit_with_exception);
908 __ mov(backtrack_stackpointer(),
eax);
// Exception exit: return the EXCEPTION sentinel.
915 if (exit_with_exception.is_linked()) {
917 __ bind(&exit_with_exception);
919 __ mov(
eax, EXCEPTION);
// Wrap the emitted buffer in a Code object, log its creation for the
// profiler, and hand it back.
924 masm_->GetCode(&code_desc);
926 isolate()->factory()->NewCode(code_desc,
927 Code::ComputeFlags(Code::REGEXP),
928 masm_->CodeObject());
929 PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
930 return Handle<HeapObject>::cast(code);
// Unconditional jump to `to` (body missing from this extraction).
934 void RegExpMacroAssemblerIA32::GoTo(Label* to) {
// Branch when register `reg` >= comparand (branch line missing).
939 void RegExpMacroAssemblerIA32::IfRegisterGE(
int reg,
942 __ cmp(register_location(reg), Immediate(comparand));
// Branch to if_lt when register `reg` < comparand.
947 void RegExpMacroAssemblerIA32::IfRegisterLT(
int reg,
950 __ cmp(register_location(reg), Immediate(comparand));
951 BranchOrBacktrack(
less, if_lt);
// Branch to if_eq when register `reg` equals the current position (edi).
955 void RegExpMacroAssemblerIA32::IfRegisterEqPos(
int reg,
957 __ cmp(
edi, register_location(reg));
958 BranchOrBacktrack(
equal, if_eq);
// This backend identifies itself as the IA-32 native implementation.
962 RegExpMacroAssembler::IrregexpImplementation
963 RegExpMacroAssemblerIA32::Implementation() {
964 return kIA32Implementation;
// Bounds-checked character load: verify the last of `characters` chars at
// cp_offset is inside the input (else jump to on_end_of_input), then load.
968 void RegExpMacroAssemblerIA32::LoadCurrentCharacter(
int cp_offset,
969 Label* on_end_of_input,
973 ASSERT(cp_offset < (1<<30));
975 CheckPosition(cp_offset + characters - 1, on_end_of_input);
977 LoadCurrentCharacterUnchecked(cp_offset, characters);
// Pop the saved input position from the backtrack stack into edi
// (body missing from this extraction).
981 void RegExpMacroAssemblerIA32::PopCurrentPosition() {
// Pop a value from the backtrack stack into register slot
// `register_index` (the Pop into eax is on a missing line).
986 void RegExpMacroAssemblerIA32::PopRegister(
int register_index) {
988 __ mov(register_location(register_index),
eax);
// Push a backtrack target as a code-relative offset (so the entry stays
// valid if the code object moves).
992 void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
993 Push(Immediate::CodeRelativeOffset(label));
// Push the current input position (edi) onto the backtrack stack
// (body missing from this extraction).
998 void RegExpMacroAssemblerIA32::PushCurrentPosition() {
// Push register slot `register_index` onto the backtrack stack,
// optionally checking the regexp stack limit afterwards.
1003 void RegExpMacroAssemblerIA32::PushRegister(
int register_index,
1004 StackCheckFlag check_stack_limit) {
1005 __ mov(
eax, register_location(register_index));
1007 if (check_stack_limit) CheckStackLimit();
// Restore the current input position from register slot `reg`.
1011 void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(
int reg) {
1012 __ mov(
edi, register_location(reg));
// Restore the backtrack stack pointer from `reg`; stored values are
// relative to kStackHighEnd, so that base is added back here.
1016 void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(
int reg) {
1017 __ mov(backtrack_stackpointer(), register_location(reg));
1018 __ add(backtrack_stackpointer(), Operand(
ebp, kStackHighEnd));
// Set the current position to `by` characters before the input end
// (positions are negative offsets from the end); if the input is shorter
// than `by`, clamp and reload the current character.
1021 void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(
int by) {
1022 Label after_position;
1023 __ cmp(
edi, -by * char_size());
1025 __ mov(
edi, -by * char_size());
1029 LoadCurrentCharacterUnchecked(-1, 1);
1030 __ bind(&after_position);
// Store the constant `to` into a scratch (non-capture) register slot.
1034 void RegExpMacroAssemblerIA32::SetRegister(
int register_index,
int to) {
1035 ASSERT(register_index >= num_saved_registers_);
1036 __ mov(register_location(register_index), Immediate(to));
// Jump to the success epilogue; the original returns whether the regexp
// is global (return statement on a missing line).
1040 bool RegExpMacroAssemblerIA32::Succeed() {
1041 __ jmp(&success_label_);
// Store the current position (+ optional cp_offset, scaled to bytes)
// into register slot `reg`; offset 0 avoids the lea.
1046 void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(
int reg,
1048 if (cp_offset == 0) {
1049 __ mov(register_location(reg),
edi);
1051 __ lea(
eax, Operand(
edi, cp_offset * char_size()));
1052 __ mov(register_location(reg),
eax);
// Reset registers reg_from..reg_to to the "position -1" sentinel kept at
// kInputStartMinusOne.
1057 void RegExpMacroAssemblerIA32::ClearRegisters(
int reg_from,
int reg_to) {
1058 ASSERT(reg_from <= reg_to);
1059 __ mov(
eax, Operand(
ebp, kInputStartMinusOne));
1060 for (
int reg = reg_from; reg <= reg_to; reg++) {
1061 __ mov(register_location(reg),
eax);
// Save the backtrack stack pointer into `reg`, stored relative to
// kStackHighEnd (the inverse of ReadStackPointerFromRegister).
1066 void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(
int reg) {
1067 __ mov(
eax, backtrack_stackpointer());
1068 __ sub(
eax, Operand(
ebp, kStackHighEnd));
1069 __ mov(register_location(reg),
eax);
// Emit a call to the C function re_check_stack_guard_state with 3
// arguments (the argument stores are on lines missing from this
// extraction); `scratch` is used by PrepareCallCFunction.
1075 void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
1076 static const int num_arguments = 3;
1077 __ PrepareCallCFunction(num_arguments, scratch);
1085 ExternalReference check_stack_guard =
1086 ExternalReference::re_check_stack_guard_state(isolate());
1087 __ CallCFunction(check_stack_guard, num_arguments);
// Typed view of a slot in the generated code's stack frame: reinterpret
// the 32-bit word at re_frame + frame_offset as a T.
1092 template <
typename T>
1093 static T& frame_entry(
Address re_frame,
int frame_offset) {
1094 return reinterpret_cast<T&
>(Memory::int32_at(re_frame + frame_offset));
// Runtime helper called from generated code when the stack guard trips.
// Handles real stack overflow, lets the interrupt run (which may GC and
// move the code object or the subject string), then patches the return
// address and the frame's input pointers to match any moved objects.
1098 int RegExpMacroAssemblerIA32::CheckStackGuardState(
Address* return_address,
1101 Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
1102 if (isolate->stack_guard()->IsStackOverflow()) {
1103 isolate->StackOverflow();
// Direct calls from JS must not allocate (the frame layout assumption);
// early-out path for kDirectCall == 1 (body on missing lines).
1112 if (frame_entry<int>(re_frame, kDirectCall) == 1) {
1117 HandleScope handles(isolate);
1118 Handle<Code> code_handle(re_code);
1120 Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Remember the representation before the interrupt; a GC-triggered
// flattening could change it, which would invalidate the compiled mode.
1123 bool is_ascii = subject->IsOneByteRepresentationUnderneath();
1125 ASSERT(re_code->instruction_start() <= *return_address);
1126 ASSERT(*return_address <=
1127 re_code->instruction_start() + re_code->instruction_size());
1129 MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
// If the code object moved, shift the saved return address by the delta.
1131 if (*code_handle != re_code) {
1132 int delta = code_handle->address() - re_code->address();
1134 *return_address += delta;
1137 if (result->IsException()) {
// Peel cons/sliced wrappers to find the flat string actually scanned.
1141 Handle<String> subject_tmp = subject;
1142 int slice_offset = 0;
1145 if (StringShape(*subject_tmp).IsCons()) {
1146 subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
1147 }
else if (StringShape(*subject_tmp).IsSliced()) {
1148 SlicedString* slice = SlicedString::cast(*subject_tmp);
1149 subject_tmp = Handle<String>(slice->parent());
1150 slice_offset = slice->offset();
// Representation changed under us -> the compiled code no longer fits.
1154 if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
1165 ASSERT(StringShape(*subject_tmp).IsSequential() ||
1166 StringShape(*subject_tmp).IsExternal());
// If the string data moved, rewrite kInputString/kInputStart/kInputEnd
// in the frame, preserving the scanned byte length.
1169 const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
1173 int start_index = frame_entry<int>(re_frame, kStartIndex);
1174 const byte* new_address = StringCharacterPosition(*subject_tmp,
1175 start_index + slice_offset);
1177 if (start_address != new_address) {
1180 const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
1181 int byte_length =
static_cast<int>(end_address - start_address);
1182 frame_entry<const String*>(re_frame, kInputString) = *subject;
1183 frame_entry<const byte*>(re_frame, kInputStart) = new_address;
1184 frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
1185 }
else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
// Same data address but a different (moved) string object: refresh the
// frame's string pointer only.
1189 frame_entry<const String*>(re_frame, kInputString) = *subject;
// ebp-relative Operand for register slot `register_index`; grows the
// recorded register count on first use of a new index (the returned
// Operand expression is on a missing line).
1196 Operand RegExpMacroAssemblerIA32::register_location(
int register_index) {
1197 ASSERT(register_index < (1<<30));
1198 if (num_registers_ <= register_index) {
1199 num_registers_ = register_index + 1;
// Branch to on_outside_input when position + cp_offset is outside the
// input (positions are negative offsets from the end, hence -cp_offset).
1205 void RegExpMacroAssemblerIA32::CheckPosition(
int cp_offset,
1206 Label* on_outside_input) {
1207 __ cmp(
edi, -cp_offset * char_size());
// Branch on `condition` to `to`; a negative condition means
// "unconditional", and a NULL target means "go to backtrack" —
// only the two jump emissions are visible here.
1212 void RegExpMacroAssemblerIA32::BranchOrBacktrack(
Condition condition,
1214 if (condition < 0) {
1223 __ j(condition, &backtrack_label_);
1226 __ j(condition, to);
// Call that survives GC/code motion: push the return target as a
// code-relative offset instead of an absolute address.
1230 void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
1232 __ push(Immediate::CodeRelativeOffset(&return_to));
1234 __ bind(&return_to);
// Return from a SafeCall: rebase the popped code-relative offset on the
// code object before jumping back (jump on a missing line).
1238 void RegExpMacroAssemblerIA32::SafeReturn() {
1240 __ add(
ebx, Immediate(masm_->CodeObject()));
// Bind the landing label for a SafeCall target (body missing).
1245 void RegExpMacroAssemblerIA32::SafeCallTarget(Label*
name) {
// Push `source` onto the backtrack stack (the stack-pointer decrement is
// on a missing line); source must not alias the stack pointer itself.
1250 void RegExpMacroAssemblerIA32::Push(Register source) {
1251 ASSERT(!source.is(backtrack_stackpointer()));
1254 __ mov(Operand(backtrack_stackpointer(), 0), source);
// Push an immediate onto the backtrack stack.
1258 void RegExpMacroAssemblerIA32::Push(Immediate value) {
1261 __ mov(Operand(backtrack_stackpointer(), 0), value);
// Pop the top of the backtrack stack into `target` (the stack-pointer
// increment is on a missing line).
1265 void RegExpMacroAssemblerIA32::Pop(Register target) {
1266 ASSERT(!target.is(backtrack_stackpointer()));
1267 __ mov(target, Operand(backtrack_stackpointer(), 0));
// Compare esp against the isolate's stack limit; on failure SafeCall the
// preemption handler emitted in GetCode.
1273 void RegExpMacroAssemblerIA32::CheckPreemption() {
1276 ExternalReference stack_limit =
1277 ExternalReference::address_of_stack_limit(isolate());
1278 __ cmp(
esp, Operand::StaticVariable(stack_limit));
1281 SafeCall(&check_preempt_label_);
1283 __ bind(&no_preempt);
// Compare the backtrack stack pointer against the regexp stack limit;
// on overflow SafeCall the grow-stack handler emitted in GetCode.
1287 void RegExpMacroAssemblerIA32::CheckStackLimit() {
1288 Label no_stack_overflow;
1289 ExternalReference stack_limit =
1290 ExternalReference::address_of_regexp_stack_limit(isolate());
1291 __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
1292 __ j(
above, &no_stack_overflow);
1294 SafeCall(&stack_overflow_label_);
1296 __ bind(&no_stack_overflow);
// Load `characters` characters at position + cp_offset into
// current_character() without any bounds check. The input is addressed as
// [esi (input end) + edi (negative position) + cp_offset]; ASCII mode
// loads 4/2/1 bytes (the 4-byte mov is on a missing line), UC16 mode
// loads 2 characters with a plain mov or 1 with movzx_w (operands of the
// last two loads are on missing lines).
1300 void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(
int cp_offset,
1302 if (mode_ == ASCII) {
1303 if (characters == 4) {
1305 }
else if (characters == 2) {
1306 __ movzx_w(current_character(), Operand(
esi,
edi,
times_1, cp_offset));
1309 __ movzx_b(current_character(), Operand(
esi,
edi,
times_1, cp_offset));
1313 if (characters == 2) {
1314 __ mov(current_character(),
1318 __ movzx_w(current_character(),
1327 #endif // V8_INTERPRETED_REGEXP
1331 #endif // V8_TARGET_ARCH_IA32
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
void Fail(const v8::FunctionCallbackInfo< v8::Value > &args)
#define PROFILE(IsolateGetter, Call)
#define ASSERT(condition)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone *zone)
Operand FieldOperand(Register object, int offset)
#define T(name, string, precedence)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
#define ASSERT_EQ(v1, v2)
#define STATIC_ASSERT(test)