30 #if V8_TARGET_ARCH_ARM
44 #ifndef V8_INTERPRETED_REGEXP
119 #define __ ACCESS_MASM(masm_)
123 int registers_to_save,
125 : NativeRegExpMacroAssembler(zone),
126 masm_(new MacroAssembler(zone->isolate(),
NULL, kRegExpCodeSize)),
128 num_registers_(registers_to_save),
129 num_saved_registers_(registers_to_save),
136 __ jmp(&entry_label_);
137 __ bind(&start_label_);
141 RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
144 entry_label_.Unuse();
145 start_label_.Unuse();
146 success_label_.Unuse();
147 backtrack_label_.Unuse();
149 check_preempt_label_.Unuse();
150 stack_overflow_label_.Unuse();
// Returns the number of stack slots of slack the regexp engine keeps below
// the real stack limit (RegExpStack::kStackLimitSlack), so generated code
// can push a bounded amount before the next explicit stack check.
// NOTE(review): closing brace stripped by the extraction; code left as-is.
154 int RegExpMacroAssemblerARM::stack_limit_slack() {
155 return RegExpStack::kStackLimitSlack;
// Advances the current input position by 'by' characters: adds
// by * char_size() bytes to the register holding the (negative) offset from
// the end of the input. 'by' may be negative to move backwards.
// NOTE(review): guard/closing lines stripped by the extraction.
159 void RegExpMacroAssemblerARM::AdvanceCurrentPosition(
int by) {
161 __ add(current_input_offset(),
162 current_input_offset(), Operand(by * char_size()));
// Adds 'by' to capture/loop register 'reg' via a load/add/store through r0
// (r0 is a scratch register here; the value round-trips through the
// register's frame slot given by register_location(reg)).
// NOTE(review): a lower-bound assert (reg >= 0) and the closing brace appear
// to have been stripped by the extraction — confirm against upstream source.
167 void RegExpMacroAssemblerARM::AdvanceRegister(
int reg,
int by) {
169 ASSERT(reg < num_registers_);
171 __ ldr(
r0, register_location(reg));
172 __ add(
r0,
r0, Operand(by));
173 __ str(
r0, register_location(reg));
// Backtrack: jump to the code-relative offset on top of the backtrack stack
// by adding the code object pointer to r0 and writing the result to pc.
// NOTE(review): the instruction that pops the offset into r0 (original
// lines 179-181, presumably Pop(r0)) was stripped by the extraction —
// verify against upstream before relying on this fragment.
178 void RegExpMacroAssemblerARM::Backtrack() {
182 __ add(
pc,
r0, Operand(code_pointer()));
186 void RegExpMacroAssemblerARM::Bind(Label* label) {
// If the current character equals 'c', branch to on_equal (or backtrack
// when on_equal is not bound — BranchOrBacktrack handles that case).
// NOTE(review): closing brace stripped by the extraction.
191 void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
192 __ cmp(current_character(), Operand(c));
193 BranchOrBacktrack(
eq, on_equal);
197 void RegExpMacroAssemblerARM::CheckCharacterGT(
uc16 limit, Label* on_greater) {
198 __ cmp(current_character(), Operand(limit));
199 BranchOrBacktrack(
gt, on_greater);
203 void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
207 __ cmp(
r0, Operand::Zero());
208 BranchOrBacktrack(
ne, ¬_at_start);
212 __ add(
r0, end_of_input_address(), Operand(current_input_offset()));
214 BranchOrBacktrack(
eq, on_at_start);
215 __ bind(¬_at_start);
219 void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
222 __ cmp(
r0, Operand::Zero());
223 BranchOrBacktrack(
ne, on_not_at_start);
226 __ add(
r0, end_of_input_address(), Operand(current_input_offset()));
228 BranchOrBacktrack(
ne, on_not_at_start);
232 void RegExpMacroAssemblerARM::CheckCharacterLT(
uc16 limit, Label* on_less) {
233 __ cmp(current_character(), Operand(limit));
234 BranchOrBacktrack(
lt, on_less);
238 void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
240 __ cmp(current_input_offset(),
r0);
241 __ add(backtrack_stackpointer(),
243 BranchOrBacktrack(
eq, on_equal);
247 void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
249 Label* on_no_match) {
251 __ ldr(
r0, register_location(start_reg));
252 __ ldr(
r1, register_location(start_reg + 1));
257 __ b(
eq, &fallthrough);
260 __ cmn(
r1, Operand(current_input_offset()));
261 BranchOrBacktrack(
gt, on_no_match);
263 if (mode_ == ASCII) {
270 __ add(
r0,
r0, Operand(end_of_input_address()));
271 __ add(
r2, end_of_input_address(), Operand(current_input_offset()));
283 __ b(
eq, &loop_check);
286 __ orr(
r3,
r3, Operand(0x20));
287 __ orr(
r4,
r4, Operand(0x20));
290 __ sub(
r3,
r3, Operand(
'a'));
291 __ cmp(
r3, Operand(
'z' -
'a'));
292 __ b(
ls, &loop_check);
294 __ sub(
r3,
r3, Operand(224 -
'a'));
295 __ cmp(
r3, Operand(254 - 224));
297 __ cmp(
r3, Operand(247 - 224));
300 __ bind(&loop_check);
306 BranchOrBacktrack(
al, on_no_match);
310 __ sub(current_input_offset(),
r2, end_of_input_address());
313 int argument_count = 4;
314 __ PrepareCallCFunction(argument_count,
r2);
327 __ add(
r0,
r0, Operand(end_of_input_address()));
333 __ add(
r1, current_input_offset(), Operand(end_of_input_address()));
335 __ mov(
r3, Operand(ExternalReference::isolate_address(isolate())));
338 AllowExternalCallThatCantCauseGC scope(masm_);
339 ExternalReference
function =
340 ExternalReference::re_case_insensitive_compare_uc16(isolate());
341 __ CallCFunction(
function, argument_count);
345 __ cmp(
r0, Operand::Zero());
346 BranchOrBacktrack(
eq, on_no_match);
348 __ add(current_input_offset(), current_input_offset(), Operand(
r4));
351 __ bind(&fallthrough);
355 void RegExpMacroAssemblerARM::CheckNotBackReference(
357 Label* on_no_match) {
362 __ ldr(
r0, register_location(start_reg));
363 __ ldr(
r1, register_location(start_reg + 1));
366 __ b(
eq, &fallthrough);
369 __ cmn(
r1, Operand(current_input_offset()));
370 BranchOrBacktrack(
gt, on_no_match);
373 __ add(
r0,
r0, Operand(end_of_input_address()));
374 __ add(
r2, end_of_input_address(), Operand(current_input_offset()));
379 if (mode_ == ASCII) {
388 BranchOrBacktrack(
ne, on_no_match);
393 __ sub(current_input_offset(),
r2, end_of_input_address());
394 __ bind(&fallthrough);
398 void RegExpMacroAssemblerARM::CheckNotCharacter(
unsigned c,
399 Label* on_not_equal) {
400 __ cmp(current_character(), Operand(c));
401 BranchOrBacktrack(
ne, on_not_equal);
405 void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
409 __ tst(current_character(), Operand(mask));
411 __ and_(
r0, current_character(), Operand(mask));
412 __ cmp(
r0, Operand(c));
414 BranchOrBacktrack(
eq, on_equal);
418 void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(
unsigned c,
420 Label* on_not_equal) {
422 __ tst(current_character(), Operand(mask));
424 __ and_(
r0, current_character(), Operand(mask));
425 __ cmp(
r0, Operand(c));
427 BranchOrBacktrack(
ne, on_not_equal);
431 void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
435 Label* on_not_equal) {
436 ASSERT(minus < String::kMaxUtf16CodeUnit);
437 __ sub(
r0, current_character(), Operand(minus));
438 __ and_(
r0,
r0, Operand(mask));
439 __ cmp(
r0, Operand(c));
440 BranchOrBacktrack(
ne, on_not_equal);
444 void RegExpMacroAssemblerARM::CheckCharacterInRange(
447 Label* on_in_range) {
448 __ sub(
r0, current_character(), Operand(from));
449 __ cmp(
r0, Operand(to - from));
450 BranchOrBacktrack(
ls, on_in_range);
454 void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
457 Label* on_not_in_range) {
458 __ sub(
r0, current_character(), Operand(from));
459 __ cmp(
r0, Operand(to - from));
460 BranchOrBacktrack(
hi, on_not_in_range);
464 void RegExpMacroAssemblerARM::CheckBitInTable(
465 Handle<ByteArray> table,
467 __ mov(
r0, Operand(table));
468 if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
469 __ and_(
r1, current_character(), Operand(kTableSize - 1));
477 __ cmp(
r0, Operand::Zero());
478 BranchOrBacktrack(
ne, on_bit_set);
482 bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(
uc16 type,
483 Label* on_no_match) {
489 if (mode_ == ASCII) {
492 __ cmp(current_character(), Operand(
' '));
495 __ sub(
r0, current_character(), Operand(
'\t'));
496 __ cmp(
r0, Operand(
'\r' -
'\t'));
499 __ cmp(
r0, Operand(0x00a0 -
'\t'));
500 BranchOrBacktrack(
ne, on_no_match);
510 __ sub(
r0, current_character(), Operand(
'0'));
511 __ cmp(
r0, Operand(
'9' -
'0'));
512 BranchOrBacktrack(
hi, on_no_match);
516 __ sub(
r0, current_character(), Operand(
'0'));
517 __ cmp(
r0, Operand(
'9' -
'0'));
518 BranchOrBacktrack(
ls, on_no_match);
522 __ eor(
r0, current_character(), Operand(0x01));
524 __ sub(
r0,
r0, Operand(0x0b));
525 __ cmp(
r0, Operand(0x0c - 0x0b));
526 BranchOrBacktrack(
ls, on_no_match);
531 __ sub(
r0,
r0, Operand(0x2028 - 0x0b));
532 __ cmp(
r0, Operand(1));
533 BranchOrBacktrack(
ls, on_no_match);
539 __ eor(
r0, current_character(), Operand(0x01));
541 __ sub(
r0,
r0, Operand(0x0b));
542 __ cmp(
r0, Operand(0x0c - 0x0b));
543 if (mode_ == ASCII) {
544 BranchOrBacktrack(
hi, on_no_match);
551 __ sub(
r0,
r0, Operand(0x2028 - 0x0b));
552 __ cmp(
r0, Operand(1));
553 BranchOrBacktrack(
hi, on_no_match);
559 if (mode_ != ASCII) {
561 __ cmp(current_character(), Operand(
'z'));
562 BranchOrBacktrack(
hi, on_no_match);
564 ExternalReference
map = ExternalReference::re_word_character_map();
565 __ mov(
r0, Operand(map));
567 __ cmp(
r0, Operand::Zero());
568 BranchOrBacktrack(
eq, on_no_match);
573 if (mode_ != ASCII) {
575 __ cmp(current_character(), Operand(
'z'));
578 ExternalReference map = ExternalReference::re_word_character_map();
579 __ mov(
r0, Operand(map));
581 __ cmp(
r0, Operand::Zero());
582 BranchOrBacktrack(
ne, on_no_match);
583 if (mode_ != ASCII) {
599 __ mov(
r0, Operand(FAILURE));
600 __ jmp(&exit_label_);
604 Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
610 __ bind(&entry_label_);
614 FrameScope scope(masm_, StackFrame::MANUAL);
625 __ stm(
db_w,
sp, argument_registers | registers_to_retain |
lr.
bit());
629 __ mov(
r0, Operand::Zero());
633 Label stack_limit_hit;
636 ExternalReference stack_limit =
637 ExternalReference::address_of_stack_limit(isolate());
638 __ mov(
r0, Operand(stack_limit));
642 __ b(
ls, &stack_limit_hit);
649 __ mov(
r0, Operand(EXCEPTION));
652 __ bind(&stack_limit_hit);
653 CallCheckStackGuardState(
r0);
654 __ cmp(
r0, Operand::Zero());
656 __ b(
ne, &return_r0);
663 __ ldr(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
667 __ sub(current_input_offset(),
r0, end_of_input_address());
671 __ sub(
r0, current_input_offset(), Operand(char_size()));
672 __ sub(
r0,
r0, Operand(
r1,
LSL, (mode_ == UC16) ? 1 : 0));
678 __ mov(code_pointer(), Operand(masm_->CodeObject()));
680 Label load_char_start_regexp, start_regexp;
682 __ cmp(
r1, Operand::Zero());
683 __ b(
ne, &load_char_start_regexp);
684 __ mov(current_character(), Operand(
'\n'),
LeaveCC,
eq);
685 __ jmp(&start_regexp);
688 __ bind(&load_char_start_regexp);
690 LoadCurrentCharacterUnchecked(-1, 1);
691 __ bind(&start_regexp);
694 if (num_saved_registers_ > 0) {
696 if (num_saved_registers_ > 8) {
698 __ add(
r1, frame_pointer(), Operand(kRegisterZero));
699 __ mov(
r2, Operand(num_saved_registers_));
704 __ b(
ne, &init_loop);
706 for (
int i = 0; i < num_saved_registers_; i++) {
707 __ str(
r0, register_location(i));
713 __ ldr(backtrack_stackpointer(),
MemOperand(frame_pointer(), kStackHighEnd));
715 __ jmp(&start_label_);
718 if (success_label_.is_linked()) {
720 __ bind(&success_label_);
721 if (num_saved_registers_ > 0) {
726 __ sub(
r1, end_of_input_address(),
r1);
739 for (
int i = 0; i < num_saved_registers_; i += 2) {
740 __ ldr(
r2, register_location(i));
741 __ ldr(
r3, register_location(i + 1));
742 if (i == 0 && global_with_zero_length_check()) {
764 __ add(
r0,
r0, Operand(1));
768 __ sub(
r1,
r1, Operand(num_saved_registers_));
770 __ cmp(
r1, Operand(num_saved_registers_));
771 __ b(
lt, &return_r0);
781 if (global_with_zero_length_check()) {
784 __ cmp(current_input_offset(),
r4);
786 __ b(
ne, &load_char_start_regexp);
788 __ cmp(current_input_offset(), Operand::Zero());
789 __ b(
eq, &exit_label_);
791 __ add(current_input_offset(),
792 current_input_offset(),
793 Operand((mode_ == UC16) ? 2 : 1));
796 __ b(&load_char_start_regexp);
798 __ mov(
r0, Operand(SUCCESS));
803 __ bind(&exit_label_);
810 __ mov(
sp, frame_pointer());
815 if (backtrack_label_.is_linked()) {
816 __ bind(&backtrack_label_);
820 Label exit_with_exception;
823 if (check_preempt_label_.is_linked()) {
824 SafeCallTarget(&check_preempt_label_);
826 CallCheckStackGuardState(
r0);
827 __ cmp(
r0, Operand::Zero());
830 __ b(
ne, &return_r0);
833 __ ldr(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
838 if (stack_overflow_label_.is_linked()) {
839 SafeCallTarget(&stack_overflow_label_);
844 static const int num_arguments = 3;
845 __ PrepareCallCFunction(num_arguments,
r0);
846 __ mov(
r0, backtrack_stackpointer());
847 __ add(
r1, frame_pointer(), Operand(kStackHighEnd));
848 __ mov(
r2, Operand(ExternalReference::isolate_address(isolate())));
849 ExternalReference grow_stack =
850 ExternalReference::re_grow_stack(isolate());
851 __ CallCFunction(grow_stack, num_arguments);
854 __ cmp(
r0, Operand::Zero());
855 __ b(
eq, &exit_with_exception);
857 __ mov(backtrack_stackpointer(),
r0);
862 if (exit_with_exception.is_linked()) {
864 __ bind(&exit_with_exception);
866 __ mov(
r0, Operand(EXCEPTION));
871 masm_->GetCode(&code_desc);
872 Handle<Code>
code = isolate()->factory()->NewCode(
873 code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
874 PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
875 return Handle<HeapObject>::cast(code);
879 void RegExpMacroAssemblerARM::GoTo(Label* to) {
880 BranchOrBacktrack(
al, to);
884 void RegExpMacroAssemblerARM::IfRegisterGE(
int reg,
887 __ ldr(
r0, register_location(reg));
888 __ cmp(
r0, Operand(comparand));
889 BranchOrBacktrack(
ge, if_ge);
893 void RegExpMacroAssemblerARM::IfRegisterLT(
int reg,
896 __ ldr(
r0, register_location(reg));
897 __ cmp(
r0, Operand(comparand));
898 BranchOrBacktrack(
lt, if_lt);
902 void RegExpMacroAssemblerARM::IfRegisterEqPos(
int reg,
904 __ ldr(
r0, register_location(reg));
905 __ cmp(
r0, Operand(current_input_offset()));
906 BranchOrBacktrack(
eq, if_eq);
// Identifies this macro-assembler backend to the regexp engine.
// NOTE(review): closing brace stripped by the extraction.
910 RegExpMacroAssembler::IrregexpImplementation
911 RegExpMacroAssemblerARM::Implementation() {
912 return kARMImplementation;
916 void RegExpMacroAssemblerARM::LoadCurrentCharacter(
int cp_offset,
917 Label* on_end_of_input,
921 ASSERT(cp_offset < (1<<30));
923 CheckPosition(cp_offset + characters - 1, on_end_of_input);
925 LoadCurrentCharacterUnchecked(cp_offset, characters);
929 void RegExpMacroAssemblerARM::PopCurrentPosition() {
930 Pop(current_input_offset());
934 void RegExpMacroAssemblerARM::PopRegister(
int register_index) {
936 __ str(
r0, register_location(register_index));
940 void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
941 __ mov_label_offset(
r0, label);
947 void RegExpMacroAssemblerARM::PushCurrentPosition() {
948 Push(current_input_offset());
952 void RegExpMacroAssemblerARM::PushRegister(
int register_index,
953 StackCheckFlag check_stack_limit) {
954 __ ldr(
r0, register_location(register_index));
956 if (check_stack_limit) CheckStackLimit();
960 void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(
int reg) {
961 __ ldr(current_input_offset(), register_location(reg));
965 void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(
int reg) {
966 __ ldr(backtrack_stackpointer(), register_location(reg));
968 __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(
r0));
// Sets the current position to 'by' characters before the end of input,
// unless the current position is already closer to the end than that
// (current_input_offset() >= -by * char_size()), in which case it is left
// unchanged. Afterwards reloads the current character.
// NOTE(review): lines between 976 and 980 (original file) were stripped by
// the extraction; code left as-is.
972 void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(
int by) {
973 Label after_position;
974 __ cmp(current_input_offset(), Operand(-by * char_size()));
975 __ b(
ge, &after_position);
976 __ mov(current_input_offset(), Operand(-by * char_size()));
980 LoadCurrentCharacterUnchecked(-1, 1);
981 __ bind(&after_position);
985 void RegExpMacroAssemblerARM::SetRegister(
int register_index,
int to) {
986 ASSERT(register_index >= num_saved_registers_);
987 __ mov(
r0, Operand(to));
988 __ str(
r0, register_location(register_index));
992 bool RegExpMacroAssemblerARM::Succeed() {
993 __ jmp(&success_label_);
998 void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(
int reg,
1000 if (cp_offset == 0) {
1001 __ str(current_input_offset(), register_location(reg));
1003 __ add(
r0, current_input_offset(), Operand(cp_offset * char_size()));
1004 __ str(
r0, register_location(reg));
1009 void RegExpMacroAssemblerARM::ClearRegisters(
int reg_from,
int reg_to) {
1010 ASSERT(reg_from <= reg_to);
1012 for (
int reg = reg_from; reg <= reg_to; reg++) {
1013 __ str(
r0, register_location(reg));
1018 void RegExpMacroAssemblerARM::WriteStackPointerToRegister(
int reg) {
1020 __ sub(
r0, backtrack_stackpointer(),
r1);
1021 __ str(
r0, register_location(reg));
1027 void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
1028 __ PrepareCallCFunction(3, scratch);
1031 __ mov(
r2, frame_pointer());
1033 __ mov(
r1, Operand(masm_->CodeObject()));
1036 int stack_alignment = OS::ActivationFrameAlignment();
1038 __ sub(
sp,
sp, Operand(stack_alignment));
1043 ExternalReference stack_guard_check =
1044 ExternalReference::re_check_stack_guard_state(isolate());
1045 __ mov(
ip, Operand(stack_guard_check));
1046 DirectCEntryStub stub;
1047 stub.GenerateCall(masm_,
ip);
1050 __ add(
sp,
sp, Operand(stack_alignment));
1052 ASSERT(stack_alignment != 0);
1055 __ mov(code_pointer(), Operand(masm_->CodeObject()));
// Helper used by CheckStackGuardState: reinterprets the 32-bit word at
// re_frame + frame_offset as a mutable reference of type T, giving typed
// read/write access to fields of the generated regexp code's stack frame.
// NOTE(review): closing brace stripped by the extraction.
1060 template <
typename T>
1061 static T& frame_entry(
Address re_frame,
int frame_offset) {
1062 return reinterpret_cast<T&
>(Memory::int32_at(re_frame + frame_offset));
1066 int RegExpMacroAssemblerARM::CheckStackGuardState(
Address* return_address,
1069 Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
1070 if (isolate->stack_guard()->IsStackOverflow()) {
1071 isolate->StackOverflow();
1080 if (frame_entry<int>(re_frame, kDirectCall) == 1) {
1085 HandleScope handles(isolate);
1086 Handle<Code> code_handle(re_code);
1088 Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
1091 bool is_ascii = subject->IsOneByteRepresentationUnderneath();
1093 ASSERT(re_code->instruction_start() <= *return_address);
1094 ASSERT(*return_address <=
1095 re_code->instruction_start() + re_code->instruction_size());
1097 MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
1099 if (*code_handle != re_code) {
1100 int delta = code_handle->address() - re_code->address();
1102 *return_address += delta;
1105 if (result->IsException()) {
1109 Handle<String> subject_tmp = subject;
1110 int slice_offset = 0;
1113 if (StringShape(*subject_tmp).IsCons()) {
1114 subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
1115 }
else if (StringShape(*subject_tmp).IsSliced()) {
1116 SlicedString* slice = SlicedString::cast(*subject_tmp);
1117 subject_tmp = Handle<String>(slice->parent());
1118 slice_offset = slice->offset();
1122 if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
1133 ASSERT(StringShape(*subject_tmp).IsSequential() ||
1134 StringShape(*subject_tmp).IsExternal());
1137 const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
1141 int start_index = frame_entry<int>(re_frame, kStartIndex);
1142 const byte* new_address = StringCharacterPosition(*subject_tmp,
1143 start_index + slice_offset);
1145 if (start_address != new_address) {
1148 const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
1149 int byte_length =
static_cast<int>(end_address - start_address);
1150 frame_entry<const String*>(re_frame, kInputString) = *subject;
1151 frame_entry<const byte*>(re_frame, kInputStart) = new_address;
1152 frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
1153 }
else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
1157 frame_entry<const String*>(re_frame, kInputString) = *subject;
// Returns the frame-relative MemOperand for register 'register_index',
// growing the recorded register count when a higher index is first seen.
// NOTE(review): the actual return statement (original lines ~1169-1170,
// presumably MemOperand(frame_pointer(), kRegisterZero - register_index *
// kPointerSize)) and closing braces were stripped by the extraction —
// confirm against upstream source.
1164 MemOperand RegExpMacroAssemblerARM::register_location(
int register_index) {
1165 ASSERT(register_index < (1<<30));
1166 if (num_registers_ <= register_index) {
1167 num_registers_ = register_index + 1;
// Branches to on_outside_input if reading at cp_offset would go past the
// end of the input: the current offset is negative (relative to the input
// end), so position + cp_offset is in range only while
// current_input_offset() < -cp_offset * char_size().
// NOTE(review): closing brace stripped by the extraction.
1174 void RegExpMacroAssemblerARM::CheckPosition(
int cp_offset,
1175 Label* on_outside_input) {
1176 __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
1177 BranchOrBacktrack(
ge, on_outside_input);
// Branches conditionally to 'to', treating a NULL 'to' as "backtrack":
// when the target label is absent the branch goes to backtrack_label_
// instead. An 'al' (always) condition is special-cased.
// NOTE(review): large parts of this function (original lines 1184-1191,
// 1193-1194 — the al/NULL handling and intervening braces) were stripped
// by the extraction; only two of the branch emissions remain visible.
// Confirm the full control flow against upstream source.
1181 void RegExpMacroAssemblerARM::BranchOrBacktrack(
Condition condition,
1183 if (condition ==
al) {
1192 __ b(condition, &backtrack_label_);
1195 __ b(condition, to);
1199 void RegExpMacroAssemblerARM::SafeCall(Label* to,
Condition cond) {
1204 void RegExpMacroAssemblerARM::SafeReturn() {
1206 __ add(
pc,
lr, Operand(masm_->CodeObject()));
1210 void RegExpMacroAssemblerARM::SafeCallTarget(Label*
name) {
1212 __ sub(
lr,
lr, Operand(masm_->CodeObject()));
1217 void RegExpMacroAssemblerARM::Push(Register source) {
1218 ASSERT(!source.is(backtrack_stackpointer()));
1224 void RegExpMacroAssemblerARM::Pop(Register target) {
1225 ASSERT(!target.is(backtrack_stackpointer()));
1231 void RegExpMacroAssemblerARM::CheckPreemption() {
1233 ExternalReference stack_limit =
1234 ExternalReference::address_of_stack_limit(isolate());
1235 __ mov(
r0, Operand(stack_limit));
1238 SafeCall(&check_preempt_label_,
ls);
1242 void RegExpMacroAssemblerARM::CheckStackLimit() {
1243 ExternalReference stack_limit =
1244 ExternalReference::address_of_regexp_stack_limit(isolate());
1245 __ mov(
r0, Operand(stack_limit));
1247 __ cmp(backtrack_stackpointer(), Operand(
r0));
1248 SafeCall(&stack_overflow_label_,
ls);
1252 bool RegExpMacroAssemblerARM::CanReadUnaligned() {
1257 void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(
int cp_offset,
1259 Register offset = current_input_offset();
1260 if (cp_offset != 0) {
1262 __ add(
r4, current_input_offset(), Operand(cp_offset * char_size()));
1269 if (!CanReadUnaligned()) {
1273 if (mode_ == ASCII) {
1274 if (characters == 4) {
1275 __ ldr(current_character(),
MemOperand(end_of_input_address(), offset));
1276 }
else if (characters == 2) {
1277 __ ldrh(current_character(),
MemOperand(end_of_input_address(), offset));
1280 __ ldrb(current_character(),
MemOperand(end_of_input_address(), offset));
1284 if (characters == 2) {
1285 __ ldr(current_character(),
MemOperand(end_of_input_address(), offset));
1288 __ ldrh(current_character(),
MemOperand(end_of_input_address(), offset));
1296 #endif // V8_INTERPRETED_REGEXP
1300 #endif // V8_TARGET_ARCH_ARM
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
void Fail(const v8::FunctionCallbackInfo< v8::Value > &args)
#define PROFILE(IsolateGetter, Call)
#define ASSERT(condition)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
bool IsAligned(T value, U alignment)
#define T(name, string, precedence)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
#define ASSERT_EQ(v1, v2)
RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone *zone)