30 #if V8_TARGET_ARCH_MIPS
43 #ifndef V8_INTERPRETED_REGEXP
118 #define __ ACCESS_MASM(masm_)
// Constructor (fragment — interior lines missing from this extraction).
// Creates the MacroAssembler buffer and wires up the entry/failure labels:
// code first jumps over the internal-failure stub (which loads FAILURE into
// v0) to entry_label_, and start_label_ marks where matching begins.
// NOTE(review): stray leading numbers on each line are extraction artifacts.
122 int registers_to_save,
124 : NativeRegExpMacroAssembler(zone),
125 masm_(new MacroAssembler(zone->isolate(),
NULL, kRegExpCodeSize)),
127 num_registers_(registers_to_save),
128 num_saved_registers_(registers_to_save),
134 internal_failure_label_() {
// Skip over the internal-failure stub emitted next.
136 __ jmp(&entry_label_);
// Internal-failure stub: return FAILURE in the result register v0.
139 __ bind(&internal_failure_label_);
140 __ li(v0, Operand(FAILURE));
142 __ bind(&start_label_);
// Destructor (fragment): unuses all labels so the assembler can be deleted
// even if some labels were never bound (e.g. when code generation aborted).
146 RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
149 entry_label_.Unuse();
150 start_label_.Unuse();
151 success_label_.Unuse();
152 backtrack_label_.Unuse();
154 check_preempt_label_.Unuse();
155 stack_overflow_label_.Unuse();
156 internal_failure_label_.Unuse();
// Slack (in words) the regexp backtrack stack may grow beyond its limit
// before a stack-overflow check must fire.
160 int RegExpMacroAssemblerMIPS::stack_limit_slack() {
161 return RegExpStack::kStackLimitSlack;
// Moves the current input position by 'by' characters (scaled by char_size()
// — 1 byte for ASCII, 2 for UC16).
165 void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(
int by) {
167 __ Addu(current_input_offset(),
168 current_input_offset(), Operand(by * char_size()));
// Adds 'by' to capture/working register 'reg' (a stack slot) via scratch a0.
173 void RegExpMacroAssemblerMIPS::AdvanceRegister(
int reg,
int by) {
175 ASSERT(reg < num_registers_);
177 __ lw(a0, register_location(reg));
178 __ Addu(a0, a0, Operand(by));
179 __ sw(a0, register_location(reg));
// Backtrack: pop a code offset from the backtrack stack and jump to it
// (fragment — the pop and the final jump are missing from this extraction;
// only the offset-to-address addition is visible).
184 void RegExpMacroAssemblerMIPS::Backtrack() {
188 __ Addu(a0, a0, code_pointer());
// Binds 'label' at the current code position (fragment — body line missing).
193 void RegExpMacroAssemblerMIPS::Bind(Label* label) {
// Branches to 'on_equal' (or backtracks if NULL) when the current character
// equals 'c'.
198 void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
199 BranchOrBacktrack(on_equal,
eq, current_character(), Operand(c));
// Branches to 'on_greater' when the current character is > 'limit'.
203 void RegExpMacroAssemblerMIPS::CheckCharacterGT(
uc16 limit, Label* on_greater) {
204 BranchOrBacktrack(on_greater,
gt, current_character(), Operand(limit));
// Branches to 'on_at_start' when the current position is at the start of the
// input (fragment — the loads of a0/a1 preceding the first compare are
// missing from this extraction).
// FIX: the local label reference had been corrupted by HTML-entity decoding:
// "&not_at_start" became "¬_at_start" (U+00AC). Restored the address-of
// operator so the code compiles again.
208 void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
// Not at start when a captured start state is non-zero.
212 BranchOrBacktrack(&not_at_start,
ne, a0, Operand(zero_reg));
// Compute the current input address and compare with the string start.
216 __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
217 BranchOrBacktrack(on_at_start,
eq, a0, Operand(a1));
218 __ bind(&not_at_start);
// Branches to 'on_not_at_start' when the current position is not at the
// start of the input (fragment — the loads of a0/a1 are missing).
222 void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
225 BranchOrBacktrack(on_not_at_start,
ne, a0, Operand(zero_reg));
// Compare current input address against the string start address in a1.
228 __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
229 BranchOrBacktrack(on_not_at_start,
ne, a0, Operand(a1));
// Branches to 'on_less' when the current character is < 'limit'.
233 void RegExpMacroAssemblerMIPS::CheckCharacterLT(
uc16 limit, Label* on_less) {
234 BranchOrBacktrack(on_less,
lt, current_character(), Operand(limit));
// Greedy-loop optimization: if the position on top of the backtrack stack
// equals the current position, pop it and branch to 'on_equal' instead of
// re-entering the loop body (fragment — the initial stack peek is missing).
238 void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
239 Label backtrack_non_equal;
241 __ Branch(&backtrack_non_equal,
ne, current_input_offset(), Operand(a0));
// Pop the saved position off the backtrack stack.
242 __ Addu(backtrack_stackpointer(),
243 backtrack_stackpointer(),
245 __ bind(&backtrack_non_equal);
246 BranchOrBacktrack(on_equal,
eq, current_input_offset(), Operand(a0));
// Case-insensitive back-reference check (fragment — many interior lines are
// missing). Loads the capture [start, end) from start_reg/start_reg+1; an
// empty capture falls through as a trivial match. For ASCII a hand-written
// byte-compare loop with ASCII case folding is emitted; for UC16 it calls
// the C helper re_case_insensitive_compare_uc16.
250 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
252 Label* on_no_match) {
254 __ lw(a0, register_location(start_reg));
255 __ lw(a1, register_location(start_reg + 1));
// Empty capture: trivially matches.
260 __ Branch(&fallthrough,
eq, a1, Operand(zero_reg));
// Fail if there are not enough characters left in the input.
262 __ Addu(t5, a1, current_input_offset());
264 BranchOrBacktrack(on_no_match,
gt, t5, Operand(zero_reg));
266 if (mode_ == ASCII) {
// a0 = capture address, a2 = current position address, a1 = capture end.
273 __ Addu(a0, a0, Operand(end_of_input_address()));
274 __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
275 __ Addu(a1, a0, Operand(a1));
// Compare one byte from each side per iteration.
284 __ addiu(a0, a0, char_size());
286 __ addiu(a2, a2, char_size());
288 __ Branch(&loop_check,
eq, t0, Operand(a3));
// Mismatch: try again with lower-cased bytes (ASCII case folding).
291 __ Or(a3, a3, Operand(0x20));
292 __ Or(t0, t0, Operand(0x20));
293 __ Branch(&fail,
ne, t0, Operand(a3));
294 __ Subu(a3, a3, Operand(
'a'));
295 __ Branch(&loop_check,
ls, a3, Operand(
'z' -
'a'));
// Latin-1 letters 224..254 except 247 (division sign) also fold.
297 __ Subu(a3, a3, Operand(224 -
'a'));
299 __ Branch(&fail,
hi, a3, Operand(254 - 224));
301 __ Branch(&fail,
eq, a3, Operand(247 - 224));
303 __ bind(&loop_check);
304 __ Branch(&loop,
lt, a0, Operand(a1));
// Matched: advance the current position past the capture.
312 __ Subu(current_input_offset(), a2, end_of_input_address());
// UC16 path: preserve regexp registers across the C call.
316 RegList regexp_registers_to_retain = current_input_offset().bit() |
317 current_character().bit() | backtrack_stackpointer().bit();
318 __ MultiPush(regexp_registers_to_retain);
320 int argument_count = 4;
321 __ PrepareCallCFunction(argument_count, a2);
// Arguments: capture address, byte length, current position, isolate.
334 __ Addu(a0, a0, Operand(end_of_input_address()));
340 __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
342 __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
345 AllowExternalCallThatCantCauseGC scope(masm_);
346 ExternalReference
function =
347 ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
348 __ CallCFunction(
function, argument_count);
352 __ MultiPop(regexp_registers_to_retain);
354 __ lw(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
// Zero result means the comparison failed.
357 BranchOrBacktrack(on_no_match,
eq, v0, Operand(zero_reg));
359 __ Addu(current_input_offset(), current_input_offset(), Operand(
s3));
362 __ bind(&fallthrough);
// Case-sensitive back-reference check (fragment — interior lines missing).
// Loads the capture [start, end); empty captures fall through, then the
// capture is compared character-by-character against the current input.
366 void RegExpMacroAssemblerMIPS::CheckNotBackReference(
368 Label* on_no_match) {
373 __ lw(a0, register_location(start_reg));
374 __ lw(a1, register_location(start_reg + 1));
// Empty capture: trivially matches.
377 __ Branch(&fallthrough,
eq, a1, Operand(zero_reg));
// Fail if there are not enough characters left in the input.
379 __ Addu(t5, a1, current_input_offset());
381 BranchOrBacktrack(on_no_match,
gt, t5, Operand(zero_reg));
// a0 = capture address, a2 = current position address, a1 = capture end.
384 __ Addu(a0, a0, Operand(end_of_input_address()));
385 __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
386 __ Addu(a1, a1, Operand(a0));
390 if (mode_ == ASCII) {
392 __ addiu(a0, a0, char_size());
394 __ addiu(a2, a2, char_size());
398 __ addiu(a0, a0, char_size());
400 __ addiu(a2, a2, char_size());
402 BranchOrBacktrack(on_no_match,
ne, a3, Operand(t0));
403 __ Branch(&loop,
lt, a0, Operand(a1));
// Matched: advance the current position past the capture.
406 __ Subu(current_input_offset(), a2, end_of_input_address());
407 __ bind(&fallthrough);
// Branches to 'on_not_equal' when the current character differs from 'c'.
411 void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
412 Label* on_not_equal) {
413 BranchOrBacktrack(on_not_equal,
ne, current_character(), Operand(c));
// Branches to 'on_equal' when (current_character() & mask) == c.
417 void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
420 __ And(a0, current_character(), Operand(mask));
// Compare against zero_reg directly when c == 0 (avoids materializing 0).
421 Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
422 BranchOrBacktrack(on_equal,
eq, a0, rhs);
// Branches to 'on_not_equal' when (current_character() & mask) != c.
426 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
428 Label* on_not_equal) {
429 __ And(a0, current_character(), Operand(mask));
// Compare against zero_reg directly when c == 0 (avoids materializing 0).
430 Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
431 BranchOrBacktrack(on_not_equal,
ne, a0, rhs);
// Branches to 'on_not_equal' when ((current_character() - minus) & mask) != c.
435 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
439 Label* on_not_equal) {
440 ASSERT(minus < String::kMaxUtf16CodeUnit);
441 __ Subu(a0, current_character(), Operand(minus));
442 __ And(a0, a0, Operand(mask));
443 BranchOrBacktrack(on_not_equal,
ne, a0, Operand(c));
// Branches to 'on_in_range' when from <= current_character() <= to, using the
// unsigned-subtract-and-compare trick ('ls' is an unsigned comparison).
447 void RegExpMacroAssemblerMIPS::CheckCharacterInRange(
450 Label* on_in_range) {
451 __ Subu(a0, current_character(), Operand(from));
453 BranchOrBacktrack(on_in_range,
ls, a0, Operand(to - from));
// Branches to 'on_not_in_range' when the current character is outside
// [from, to] (unsigned 'hi' after the range-shift subtraction).
457 void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
460 Label* on_not_in_range) {
461 __ Subu(a0, current_character(), Operand(from));
463 BranchOrBacktrack(on_not_in_range,
hi, a0, Operand(to - from));
// Branches to 'on_bit_set' when the byte in 'table' indexed by the current
// character (masked when it can exceed the table) is non-zero (fragment —
// the table-load instruction is missing from this extraction).
467 void RegExpMacroAssemblerMIPS::CheckBitInTable(
468 Handle<ByteArray> table,
470 __ li(a0, Operand(table));
// Mask the index unless every possible character fits the table.
471 if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
472 __ And(a1, current_character(), Operand(kTableSize - 1));
475 __ Addu(a0, a0, current_character());
479 BranchOrBacktrack(on_bit_set,
ne, a0, Operand(zero_reg));
// Emits specialized checks for escape-class characters (\s, \d, \D, ., \w,
// \W, ...), branching to 'on_no_match' on failure. Returns true when the
// class was handled (fragment — the switch on 'type' and several case
// boundaries are missing from this extraction; the checks below appear in
// the usual v8 order: \s, \d, \D, '.', \n-class, \w, \W).
483 bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(
uc16 type,
484 Label* on_no_match) {
// \s (whitespace) — ASCII fast path: ' ', \t..\r, and 0x00a0 (NBSP).
490 if (mode_ == ASCII) {
493 __ Branch(&success,
eq, current_character(), Operand(
' '));
495 __ Subu(a0, current_character(), Operand(
'\t'));
496 __ Branch(&success,
ls, a0, Operand(
'\r' -
'\t'));
498 BranchOrBacktrack(on_no_match,
ne, a0, Operand(0x00a0 -
'\t'));
// \d — match ASCII digits '0'..'9'.
508 __ Subu(a0, current_character(), Operand(
'0'));
509 BranchOrBacktrack(on_no_match,
hi, a0, Operand(
'9' -
'0'));
// \D — match non-digits.
513 __ Subu(a0, current_character(), Operand(
'0'));
514 BranchOrBacktrack(on_no_match,
ls, a0, Operand(
'9' -
'0'));
// '.' — match anything except newline chars (0x0a, 0x0d; plus 0x2028/0x2029
// for UC16). The xor-with-0x01 folds 0x0a/0x0b into one unsigned range.
518 __ Xor(a0, current_character(), Operand(0x01));
520 __ Subu(a0, a0, Operand(0x0b));
521 BranchOrBacktrack(on_no_match,
ls, a0, Operand(0x0c - 0x0b));
// UC16: also exclude 0x2028 (LS) and 0x2029 (PS).
526 __ Subu(a0, a0, Operand(0x2028 - 0x0b));
527 BranchOrBacktrack(on_no_match,
ls, a0, Operand(1));
// 'n' — match newline characters (the complement of '.').
533 __ Xor(a0, current_character(), Operand(0x01));
535 __ Subu(a0, a0, Operand(0x0b));
536 if (mode_ == ASCII) {
537 BranchOrBacktrack(on_no_match,
hi, a0, Operand(0x0c - 0x0b));
540 BranchOrBacktrack(&done,
ls, a0, Operand(0x0c - 0x0b));
544 __ Subu(a0, a0, Operand(0x2028 - 0x0b));
545 BranchOrBacktrack(on_no_match,
hi, a0, Operand(1));
// \w — word characters, via the precomputed re_word_character_map table.
551 if (mode_ != ASCII) {
553 BranchOrBacktrack(on_no_match,
hi, current_character(), Operand(
'z'));
555 ExternalReference
map = ExternalReference::re_word_character_map();
556 __ li(a0, Operand(map));
557 __ Addu(a0, a0, current_character());
559 BranchOrBacktrack(on_no_match,
eq, a0, Operand(zero_reg));
// \W — non-word characters: same table, inverted test.
564 if (mode_ != ASCII) {
566 __ Branch(&done,
hi, current_character(), Operand(
'z'));
568 ExternalReference map = ExternalReference::re_word_character_map();
569 __ li(a0, Operand(map));
570 __ Addu(a0, a0, current_character());
572 BranchOrBacktrack(on_no_match,
ne, a0, Operand(zero_reg));
573 if (mode_ != ASCII) {
// Fragment of RegExpMacroAssemblerMIPS::Fail() — the signature line is
// missing from this extraction. Loads FAILURE into the result register and
// jumps to the common exit.
589 __ li(v0, Operand(FAILURE));
590 __ jmp(&exit_label_);
// Finalizes generated code into a Code object (fragment — many interior
// lines, including the frame-slot pushes and several branch targets, are
// missing from this extraction). Emits, in order: the entry sequence and
// stack-limit check, per-match setup, the success/exit/backtrack sequences,
// the preemption and regexp-stack-overflow handlers, and finally asks the
// assembler for a code descriptor and wraps it in a Code object.
594 Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// If assembling failed, make the entry point report internal failure.
596 if (masm_->has_exception()) {
600 __ bind_to(&entry_label_, internal_failure_label_.pos());
606 __ bind(&entry_label_);
// Frame is built manually below, so tell the FrameScope not to.
610 FrameScope scope(masm_, StackFrame::MANUAL);
// Save argument registers, callee-saved registers and the return address.
620 RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
621 __ MultiPush(argument_registers | registers_to_retain | ra.bit());
625 __ mov(a0, zero_reg);
// Check the C stack limit before running the match.
630 Label stack_limit_hit;
633 ExternalReference stack_limit =
634 ExternalReference::address_of_stack_limit(masm_->isolate());
635 __ li(a0, Operand(stack_limit));
639 __ Branch(&stack_limit_hit,
le, a0, Operand(zero_reg));
645 __ li(v0, Operand(EXCEPTION));
648 __ bind(&stack_limit_hit);
649 CallCheckStackGuardState(a0);
// Non-zero result from the guard check means exception/retry: bail out.
651 __ Branch(&return_v0,
ne, v0, Operand(zero_reg));
657 __ lw(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
// current_input_offset() is kept negative, relative to the input end.
661 __ Subu(current_input_offset(), a0, end_of_input_address());
// Store the offset of the character one before the start (used by ^, \b).
665 __ Subu(a0, current_input_offset(), Operand(char_size()));
666 __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
670 __ sw(a0,
MemOperand(frame_pointer(), kInputStartMinusOne));
// Seed current_character(): '\n' when at the very start (so ^ matches),
// otherwise load the previous character from the input.
675 Label load_char_start_regexp, start_regexp;
677 __ Branch(&load_char_start_regexp,
ne, a1, Operand(zero_reg));
678 __ li(current_character(), Operand(
'\n'));
679 __ jmp(&start_regexp);
682 __ bind(&load_char_start_regexp);
684 LoadCurrentCharacterUnchecked(-1, 1);
685 __ bind(&start_regexp);
// Initialize the on-frame capture registers (loop for many, unrolled
// stores for few).
688 if (num_saved_registers_ > 0) {
690 if (num_saved_registers_ > 8) {
692 __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
693 __ li(a2, Operand(num_saved_registers_));
698 __ Subu(a2, a2, Operand(1));
699 __ Branch(&init_loop,
ne, a2, Operand(zero_reg));
701 for (
int i = 0; i < num_saved_registers_; i++) {
702 __ sw(a0, register_location(i));
708 __ lw(backtrack_stackpointer(),
MemOperand(frame_pointer(), kStackHighEnd));
710 __ jmp(&start_label_);
// Success: copy captures out to the caller-provided output area.
714 if (success_label_.is_linked()) {
716 __ bind(&success_label_);
717 if (num_saved_registers_ > 0) {
720 __ lw(a0,
MemOperand(frame_pointer(), kRegisterOutput));
722 __ Subu(a1, end_of_input_address(), a1);
728 __ Addu(a1, a1, Operand(a2));
735 for (
int i = 0; i < num_saved_registers_; i += 2) {
736 __ lw(a2, register_location(i));
737 __ lw(a3, register_location(i + 1));
// Remember match start/end for the zero-length-match check below.
738 if (i == 0 && global_with_zero_length_check()) {
748 __ Addu(a2, a1, Operand(a2));
749 __ Addu(a3, a1, Operand(a3));
// Global regexp: bump the success count and restart if there is room
// in the output buffer for another match.
760 __ lw(a0,
MemOperand(frame_pointer(), kSuccessfulCaptures));
761 __ lw(a1,
MemOperand(frame_pointer(), kNumOutputRegisters));
762 __ lw(a2,
MemOperand(frame_pointer(), kRegisterOutput));
765 __ sw(a0,
MemOperand(frame_pointer(), kSuccessfulCaptures));
768 __ Subu(a1, a1, num_saved_registers_);
// Not enough output space left for another full match: return.
771 __ Branch(&return_v0,
lt, a1, Operand(num_saved_registers_));
773 __ sw(a1,
MemOperand(frame_pointer(), kNumOutputRegisters));
776 __ sw(a2,
MemOperand(frame_pointer(), kRegisterOutput));
779 __ lw(a0,
MemOperand(frame_pointer(), kInputStartMinusOne));
// Avoid an infinite loop on zero-length matches: advance one char
// (or exit at end of input) before retrying.
781 if (global_with_zero_length_check()) {
786 &load_char_start_regexp,
ne, current_input_offset(), Operand(t7));
788 __ Branch(&exit_label_,
eq, current_input_offset(),
791 __ Addu(current_input_offset(),
792 current_input_offset(),
793 Operand((mode_ == UC16) ? 2 : 1));
796 __ Branch(&load_char_start_regexp);
798 __ li(v0, Operand(SUCCESS));
// Common exit: for globals, the result is the number of captures made.
802 __ bind(&exit_label_);
804 __ lw(v0,
MemOperand(frame_pointer(), kSuccessfulCaptures));
// Tear down the frame and restore callee-saved registers.
809 __ mov(
sp, frame_pointer());
811 __ MultiPop(registers_to_retain | ra.bit());
815 if (backtrack_label_.is_linked()) {
816 __ bind(&backtrack_label_);
820 Label exit_with_exception;
// Out-of-line preemption handler (reached via SafeCall).
823 if (check_preempt_label_.is_linked()) {
824 SafeCallTarget(&check_preempt_label_);
826 RegList regexp_registers_to_retain = current_input_offset().bit() |
827 current_character().bit() | backtrack_stackpointer().bit();
828 __ MultiPush(regexp_registers_to_retain);
829 CallCheckStackGuardState(a0);
830 __ MultiPop(regexp_registers_to_retain);
833 __ Branch(&return_v0,
ne, v0, Operand(zero_reg));
836 __ lw(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
// Out-of-line regexp-stack overflow handler: grow the backtrack stack.
842 if (stack_overflow_label_.is_linked()) {
843 SafeCallTarget(&stack_overflow_label_);
846 RegList regexp_registers = current_input_offset().bit() |
847 current_character().bit();
848 __ MultiPush(regexp_registers);
851 static const int num_arguments = 3;
852 __ PrepareCallCFunction(num_arguments, a0);
853 __ mov(a0, backtrack_stackpointer());
854 __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
855 __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
856 ExternalReference grow_stack =
857 ExternalReference::re_grow_stack(masm_->isolate());
858 __ CallCFunction(grow_stack, num_arguments);
860 __ MultiPop(regexp_registers);
// NULL result means the stack could not be grown.
863 __ Branch(&exit_with_exception,
eq, v0, Operand(zero_reg));
865 __ mov(backtrack_stackpointer(), v0);
868 __ lw(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
872 if (exit_with_exception.is_linked()) {
874 __ bind(&exit_with_exception);
876 __ li(v0, Operand(EXCEPTION));
// Wrap the finished buffer in a Code object and log its creation.
882 masm_->GetCode(&code_desc);
883 Handle<Code>
code = isolate()->factory()->NewCode(
884 code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
885 LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
886 return Handle<HeapObject>::cast(code);
// Unconditional jump to 'to' (fragment — body line missing).
890 void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
// Branches to 'if_ge' when register 'reg' >= comparand.
900 void RegExpMacroAssemblerMIPS::IfRegisterGE(
int reg,
903 __ lw(a0, register_location(reg));
904 BranchOrBacktrack(if_ge,
ge, a0, Operand(comparand));
// Branches to 'if_lt' when register 'reg' < comparand.
908 void RegExpMacroAssemblerMIPS::IfRegisterLT(
int reg,
911 __ lw(a0, register_location(reg));
912 BranchOrBacktrack(if_lt,
lt, a0, Operand(comparand));
// Branches to 'if_eq' when register 'reg' equals the current input position.
916 void RegExpMacroAssemblerMIPS::IfRegisterEqPos(
int reg,
918 __ lw(a0, register_location(reg));
919 BranchOrBacktrack(if_eq,
eq, a0, Operand(current_input_offset()));
// Identifies this backend to the Irregexp engine.
923 RegExpMacroAssembler::IrregexpImplementation
924 RegExpMacroAssemblerMIPS::Implementation() {
925 return kMIPSImplementation;
// Loads 'characters' characters at cp_offset into current_character(),
// branching to 'on_end_of_input' when the read would pass the input end.
929 void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(
int cp_offset,
930 Label* on_end_of_input,
934 ASSERT(cp_offset < (1<<30));
// Bounds-check the last character to be read, then load unchecked.
936 CheckPosition(cp_offset + characters - 1, on_end_of_input);
938 LoadCurrentCharacterUnchecked(cp_offset, characters);
// Pops the saved input position off the backtrack stack.
942 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
943 Pop(current_input_offset());
// Pops a value off the backtrack stack into register 'register_index'
// (fragment — the Pop(a0) preceding the store is missing).
947 void RegExpMacroAssemblerMIPS::PopRegister(
int register_index) {
949 __ sw(a0, register_location(register_index));
// Pushes the code offset of 'label' onto the backtrack stack (fragment —
// interior lines missing). For unbound labels an in-code constant slot is
// emitted (branched over) and patched via label_at_put when the label binds.
953 void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
954 if (label->is_bound()) {
955 int target = label->pos();
// Keep the constant slot and its load together (no trampoline in between).
958 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
959 Label after_constant;
960 __ Branch(&after_constant);
961 int offset = masm_->pc_offset();
964 masm_->label_at_put(label, offset);
965 __ bind(&after_constant);
966 if (is_int16(cp_offset)) {
969 __ Addu(a0, code_pointer(), cp_offset);
// Pushes the current input position onto the backtrack stack.
978 void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
979 Push(current_input_offset());
// Pushes register 'register_index' onto the backtrack stack, optionally
// checking the stack limit afterwards (fragment — the Push(a0) is missing).
983 void RegExpMacroAssemblerMIPS::PushRegister(
int register_index,
984 StackCheckFlag check_stack_limit) {
985 __ lw(a0, register_location(register_index));
987 if (check_stack_limit) CheckStackLimit();
// Restores the current input position from register 'reg'.
991 void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(
int reg) {
992 __ lw(current_input_offset(), register_location(reg));
// Restores the backtrack stack pointer from register 'reg'; the stored value
// is relative, so the stack base (loaded into a0 on a missing line) is
// re-added here.
996 void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(
int reg) {
997 __ lw(backtrack_stackpointer(), register_location(reg));
999 __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
// Sets the current position to 'by' characters before the input end, but
// only when the current position is before that point; then reloads the
// previous character (fragment — the branch condition line is missing).
1003 void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(
int by) {
1004 Label after_position;
1005 __ Branch(&after_position,
1007 current_input_offset(),
1008 Operand(-by * char_size()));
1009 __ li(current_input_offset(), -by * char_size());
// Refresh current_character() for the new position.
1013 LoadCurrentCharacterUnchecked(-1, 1);
1014 __ bind(&after_position);
// Stores the immediate 'to' into register 'register_index'. Only non-capture
// registers may be set directly (hence the ASSERT).
1018 void RegExpMacroAssemblerMIPS::SetRegister(
int register_index,
int to) {
1019 ASSERT(register_index >= num_saved_registers_);
1020 __ li(a0, Operand(to));
1021 __ sw(a0, register_location(register_index));
// Jumps to the success handler; the (missing) return value tells the caller
// whether this regexp restarts on success (global).
1025 bool RegExpMacroAssemblerMIPS::Succeed() {
1026 __ jmp(&success_label_);
// Stores the current position (+ cp_offset characters) into register 'reg'.
1031 void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(
int reg,
// Fast path: store the offset register directly when cp_offset is 0.
1033 if (cp_offset == 0) {
1034 __ sw(current_input_offset(), register_location(reg));
1036 __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
1037 __ sw(a0, register_location(reg));
// Resets registers reg_from..reg_to (inclusive) to the "no match" value,
// which is the position one character before the input start.
1042 void RegExpMacroAssemblerMIPS::ClearRegisters(
int reg_from,
int reg_to) {
1043 ASSERT(reg_from <= reg_to);
1044 __ lw(a0,
MemOperand(frame_pointer(), kInputStartMinusOne));
1045 for (
int reg = reg_from; reg <= reg_to; reg++) {
1046 __ sw(a0, register_location(reg));
// Saves the backtrack stack pointer into register 'reg', relative to the
// stack base (loaded into a1 on a line missing from this extraction).
1051 void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(
int reg) {
1053 __ Subu(a0, backtrack_stackpointer(), a1);
1054 __ sw(a0, register_location(reg));
// Whether this backend may issue unaligned loads (fragment — body missing).
1058 bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Calls the C++ CheckStackGuardState helper via DirectCEntryStub, aligning
// sp to the platform's activation-frame alignment first (fragment — the
// argument stores and stack restoration are missing from this extraction).
1065 void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
1066 int stack_alignment = OS::ActivationFrameAlignment();
// Remember the unaligned sp so it can be restored after the call.
1069 __ mov(scratch,
sp);
1072 __ And(
sp,
sp, Operand(-stack_alignment));
1075 __ mov(a2, frame_pointer());
// Reserve an aligned slot for the return-address argument.
1081 __ Subu(
sp,
sp, Operand(stack_alignment));
1096 ExternalReference stack_guard_check =
1097 ExternalReference::re_check_stack_guard_state(masm_->isolate());
1098 __ li(t9, Operand(stack_guard_check));
// DirectCEntryStub makes the call GC-safe (returns via a known point).
1099 DirectCEntryStub stub;
1100 stub.GenerateCall(masm_, t9);
1115 __ li(code_pointer(), Operand(masm_->CodeObject()));
// Helper for reading/writing a typed slot in the regexp frame from C++ code:
// returns an lvalue reference of type T to re_frame + frame_offset.
1120 template <
typename T>
1121 static T& frame_entry(
Address re_frame,
int frame_offset) {
1122 return reinterpret_cast<T&
>(Memory::int32_at(re_frame + frame_offset));
// C++ callback invoked from generated code when the stack guard triggers
// (fragment — several interior lines, including some early returns, are
// missing). Handles the interrupt, then — because a GC may have moved the
// code object or the subject string — fixes up the saved return address and
// the cached input-start/input-end pointers in the regexp frame.
1126 int RegExpMacroAssemblerMIPS::CheckStackGuardState(
Address* return_address,
1129 Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
1130 if (isolate->stack_guard()->IsStackOverflow()) {
1131 isolate->StackOverflow();
// Direct calls from JS must not handle interrupts here (retry instead).
1140 if (frame_entry<int>(re_frame, kDirectCall) == 1) {
1145 HandleScope handles(isolate);
1146 Handle<Code> code_handle(re_code);
1148 Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Remember the representation before the interrupt may trigger a GC.
1150 bool is_ascii = subject->IsOneByteRepresentationUnderneath();
1152 ASSERT(re_code->instruction_start() <= *return_address);
1153 ASSERT(*return_address <=
1154 re_code->instruction_start() + re_code->instruction_size());
1156 MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
// If the code object moved, relocate the saved return address.
1158 if (*code_handle != re_code) {
1159 int delta = code_handle->address() - re_code->address();
1161 *return_address += delta;
1164 if (result->IsException()) {
// Peel cons/sliced wrappers to find the flat string actually scanned.
1168 Handle<String> subject_tmp = subject;
1169 int slice_offset = 0;
1172 if (StringShape(*subject_tmp).IsCons()) {
1173 subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
1174 }
else if (StringShape(*subject_tmp).IsSliced()) {
1175 SlicedString* slice = SlicedString::cast(*subject_tmp);
1176 subject_tmp = Handle<String>(slice->parent());
1177 slice_offset = slice->offset();
// Representation changed (e.g. flattening): must retry from scratch.
1181 if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
1192 ASSERT(StringShape(*subject_tmp).IsSequential() ||
1193 StringShape(*subject_tmp).IsExternal());
1196 const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
// If the string data moved, patch the cached input pointers in the frame.
1200 int start_index = frame_entry<int>(re_frame, kStartIndex);
1201 const byte* new_address = StringCharacterPosition(*subject_tmp,
1202 start_index + slice_offset);
1204 if (start_address != new_address) {
1207 const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
1208 int byte_length =
static_cast<int>(end_address - start_address);
1209 frame_entry<const String*>(re_frame, kInputString) = *subject;
1210 frame_entry<const byte*>(re_frame, kInputStart) = new_address;
1211 frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
1212 }
// Data did not move but the String object itself may have: refresh it.
else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
1216 frame_entry<const String*>(re_frame, kInputString) = *subject;
// Returns the frame slot for register 'register_index', growing the frame's
// register count bookkeeping when a new high index is seen.
1223 MemOperand RegExpMacroAssemblerMIPS::register_location(
int register_index) {
1224 ASSERT(register_index < (1<<30));
1225 if (num_registers_ <= register_index) {
1226 num_registers_ = register_index + 1;
// Branches to 'on_outside_input' when reading at cp_offset would pass the
// input end (fragment — the branch condition line is missing).
1233 void RegExpMacroAssemblerMIPS::CheckPosition(
int cp_offset,
1234 Label* on_outside_input) {
1235 BranchOrBacktrack(on_outside_input,
1237 current_input_offset(),
1238 Operand(-cp_offset * char_size()));
// Conditional jump helper: NULL 'to' means jump to the backtrack sequence;
// 'al' (always) makes the branch unconditional.
1242 void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
1245 const Operand& rt) {
1246 if (condition ==
al) {
1255 __ Branch(&backtrack_label_, condition, rs, rt);
1258 __ Branch(to, condition, rs, rt);
// Conditional call used for out-of-line handlers; paired with SafeReturn /
// SafeCallTarget, which keep the stored ra GC-safe (code-relative).
1262 void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
1265 const Operand& rt) {
1266 __ BranchAndLink(to, cond, rs, rt);
// Return from a SafeCall: re-bias the saved code-relative offset by the code
// object address to recover the absolute return address (fragment — the
// pop and jump are missing from this extraction).
1270 void RegExpMacroAssemblerMIPS::SafeReturn() {
1272 __ Addu(t5, ra, Operand(masm_->CodeObject()));
// Entry point of a SafeCall target: convert ra to a code-relative offset so
// a GC moving the code object cannot invalidate the saved return address.
1277 void RegExpMacroAssemblerMIPS::SafeCallTarget(Label*
name) {
1279 __ Subu(ra, ra, Operand(masm_->CodeObject()));
// Pushes 'source' onto the backtrack stack, which grows downward (fragment —
// the decrement operand and the store are missing from this extraction).
1284 void RegExpMacroAssemblerMIPS::Push(Register source) {
1285 ASSERT(!source.is(backtrack_stackpointer()));
1286 __ Addu(backtrack_stackpointer(),
1287 backtrack_stackpointer(),
// Pops the top of the backtrack stack into 'target' (fragment — the load is
// missing; only the pointer adjustment is visible).
1293 void RegExpMacroAssemblerMIPS::Pop(Register target) {
1294 ASSERT(!target.is(backtrack_stackpointer()));
1296 __ Addu(backtrack_stackpointer(), backtrack_stackpointer(),
kPointerSize);
// Calls the preemption handler when the C stack pointer reaches the VM stack
// limit (fragment — the load of the limit value into a0 is missing).
1300 void RegExpMacroAssemblerMIPS::CheckPreemption() {
1302 ExternalReference stack_limit =
1303 ExternalReference::address_of_stack_limit(masm_->isolate());
1304 __ li(a0, Operand(stack_limit));
1306 SafeCall(&check_preempt_label_,
ls,
sp, Operand(a0));
// Calls the grow-stack handler when the backtrack stack pointer reaches the
// regexp-stack limit (fragment — the load of the limit value is missing).
1310 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
1311 ExternalReference stack_limit =
1312 ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
1314 __ li(a0, Operand(stack_limit));
1316 SafeCall(&stack_overflow_label_,
ls, backtrack_stackpointer(), Operand(a0));
// Loads character(s) at cp_offset into current_character() without a bounds
// check (fragment — the actual lbu/lhu load instructions are missing from
// this extraction; only the address computation is visible).
1320 void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(
int cp_offset,
1322 Register offset = current_input_offset();
// Fold a non-zero cp_offset into a scratch offset register.
1323 if (cp_offset != 0) {
1325 __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
1331 __ Addu(t5, end_of_input_address(), Operand(offset));
1332 if (mode_ == ASCII) {
1343 #endif // V8_INTERPRETED_REGEXP
1347 #endif // V8_TARGET_ARCH_MIPS
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
const int kCArgsSlotsSize
void Fail(const v8::FunctionCallbackInfo< v8::Value > &args)
RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone *zone)
#define LOG(isolate, Call)
#define ASSERT(condition)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
bool IsAligned(T value, U alignment)
#define T(name, string, precedence)
MemOperand FieldMemOperand(Register object, int offset)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
#define ASSERT_EQ(v1, v2)