RegExpMacroAssembler::RegExpMacroAssembler(Zone* zone)
    : slow_safe_compiler_(false),
      global_mode_(NOT_GLOBAL),
      zone_(zone) {
}
bool RegExpMacroAssembler::CanReadUnaligned() {
#ifdef V8_HOST_CAN_READ_UNALIGNED
  return true;
#else
  return false;
#endif
}
#ifndef V8_INTERPRETED_REGEXP  // Avoid unused code, e.g., on ARM.
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
    : RegExpMacroAssembler(zone) {
}


bool NativeRegExpMacroAssembler::CanReadUnaligned() {
  return FLAG_enable_unaligned_accesses && !slow_safe();
}
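// Sketch of the portable fallback (hypothetical helper, not part of this
// file): when CanReadUnaligned() returns false, characters may not be
// fetched with a single wide, potentially misaligned load. The C++
// equivalent of the safe path is a memcpy-based read, which the compiler
// lowers to whatever access the host allows:
//
//   static inline uc16 LoadUC16(const byte* p) {
//     uc16 c;
//     memcpy(&c, p, sizeof(c));  // safe at any alignment
//     return c;
//   }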
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
    String* subject,
    int start_index) {
  // Not just flat, but no cons or sliced string: the caller must already
  // have unpacked the subject to a sequential or external string.
  ASSERT(subject->IsExternalString() || subject->IsSeqString());
  ASSERT(start_index >= 0);
  ASSERT(start_index <= subject->length());
  if (subject->IsOneByteRepresentation()) {
    const byte* address;
    if (StringShape(subject).IsExternal()) {
      const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
      address = reinterpret_cast<const byte*>(data);
    } else {
      ASSERT(subject->IsSeqOneByteString());
      const uint8_t* data = SeqOneByteString::cast(subject)->GetChars();
      address = reinterpret_cast<const byte*>(data);
    }
    return address + start_index;
  }
  const uc16* data;
  if (StringShape(subject).IsExternal()) {
    data = ExternalTwoByteString::cast(subject)->GetChars();
  } else {
    ASSERT(subject->IsSeqTwoByteString());
    data = SeqTwoByteString::cast(subject)->GetChars();
  }
  return reinterpret_cast<const byte*>(data + start_index);
}
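// Note the asymmetry between the two return paths: in the one-byte case
// start_index is added to a byte pointer, so it counts bytes; in the
// two-byte case it is added to data before the cast, so pointer arithmetic
// scales it by sizeof(uc16). For start_index == 3 on a two-byte string the
// returned address is therefore data + 6 bytes.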
NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
    Handle<Code> regexp_code,
    Handle<String> subject,
    int* offsets_vector,
    int offsets_vector_length,
    int previous_index,
    Isolate* isolate) {
  ASSERT(subject->IsFlat());
  ASSERT(previous_index >= 0);
  ASSERT(previous_index <= subject->length());

  // No heap allocation may happen between here and the call into generated
  // code: raw String* pointers are held across the region below.
  String* subject_ptr = *subject;
  // Character offsets into string.
  int start_offset = previous_index;
  int char_length = subject_ptr->length() - start_offset;
  int slice_offset = 0;

  // The string has been flattened, so if it is a cons string it contains the
  // full string in the first part.
  if (StringShape(subject_ptr).IsCons()) {
    ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
    subject_ptr = ConsString::cast(subject_ptr)->first();
  } else if (StringShape(subject_ptr).IsSliced()) {
    SlicedString* slice = SlicedString::cast(subject_ptr);
    subject_ptr = slice->parent();
    slice_offset = slice->offset();
  }
  // The underlying string must have the same representation as the subject.
  bool is_ascii = subject_ptr->IsOneByteRepresentation();
  ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
  // String is now either Sequential or External.
  int char_size_shift = is_ascii ? 0 : 1;

  const byte* input_start =
      StringCharacterPosition(subject_ptr, start_offset + slice_offset);
  int byte_length = char_length << char_size_shift;
  const byte* input_end = input_start + byte_length;
  Result res = Execute(*regexp_code,
                       *subject,
                       start_offset,
                       input_start,
                       input_end,
                       offsets_vector,
                       offsets_vector_length,
                       isolate);
  return res;
}
NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
    Code* code,
    String* input,  // This needs to be the unpacked (sliced, cons) string.
    int start_offset,
    const byte* input_start,
    const byte* input_end,
    int* output,
    int output_size,
    Isolate* isolate) {
  // Ensure that the minimum stack has been allocated.
  RegExpStackScope stack_scope(isolate);
  Address stack_base = stack_scope.stack()->stack_base();

  int direct_call = 0;
  int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
                                          input,
                                          start_offset,
                                          input_start,
                                          input_end,
                                          output,
                                          output_size,
                                          stack_base,
                                          direct_call,
                                          isolate);
  ASSERT(result >= RETRY);

  if (result == EXCEPTION && !isolate->has_pending_exception()) {
    // We detected a stack overflow (on the backtrack stack) in RegExp code,
    // but haven't created the exception yet.
    isolate->StackOverflow();
  }
  return static_cast<Result>(result);
}
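// For reference, the values compared against above are the Result constants
// declared in regexp-macro-assembler.h:
//
//   enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
//
// ASSERT(result >= RETRY) therefore rejects any other negative value coming
// out of the generated code, and EXCEPTION without a pending exception can
// only mean the backtrack-stack overflow handled above.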
const byte NativeRegExpMacroAssembler::word_character_map[] = {
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,

    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // '0' - '7'
    0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,  // '8' - '9'

    0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'A' - 'G'
    0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'H' - 'O'
    0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'P' - 'W'
    0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu,  // 'X' - 'Z', '_'

    0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'a' - 'g'
    0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'h' - 'o'
    0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu,  // 'p' - 'w'
    0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,  // 'x' - 'z'
    // Latin-1 range
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,

    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,

    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
    0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
};
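// Usage sketch (hypothetical helper, not part of this file): entries are
// 0xffu exactly for [0-9A-Za-z_], so a \w test on the low byte range is a
// single table load:
//
//   static inline bool IsWordCharacter(uc16 c) {
//     return c <= 0xff &&
//            NativeRegExpMacroAssembler::word_character_map[c] != 0;
//   }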
int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
    Address byte_offset1,
    Address byte_offset2,
    size_t byte_length,
    Isolate* isolate) {
  unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
      isolate->regexp_macro_assembler_canonicalize();
  // This function must not cause a garbage collection: a GC might move the
  // calling generated code and invalidate the return address on the stack.
  ASSERT(byte_length % 2 == 0);
  uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
  uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
  size_t length = byte_length >> 1;

  for (size_t i = 0; i < length; i++) {
    unibrow::uchar c1 = substring1[i];
    unibrow::uchar c2 = substring2[i];
    if (c1 != c2) {
      unibrow::uchar s1[1] = { c1 };
      canonicalize->get(c1, '\0', s1);
      if (s1[0] != c2) {
        unibrow::uchar s2[1] = { c2 };
        canonicalize->get(c2, '\0', s2);
        if (s1[0] != s2[0]) {
          return 0;
        }
      }
    }
  }
  return 1;
}
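// Illustrative call (assumes an Isolate* is in scope): mixed-case spellings
// of the same word canonicalize pairwise to the same characters, so the
// function returns 1:
//
//   uc16 a[] = { 'R', 'e', 'g', 'E', 'x', 'p' };
//   uc16 b[] = { 'r', 'E', 'G', 'e', 'X', 'P' };
//   int equal = NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
//       reinterpret_cast<Address>(a), reinterpret_cast<Address>(b),
//       sizeof(a), isolate);  // 1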
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
                                              Address* stack_base,
                                              Isolate* isolate) {
  RegExpStack* regexp_stack = isolate->regexp_stack();
  size_t size = regexp_stack->stack_capacity();
  Address old_stack_base = regexp_stack->stack_base();
  ASSERT(old_stack_base == *stack_base);
  ASSERT(stack_pointer <= old_stack_base);
  ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
  Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
  if (new_stack_base == NULL) {
    return NULL;
  }
  *stack_base = new_stack_base;
  intptr_t stack_content_size = old_stack_base - stack_pointer;
  return new_stack_base - stack_content_size;
}
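// Worked example with made-up addresses: the backtrack stack grows
// downwards, so the live contents occupy [stack_pointer, old_stack_base).
// With old_stack_base == 0x1000 and stack_pointer == 0xf80, the live size
// is 0x80 bytes. If EnsureCapacity() moves the allocation so that
// new_stack_base == 0x2000, the caller gets back 0x2000 - 0x80 == 0x1f80,
// i.e. the same amount of live data measured from the new base.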
#endif  // V8_INTERPRETED_REGEXP