v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
deoptimizer-ia32.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

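// Size in bytes of one entry in the deoptimization entry table emitted by
// TableEntryGenerator::GeneratePrologue below: presumably a push_imm32 of the
// entry id (1-byte opcode + 4-byte immediate) followed by a near jmp to the
// common entry code (1-byte opcode + 4-byte displacement), 10 bytes in total.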
const int Deoptimizer::table_entry_size_ = 10;


int Deoptimizer::patch_size() {
  return Assembler::kCallInstructionLength;
}
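// The patch size is the width of one near call instruction (opcode byte plus
// 32-bit displacement, i.e. 5 bytes on ia32), matching what
// PatchCodeForDeoptimization writes at each lazy deopt point.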


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  Isolate* isolate = code->GetIsolate();
  HandleScope scope(isolate);

  // Compute the size of relocation information needed for the code
  // patching in Deoptimizer::DeoptimizeFunction.
  int min_reloc_size = 0;
  int prev_pc_offset = 0;
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    int pc_offset = deopt_data->Pc(i)->value();
    if (pc_offset == -1) continue;
    ASSERT_GE(pc_offset, prev_pc_offset);
    int pc_delta = pc_offset - prev_pc_offset;
    // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
    // if encodable with small pc delta encoding and up to 6 bytes
    // otherwise.
    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
      min_reloc_size += 2;
    } else {
      min_reloc_size += 6;
    }
    prev_pc_offset = pc_offset;
  }

  // If the relocation information is not big enough we create a new
  // relocation info object that is padded with comments to make it
  // big enough for lazy deoptimization.
  int reloc_length = code->relocation_info()->length();
  if (min_reloc_size > reloc_length) {
    int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
    // Padding needed.
    int min_padding = min_reloc_size - reloc_length;
    // Number of comments needed to take up at least that much space.
    int additional_comments =
        (min_padding + comment_reloc_size - 1) / comment_reloc_size;
    // Actual padding size.
    int padding = additional_comments * comment_reloc_size;
    // Allocate new relocation info and copy old relocation to the end
    // of the new relocation info array because relocation info is
    // written and read backwards.
    Factory* factory = isolate->factory();
    Handle<ByteArray> new_reloc =
        factory->NewByteArray(reloc_length + padding, TENURED);
    OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
                code->relocation_info()->GetDataStartAddress(),
                reloc_length);
    // Create a relocation writer to write the comments in the padding
    // space. Use position 0 for everything to ensure short encoding.
    RelocInfoWriter reloc_info_writer(
        new_reloc->GetDataStartAddress() + padding, 0);
    intptr_t comment_string
        = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
    for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
      byte* pos_before = reloc_info_writer.pos();
#endif
      reloc_info_writer.Write(&rinfo);
      ASSERT(RelocInfo::kMinRelocCommentSize ==
             pos_before - reloc_info_writer.pos());
    }
    // Replace relocation information on the code object.
    code->set_relocation_info(*new_reloc);
  }
}


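// PatchCodeForDeoptimization rewrites optimized code in place: every lazy
// deoptimization point recorded in the DeoptimizationInputData is overwritten
// with a call into the lazy deoptimization entry table, and matching
// RUNTIME_ENTRY reloc info is written for the patched calls.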
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(pointer, 1);
    patcher.masm()->int3();

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
      osr_patcher.masm()->int3();
    }
  }

  // We will overwrite the code's relocation info in-place. Relocation info
  // is written backward. The relocation info is the payload of a byte
  // array. Later on we will slide this to the start of the byte array and
  // create a filler object in the remaining space.
  ByteArray* reloc_info = code->relocation_info();
  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

  // Since the call is a relative encoding, write new
  // reloc info. We do not need any of the existing reloc info because the
  // existing code will not be used again (we zap it in debug builds).
  //
  // Emit call to lazy deoptimization at all lazy deopt points.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  SharedFunctionInfo* shared =
      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    // Patch lazy deoptimization entry.
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    CodePatcher patcher(call_address, patch_size());
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
    // We use RUNTIME_ENTRY for deoptimization bailouts.
    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                    RelocInfo::RUNTIME_ENTRY,
                    reinterpret_cast<intptr_t>(deopt_entry),
                    NULL);
    reloc_info_writer.Write(&rinfo);
    ASSERT_GE(reloc_info_writer.pos(),
              reloc_info->address() + ByteArray::kHeaderSize);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Move the relocation info to the beginning of the byte array.
  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
  OS::MemMove(
      code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

  // The relocation info is in place, update the size.
  reloc_info->set_length(new_reloc_size);

  // Handle the junk part after the new relocation info. We will create
  // a non-live object in the extra space at the end of the former reloc info.
  Address junk_address = reloc_info->address() + reloc_info->Size();
  ASSERT(junk_address <= reloc_end_address);
  isolate->heap()->CreateFillerObjectAt(junk_address,
                                        reloc_end_address - junk_address);
}


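// FillInputFrame captures the machine state of the frame being deoptimized
// into the input FrameDescription. Only ebp and esp carry real values here;
// the other general-purpose registers are seeded with the dummy value i * 4.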
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers ebp and esp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


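// The register assignments below (eax = handler parameter count, ebx =
// handler address) presumably match the calling convention expected by the
// NotifyStubFailure builtins returned by NotifyStubFailureBuiltin further
// down.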
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  intptr_t handler =
      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(eax.code(), params);
  output_frame->SetRegister(ebx.code(), handler);
}


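// Without SSE2 the ia32 port allocates no XMM registers, so there is nothing
// to copy.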
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  if (!CpuFeatures::IsSupported(SSE2)) return;
  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}


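// On ia32, optimized frames may have been dynamically realigned to 8 bytes by
// pushing one word of padding; the frame slot at the dynamic alignment state
// offset records whether that happened so the deoptimizer can account for the
// extra word (see also the no_padding handling in Generate below).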
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned alignment_state_offset =
      input_frame_size - parameter_count * kPointerSize -
      StandardFrameConstants::kFixedFrameSize -
      kPointerSize;
  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
         JavaScriptFrameConstants::kLocal0Offset);
  int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
  return (alignment_state == kAlignmentPaddingPushed);
}


Code* Deoptimizer::NotifyStubFailureBuiltin() {
  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
  return isolate_->builtins()->builtin(name);
}


#define __ masm()->

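// The deoptimization entry code generated below proceeds in five steps: save
// all XMM and general-purpose registers, call into the runtime
// (ExternalReference::new_deoptimizer_function) to allocate a Deoptimizer,
// copy the optimized frame into the input FrameDescription, call
// compute_output_frames_function, and finally rebuild the unoptimized frames
// on the stack and return to the continuation.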
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize = kDoubleSize *
                              XMMRegister::kNumAllocatableRegisters;
  __ sub(esp, Immediate(kDoubleRegsSize));
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int offset = i * kDoubleSize;
      __ movsd(Operand(esp, offset), xmm_reg);
    }
  }

  __ pushad();

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
                                      kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object
  // and compute the fp-to-sp delta in register edx.
  __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
  __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));

  __ sub(edx, ebp);
  __ neg(edx);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, eax);
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
  __ mov(Operand(esp, 5 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve deoptimizer object in register eax and get the input
  // frame descriptor pointer.
  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));

  // Fill in the input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(ebx, offset));
  }
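  // Note: pushad pushes the registers in register-code order (eax, code 0,
  // first), so the topmost stack word is edi (code 7) and the descending loop
  // above stores each popped value at its register's slot.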

  int double_regs_offset = FrameDescription::double_registers_offset();
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    // Fill in the double input registers.
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      int dst_offset = i * kDoubleSize + double_regs_offset;
      int src_offset = i * kDoubleSize;
      __ movsd(xmm0, Operand(esp, src_offset));
      __ movsd(Operand(ebx, dst_offset), xmm0);
    }
  }

  // Clear all FPU exceptions.
  // TODO(ulan): Find out why the TOP register is not zero here in some cases,
  // and check that the generated code never deoptimizes with unbalanced stack.
  __ fnclex();

  // Remove the bailout id, return address and the double registers.
  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));

  // Compute a pointer to the unwinding limit in register ecx; that is
  // the first stack slot not part of the input frame.
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ add(ecx, esp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(edx, 0));
  __ add(edx, Immediate(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(ecx, esp);
  __ j(not_equal, &pop_loop);
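  // At this point everything from the original esp up to (but not including)
  // the unwinding limit has been copied into the input FrameDescription's
  // frame content area.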

  // Compute the output frame in the deoptimizer.
  __ push(eax);
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0 * kPointerSize), eax);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(eax);

  // If frame was dynamically aligned, pop padding.
  Label no_padding;
  __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
         Immediate(0));
  __ j(equal, &no_padding);
  __ pop(ecx);
  if (FLAG_debug_code) {
    __ cmp(ecx, Immediate(kAlignmentZapValue));
    __ Assert(equal, kAlignmentMarkerExpected);
  }
  __ bind(&no_padding);

  // Replace the current frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: eax = current FrameDescription**, edx = one past the
  // last FrameDescription**.
  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
  __ lea(edx, Operand(eax, edx, times_4, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
  __ mov(ebx, Operand(eax, 0));
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(ecx, Immediate(sizeof(uint32_t)));
  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ test(ecx, ecx);
  __ j(not_zero, &inner_push_loop);
  __ add(eax, Immediate(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(eax, edx);
  __ j(below, &outer_push_loop);
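  // The frames are materialized bottom-up: the outer loop walks the array of
  // FrameDescription pointers from first to last, and the inner loop pushes
  // each frame's contents from the highest offset down to offset zero.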

  // In case of a failed STUB, we have to restore the XMM registers.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int src_offset = i * kDoubleSize + double_regs_offset;
      __ movsd(xmm_reg, Operand(ebx, src_offset));
    }
  }

  // Push state, pc, and continuation from the last output frame.
  __ push(Operand(ebx, FrameDescription::state_offset()));
  __ push(Operand(ebx, FrameDescription::pc_offset()));
  __ push(Operand(ebx, FrameDescription::continuation_offset()));

  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(ebx, offset));
  }

  // Restore the registers from the stack.
  __ popad();

  // Return to the continuation point.
  __ ret(0);
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ push_imm32(i);
    __ jmp(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}
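// Every entry must occupy exactly table_entry_size_ bytes (checked by the
// ASSERT above) so that the address of the entry for a bailout id can be
// computed as base + id * table_entry_size_.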


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support.
  UNREACHABLE();
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32