v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
deoptimizer-x64.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
32 #include "codegen.h"
33 #include "deoptimizer.h"
34 #include "full-codegen.h"
35 #include "safepoint-table.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 const int Deoptimizer::table_entry_size_ = 10;
42 
43 
46 }
47 
48 
49 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
50  HandleScope scope;
51  AssertNoAllocation no_allocation;
52 
53  if (!function->IsOptimized()) return;
54 
55  // The optimized code is going to be patched, so we cannot use it
56  // any more. Play safe and reset the whole cache.
57  function->shared()->ClearOptimizedCodeMap();
58 
59  // Get the optimized code.
60  Code* code = function->code();
61 
62  // Invalidate the relocation information, as it will become invalid by the
63  // code patching below, and is not needed any more.
64  code->InvalidateRelocation();
65 
66  // For each LLazyBailout instruction insert a absolute call to the
67  // corresponding deoptimization entry, or a short call to an absolute
68  // jump if space is short. The absolute jumps are put in a table just
69  // before the safepoint table (space was allocated there when the Code
70  // object was created, if necessary).
71 
72  Address instruction_start = function->code()->instruction_start();
73 #ifdef DEBUG
74  Address prev_call_address = NULL;
75 #endif
76  DeoptimizationInputData* deopt_data =
77  DeoptimizationInputData::cast(code->deoptimization_data());
78  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
79  if (deopt_data->Pc(i)->value() == -1) continue;
80  // Position where Call will be patched in.
81  Address call_address = instruction_start + deopt_data->Pc(i)->value();
82  // There is room enough to write a long call instruction because we pad
83  // LLazyBailout instructions with nops if necessary.
84  CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
85  patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
86  ASSERT(prev_call_address == NULL ||
87  call_address >= prev_call_address + patch_size());
88  ASSERT(call_address + patch_size() <= code->instruction_end());
89 #ifdef DEBUG
90  prev_call_address = call_address;
91 #endif
92  }
93 
94  Isolate* isolate = code->GetIsolate();
95 
96  // Add the deoptimizing code to the list.
98  DeoptimizerData* data = isolate->deoptimizer_data();
99  node->set_next(data->deoptimizing_code_list_);
100  data->deoptimizing_code_list_ = node;
101 
102  // We might be in the middle of incremental marking with compaction.
103  // Tell collector to treat this code object in a special way and
104  // ignore all slots that might have been recorded on it.
105  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
106 
107  ReplaceCodeForRelatedFunctions(function, code);
108 
109  if (FLAG_trace_deopt) {
110  PrintF("[forced deoptimization: ");
111  function->PrintName();
112  PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
113  }
114 }
115 
116 
117 static const byte kJnsInstruction = 0x79;
118 static const byte kJnsOffset = 0x1f;
119 static const byte kJaeInstruction = 0x73;
120 static const byte kJaeOffset = 0x07;
121 static const byte kCallInstruction = 0xe8;
122 static const byte kNopByteOne = 0x66;
123 static const byte kNopByteTwo = 0x90;
124 
125 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
126  Address pc_after,
127  Code* check_code,
128  Code* replacement_code) {
129  Address call_target_address = pc_after - kIntSize;
130  ASSERT_EQ(check_code->entry(),
131  Assembler::target_address_at(call_target_address));
132  // The stack check code matches the pattern:
133  //
134  // cmp rsp, <limit>
135  // jae ok
136  // call <stack guard>
137  // test rax, <loop nesting depth>
138  // ok: ...
139  //
140  // We will patch away the branch so the code is:
141  //
142  // cmp rsp, <limit> ;; Not changed
143  // nop
144  // nop
145  // call <on-stack replacment>
146  // test rax, <loop nesting depth>
147  // ok:
148  //
149  if (FLAG_count_based_interrupts) {
150  ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
151  ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
152  } else {
153  ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
154  ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
155  }
156  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
157  *(call_target_address - 3) = kNopByteOne;
158  *(call_target_address - 2) = kNopByteTwo;
159  Assembler::set_target_address_at(call_target_address,
160  replacement_code->entry());
161 
162  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
163  unoptimized_code, call_target_address, replacement_code);
164 }
165 
166 
167 void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
168  Address pc_after,
169  Code* check_code,
170  Code* replacement_code) {
171  Address call_target_address = pc_after - kIntSize;
172  ASSERT(replacement_code->entry() ==
173  Assembler::target_address_at(call_target_address));
174  // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
175  // restore the conditional branch.
176  ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
177  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
178  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
179  if (FLAG_count_based_interrupts) {
180  *(call_target_address - 3) = kJnsInstruction;
181  *(call_target_address - 2) = kJnsOffset;
182  } else {
183  *(call_target_address - 3) = kJaeInstruction;
184  *(call_target_address - 2) = kJaeOffset;
185  }
186  Assembler::set_target_address_at(call_target_address,
187  check_code->entry());
188 
189  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
190  unoptimized_code, call_target_address, check_code);
191 }
192 
193 
194 static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
195  ByteArray* translations = data->TranslationByteArray();
196  int length = data->DeoptCount();
197  for (int i = 0; i < length; i++) {
198  if (data->AstId(i) == ast_id) {
199  TranslationIterator it(translations, data->TranslationIndex(i)->value());
200  int value = it.Next();
201  ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
202  // Read the number of frames.
203  value = it.Next();
204  if (value == 1) return i;
205  }
206  }
207  UNREACHABLE();
208  return -1;
209 }
210 
211 
212 void Deoptimizer::DoComputeOsrOutputFrame() {
213  DeoptimizationInputData* data = DeoptimizationInputData::cast(
214  optimized_code_->deoptimization_data());
215  unsigned ast_id = data->OsrAstId()->value();
216  // TODO(kasperl): This should not be the bailout_id_. It should be
217  // the ast id. Confusing.
218  ASSERT(bailout_id_ == ast_id);
219 
220  int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
221  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
222  ByteArray* translations = data->TranslationByteArray();
223 
224  TranslationIterator iterator(translations, translation_index);
225  Translation::Opcode opcode =
226  static_cast<Translation::Opcode>(iterator.Next());
227  ASSERT(Translation::BEGIN == opcode);
228  USE(opcode);
229  int count = iterator.Next();
230  iterator.Skip(1); // Drop JS frame count.
231  ASSERT(count == 1);
232  USE(count);
233 
234  opcode = static_cast<Translation::Opcode>(iterator.Next());
235  USE(opcode);
236  ASSERT(Translation::JS_FRAME == opcode);
237  unsigned node_id = iterator.Next();
238  USE(node_id);
239  ASSERT(node_id == ast_id);
240  int closure_id = iterator.Next();
241  USE(closure_id);
242  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
243  unsigned height = iterator.Next();
244  unsigned height_in_bytes = height * kPointerSize;
245  USE(height_in_bytes);
246 
247  unsigned fixed_size = ComputeFixedSize(function_);
248  unsigned input_frame_size = input_->GetFrameSize();
249  ASSERT(fixed_size + height_in_bytes == input_frame_size);
250 
251  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
252  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
253  unsigned outgoing_size = outgoing_height * kPointerSize;
254  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
255  ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
256 
257  if (FLAG_trace_osr) {
258  PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
259  reinterpret_cast<intptr_t>(function_));
260  function_->PrintName();
261  PrintF(" => node=%u, frame=%d->%d]\n",
262  ast_id,
263  input_frame_size,
264  output_frame_size);
265  }
266 
267  // There's only one output frame in the OSR case.
268  output_count_ = 1;
269  output_ = new FrameDescription*[1];
270  output_[0] = new(output_frame_size) FrameDescription(
271  output_frame_size, function_);
272  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
273 
274  // Clear the incoming parameters in the optimized frame to avoid
275  // confusing the garbage collector.
276  unsigned output_offset = output_frame_size - kPointerSize;
277  int parameter_count = function_->shared()->formal_parameter_count() + 1;
278  for (int i = 0; i < parameter_count; ++i) {
279  output_[0]->SetFrameSlot(output_offset, 0);
280  output_offset -= kPointerSize;
281  }
282 
283  // Translate the incoming parameters. This may overwrite some of the
284  // incoming argument slots we've just cleared.
285  int input_offset = input_frame_size - kPointerSize;
286  bool ok = true;
287  int limit = input_offset - (parameter_count * kPointerSize);
288  while (ok && input_offset > limit) {
289  ok = DoOsrTranslateCommand(&iterator, &input_offset);
290  }
291 
292  // There are no translation commands for the caller's pc and fp, the
293  // context, and the function. Set them up explicitly.
296  i -= kPointerSize) {
297  intptr_t input_value = input_->GetFrameSlot(input_offset);
298  if (FLAG_trace_osr) {
299  const char* name = "UNKNOWN";
300  switch (i) {
302  name = "caller's pc";
303  break;
305  name = "fp";
306  break;
308  name = "context";
309  break;
311  name = "function";
312  break;
313  }
314  PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
315  "(fixed part - %s)\n",
316  output_offset,
317  input_value,
318  input_offset,
319  name);
320  }
321  output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
322  input_offset -= kPointerSize;
323  output_offset -= kPointerSize;
324  }
325 
326  // Translate the rest of the frame.
327  while (ok && input_offset >= 0) {
328  ok = DoOsrTranslateCommand(&iterator, &input_offset);
329  }
330 
331  // If translation of any command failed, continue using the input frame.
332  if (!ok) {
333  delete output_[0];
334  output_[0] = input_;
335  output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
336  } else {
337  // Set up the frame pointer and the context pointer.
338  output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
339  output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
340 
341  unsigned pc_offset = data->OsrPcOffset()->value();
342  intptr_t pc = reinterpret_cast<intptr_t>(
343  optimized_code_->entry() + pc_offset);
344  output_[0]->SetPc(pc);
345  }
346  Code* continuation =
347  function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
348  output_[0]->SetContinuation(
349  reinterpret_cast<intptr_t>(continuation->entry()));
350 
351  if (FLAG_trace_osr) {
352  PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
353  ok ? "finished" : "aborted",
354  reinterpret_cast<intptr_t>(function_));
355  function_->PrintName();
356  PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
357  }
358 }
359 
360 
361 void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
362  int frame_index) {
363  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
364  unsigned height = iterator->Next();
365  unsigned height_in_bytes = height * kPointerSize;
366  if (FLAG_trace_deopt) {
367  PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
368  }
369 
370  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
371  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
372 
373  // Allocate and store the output frame description.
374  FrameDescription* output_frame =
375  new(output_frame_size) FrameDescription(output_frame_size, function);
376  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
377 
378  // Arguments adaptor can not be topmost or bottommost.
379  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
380  ASSERT(output_[frame_index] == NULL);
381  output_[frame_index] = output_frame;
382 
383  // The top address of the frame is computed from the previous
384  // frame's top and this frame's size.
385  intptr_t top_address;
386  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
387  output_frame->SetTop(top_address);
388 
389  // Compute the incoming parameter translation.
390  int parameter_count = height;
391  unsigned output_offset = output_frame_size;
392  for (int i = 0; i < parameter_count; ++i) {
393  output_offset -= kPointerSize;
394  DoTranslateCommand(iterator, frame_index, output_offset);
395  }
396 
397  // Read caller's PC from the previous frame.
398  output_offset -= kPointerSize;
399  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
400  output_frame->SetFrameSlot(output_offset, callers_pc);
401  if (FLAG_trace_deopt) {
402  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
403  V8PRIxPTR " ; caller's pc\n",
404  top_address + output_offset, output_offset, callers_pc);
405  }
406 
407  // Read caller's FP from the previous frame, and set this frame's FP.
408  output_offset -= kPointerSize;
409  intptr_t value = output_[frame_index - 1]->GetFp();
410  output_frame->SetFrameSlot(output_offset, value);
411  intptr_t fp_value = top_address + output_offset;
412  output_frame->SetFp(fp_value);
413  if (FLAG_trace_deopt) {
414  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
415  V8PRIxPTR " ; caller's fp\n",
416  fp_value, output_offset, value);
417  }
418 
419  // A marker value is used in place of the context.
420  output_offset -= kPointerSize;
421  intptr_t context = reinterpret_cast<intptr_t>(
423  output_frame->SetFrameSlot(output_offset, context);
424  if (FLAG_trace_deopt) {
425  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
426  V8PRIxPTR " ; context (adaptor sentinel)\n",
427  top_address + output_offset, output_offset, context);
428  }
429 
430  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
431  output_offset -= kPointerSize;
432  value = reinterpret_cast<intptr_t>(function);
433  output_frame->SetFrameSlot(output_offset, value);
434  if (FLAG_trace_deopt) {
435  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
436  V8PRIxPTR " ; function\n",
437  top_address + output_offset, output_offset, value);
438  }
439 
440  // Number of incoming arguments.
441  output_offset -= kPointerSize;
442  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
443  output_frame->SetFrameSlot(output_offset, value);
444  if (FLAG_trace_deopt) {
445  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
446  V8PRIxPTR " ; argc (%d)\n",
447  top_address + output_offset, output_offset, value, height - 1);
448  }
449 
450  ASSERT(0 == output_offset);
451 
452  Builtins* builtins = isolate_->builtins();
453  Code* adaptor_trampoline =
454  builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
455  intptr_t pc_value = reinterpret_cast<intptr_t>(
456  adaptor_trampoline->instruction_start() +
457  isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
458  output_frame->SetPc(pc_value);
459 }
460 
461 
462 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
463  int frame_index) {
464  Builtins* builtins = isolate_->builtins();
465  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
466  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
467  unsigned height = iterator->Next();
468  unsigned height_in_bytes = height * kPointerSize;
469  if (FLAG_trace_deopt) {
470  PrintF(" translating construct stub => height=%d\n", height_in_bytes);
471  }
472 
473  unsigned fixed_frame_size = 7 * kPointerSize;
474  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
475 
476  // Allocate and store the output frame description.
477  FrameDescription* output_frame =
478  new(output_frame_size) FrameDescription(output_frame_size, function);
479  output_frame->SetFrameType(StackFrame::CONSTRUCT);
480 
481  // Construct stub can not be topmost or bottommost.
482  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
483  ASSERT(output_[frame_index] == NULL);
484  output_[frame_index] = output_frame;
485 
486  // The top address of the frame is computed from the previous
487  // frame's top and this frame's size.
488  intptr_t top_address;
489  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
490  output_frame->SetTop(top_address);
491 
492  // Compute the incoming parameter translation.
493  int parameter_count = height;
494  unsigned output_offset = output_frame_size;
495  for (int i = 0; i < parameter_count; ++i) {
496  output_offset -= kPointerSize;
497  DoTranslateCommand(iterator, frame_index, output_offset);
498  }
499 
500  // Read caller's PC from the previous frame.
501  output_offset -= kPointerSize;
502  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
503  output_frame->SetFrameSlot(output_offset, callers_pc);
504  if (FLAG_trace_deopt) {
505  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
506  V8PRIxPTR " ; caller's pc\n",
507  top_address + output_offset, output_offset, callers_pc);
508  }
509 
510  // Read caller's FP from the previous frame, and set this frame's FP.
511  output_offset -= kPointerSize;
512  intptr_t value = output_[frame_index - 1]->GetFp();
513  output_frame->SetFrameSlot(output_offset, value);
514  intptr_t fp_value = top_address + output_offset;
515  output_frame->SetFp(fp_value);
516  if (FLAG_trace_deopt) {
517  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
518  V8PRIxPTR " ; caller's fp\n",
519  fp_value, output_offset, value);
520  }
521 
522  // The context can be gotten from the previous frame.
523  output_offset -= kPointerSize;
524  value = output_[frame_index - 1]->GetContext();
525  output_frame->SetFrameSlot(output_offset, value);
526  if (FLAG_trace_deopt) {
527  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
528  V8PRIxPTR " ; context\n",
529  top_address + output_offset, output_offset, value);
530  }
531 
532  // A marker value is used in place of the function.
533  output_offset -= kPointerSize;
534  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
535  output_frame->SetFrameSlot(output_offset, value);
536  if (FLAG_trace_deopt) {
537  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
538  V8PRIxPTR " ; function (construct sentinel)\n",
539  top_address + output_offset, output_offset, value);
540  }
541 
542  // The output frame reflects a JSConstructStubGeneric frame.
543  output_offset -= kPointerSize;
544  value = reinterpret_cast<intptr_t>(construct_stub);
545  output_frame->SetFrameSlot(output_offset, value);
546  if (FLAG_trace_deopt) {
547  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
548  V8PRIxPTR " ; code object\n",
549  top_address + output_offset, output_offset, value);
550  }
551 
552  // Number of incoming arguments.
553  output_offset -= kPointerSize;
554  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
555  output_frame->SetFrameSlot(output_offset, value);
556  if (FLAG_trace_deopt) {
557  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
558  V8PRIxPTR " ; argc (%d)\n",
559  top_address + output_offset, output_offset, value, height - 1);
560  }
561 
562  // The newly allocated object was passed as receiver in the artificial
563  // constructor stub environment created by HEnvironment::CopyForInlining().
564  output_offset -= kPointerSize;
565  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
566  output_frame->SetFrameSlot(output_offset, value);
567  if (FLAG_trace_deopt) {
568  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
569  V8PRIxPTR " ; allocated receiver\n",
570  top_address + output_offset, output_offset, value);
571  }
572 
573  ASSERT(0 == output_offset);
574 
575  intptr_t pc = reinterpret_cast<intptr_t>(
576  construct_stub->instruction_start() +
577  isolate_->heap()->construct_stub_deopt_pc_offset()->value());
578  output_frame->SetPc(pc);
579 }
580 
581 
582 void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
583  int frame_index,
584  bool is_setter_stub_frame) {
585  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
586  // The receiver (and the implicit return value, if any) are expected in
587  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
588  // frame. This means that we have to use a height of 0.
589  unsigned height = 0;
590  unsigned height_in_bytes = height * kPointerSize;
591  const char* kind = is_setter_stub_frame ? "setter" : "getter";
592  if (FLAG_trace_deopt) {
593  PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
594  }
595 
596  // We need 1 stack entry for the return address + 4 stack entries from
597  // StackFrame::INTERNAL (FP, context, frame type, code object, see
598  // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
599  // entry for the implicit return value, see
600  // StoreStubCompiler::CompileStoreViaSetter.
601  unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
602  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
603  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
604 
605  // Allocate and store the output frame description.
606  FrameDescription* output_frame =
607  new(output_frame_size) FrameDescription(output_frame_size, accessor);
608  output_frame->SetFrameType(StackFrame::INTERNAL);
609 
610  // A frame for an accessor stub can not be the topmost or bottommost one.
611  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
612  ASSERT(output_[frame_index] == NULL);
613  output_[frame_index] = output_frame;
614 
615  // The top address of the frame is computed from the previous frame's top and
616  // this frame's size.
617  intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
618  output_frame->SetTop(top_address);
619 
620  unsigned output_offset = output_frame_size;
621 
622  // Read caller's PC from the previous frame.
623  output_offset -= kPointerSize;
624  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
625  output_frame->SetFrameSlot(output_offset, callers_pc);
626  if (FLAG_trace_deopt) {
627  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
628  " ; caller's pc\n",
629  top_address + output_offset, output_offset, callers_pc);
630  }
631 
632  // Read caller's FP from the previous frame, and set this frame's FP.
633  output_offset -= kPointerSize;
634  intptr_t value = output_[frame_index - 1]->GetFp();
635  output_frame->SetFrameSlot(output_offset, value);
636  intptr_t fp_value = top_address + output_offset;
637  output_frame->SetFp(fp_value);
638  if (FLAG_trace_deopt) {
639  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
640  " ; caller's fp\n",
641  fp_value, output_offset, value);
642  }
643 
644  // The context can be gotten from the previous frame.
645  output_offset -= kPointerSize;
646  value = output_[frame_index - 1]->GetContext();
647  output_frame->SetFrameSlot(output_offset, value);
648  if (FLAG_trace_deopt) {
649  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
650  " ; context\n",
651  top_address + output_offset, output_offset, value);
652  }
653 
654  // A marker value is used in place of the function.
655  output_offset -= kPointerSize;
656  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
657  output_frame->SetFrameSlot(output_offset, value);
658  if (FLAG_trace_deopt) {
659  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
660  " ; function (%s sentinel)\n",
661  top_address + output_offset, output_offset, value, kind);
662  }
663 
664  // Get Code object from accessor stub.
665  output_offset -= kPointerSize;
666  Builtins::Name name = is_setter_stub_frame ?
667  Builtins::kStoreIC_Setter_ForDeopt :
668  Builtins::kLoadIC_Getter_ForDeopt;
669  Code* accessor_stub = isolate_->builtins()->builtin(name);
670  value = reinterpret_cast<intptr_t>(accessor_stub);
671  output_frame->SetFrameSlot(output_offset, value);
672  if (FLAG_trace_deopt) {
673  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
674  " ; code object\n",
675  top_address + output_offset, output_offset, value);
676  }
677 
678  // Skip receiver.
679  Translation::Opcode opcode =
680  static_cast<Translation::Opcode>(iterator->Next());
681  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
682 
683  if (is_setter_stub_frame) {
684  // The implicit return value was part of the artificial setter stub
685  // environment.
686  output_offset -= kPointerSize;
687  DoTranslateCommand(iterator, frame_index, output_offset);
688  }
689 
690  ASSERT(0 == output_offset);
691 
692  Smi* offset = is_setter_stub_frame ?
693  isolate_->heap()->setter_stub_deopt_pc_offset() :
694  isolate_->heap()->getter_stub_deopt_pc_offset();
695  intptr_t pc = reinterpret_cast<intptr_t>(
696  accessor_stub->instruction_start() + offset->value());
697  output_frame->SetPc(pc);
698 }
699 
700 
701 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
702  int frame_index) {
703  BailoutId node_id = BailoutId(iterator->Next());
704  JSFunction* function;
705  if (frame_index != 0) {
706  function = JSFunction::cast(ComputeLiteral(iterator->Next()));
707  } else {
708  int closure_id = iterator->Next();
709  USE(closure_id);
710  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
711  function = function_;
712  }
713  unsigned height = iterator->Next();
714  unsigned height_in_bytes = height * kPointerSize;
715  if (FLAG_trace_deopt) {
716  PrintF(" translating ");
717  function->PrintName();
718  PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
719  }
720 
721  // The 'fixed' part of the frame consists of the incoming parameters and
722  // the part described by JavaScriptFrameConstants.
723  unsigned fixed_frame_size = ComputeFixedSize(function);
724  unsigned input_frame_size = input_->GetFrameSize();
725  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
726 
727  // Allocate and store the output frame description.
728  FrameDescription* output_frame =
729  new(output_frame_size) FrameDescription(output_frame_size, function);
730  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
731 
732  bool is_bottommost = (0 == frame_index);
733  bool is_topmost = (output_count_ - 1 == frame_index);
734  ASSERT(frame_index >= 0 && frame_index < output_count_);
735  ASSERT(output_[frame_index] == NULL);
736  output_[frame_index] = output_frame;
737 
738  // The top address for the bottommost output frame can be computed from
739  // the input frame pointer and the output frame's height. For all
740  // subsequent output frames, it can be computed from the previous one's
741  // top address and the current frame's size.
742  intptr_t top_address;
743  if (is_bottommost) {
744  // 2 = context and function in the frame.
745  top_address =
746  input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
747  } else {
748  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
749  }
750  output_frame->SetTop(top_address);
751 
752  // Compute the incoming parameter translation.
753  int parameter_count = function->shared()->formal_parameter_count() + 1;
754  unsigned output_offset = output_frame_size;
755  unsigned input_offset = input_frame_size;
756  for (int i = 0; i < parameter_count; ++i) {
757  output_offset -= kPointerSize;
758  DoTranslateCommand(iterator, frame_index, output_offset);
759  }
760  input_offset -= (parameter_count * kPointerSize);
761 
762  // There are no translation commands for the caller's pc and fp, the
763  // context, and the function. Synthesize their values and set them up
764  // explicitly.
765  //
766  // The caller's pc for the bottommost output frame is the same as in the
767  // input frame. For all subsequent output frames, it can be read from the
768  // previous one. This frame's pc can be computed from the non-optimized
769  // function code and AST id of the bailout.
770  output_offset -= kPointerSize;
771  input_offset -= kPointerSize;
772  intptr_t value;
773  if (is_bottommost) {
774  value = input_->GetFrameSlot(input_offset);
775  } else {
776  value = output_[frame_index - 1]->GetPc();
777  }
778  output_frame->SetFrameSlot(output_offset, value);
779  if (FLAG_trace_deopt) {
780  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
781  V8PRIxPTR " ; caller's pc\n",
782  top_address + output_offset, output_offset, value);
783  }
784 
785  // The caller's frame pointer for the bottommost output frame is the same
786  // as in the input frame. For all subsequent output frames, it can be
787  // read from the previous one. Also compute and set this frame's frame
788  // pointer.
789  output_offset -= kPointerSize;
790  input_offset -= kPointerSize;
791  if (is_bottommost) {
792  value = input_->GetFrameSlot(input_offset);
793  } else {
794  value = output_[frame_index - 1]->GetFp();
795  }
796  output_frame->SetFrameSlot(output_offset, value);
797  intptr_t fp_value = top_address + output_offset;
798  ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
799  output_frame->SetFp(fp_value);
800  if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
801  if (FLAG_trace_deopt) {
802  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
803  V8PRIxPTR " ; caller's fp\n",
804  fp_value, output_offset, value);
805  }
806 
807  // For the bottommost output frame the context can be gotten from the input
808  // frame. For all subsequent output frames it can be gotten from the function
809  // so long as we don't inline functions that need local contexts.
810  output_offset -= kPointerSize;
811  input_offset -= kPointerSize;
812  if (is_bottommost) {
813  value = input_->GetFrameSlot(input_offset);
814  } else {
815  value = reinterpret_cast<intptr_t>(function->context());
816  }
817  output_frame->SetFrameSlot(output_offset, value);
818  output_frame->SetContext(value);
819  if (is_topmost) output_frame->SetRegister(rsi.code(), value);
820  if (FLAG_trace_deopt) {
821  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
822  V8PRIxPTR "; context\n",
823  top_address + output_offset, output_offset, value);
824  }
825 
826  // The function was mentioned explicitly in the BEGIN_FRAME.
827  output_offset -= kPointerSize;
828  input_offset -= kPointerSize;
829  value = reinterpret_cast<intptr_t>(function);
830  // The function for the bottommost output frame should also agree with the
831  // input frame.
832  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
833  output_frame->SetFrameSlot(output_offset, value);
834  if (FLAG_trace_deopt) {
835  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
836  V8PRIxPTR "; function\n",
837  top_address + output_offset, output_offset, value);
838  }
839 
840  // Translate the rest of the frame.
841  for (unsigned i = 0; i < height; ++i) {
842  output_offset -= kPointerSize;
843  DoTranslateCommand(iterator, frame_index, output_offset);
844  }
845  ASSERT(0 == output_offset);
846 
847  // Compute this frame's PC, state, and continuation.
848  Code* non_optimized_code = function->shared()->code();
849  FixedArray* raw_data = non_optimized_code->deoptimization_data();
850  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
851  Address start = non_optimized_code->instruction_start();
852  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
853  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
854  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
855  output_frame->SetPc(pc_value);
856 
859  output_frame->SetState(Smi::FromInt(state));
860 
861  // Set the continuation for the topmost frame.
862  if (is_topmost && bailout_type_ != DEBUGGER) {
863  Code* continuation = (bailout_type_ == EAGER)
864  ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
865  : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
866  output_frame->SetContinuation(
867  reinterpret_cast<intptr_t>(continuation->entry()));
868  }
869 }
870 
871 
872 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
873  // Set the register values. The values are not important as there are no
874  // callee saved registers in JavaScript frames, so all registers are
875  // spilled. Registers rbp and rsp are set to the correct values though.
876  for (int i = 0; i < Register::kNumRegisters; i++) {
877  input_->SetRegister(i, i * 4);
878  }
879  input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
880  input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
881  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
882  input_->SetDoubleRegister(i, 0.0);
883  }
884 
885  // Fill the frame content from the actual data on the frame.
886  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
887  input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
888  }
889 }
890 
891 
892 #define __ masm()->
893 
894 void Deoptimizer::EntryGenerator::Generate() {
895  GeneratePrologue();
896 
897  // Save all general purpose registers before messing with them.
898  const int kNumberOfRegisters = Register::kNumRegisters;
899 
900  const int kDoubleRegsSize = kDoubleSize *
902  __ subq(rsp, Immediate(kDoubleRegsSize));
903 
904  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
905  XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
906  int offset = i * kDoubleSize;
907  __ movsd(Operand(rsp, offset), xmm_reg);
908  }
909 
910  // We push all registers onto the stack, even though we do not need
911  // to restore all later.
912  for (int i = 0; i < kNumberOfRegisters; i++) {
913  Register r = Register::from_code(i);
914  __ push(r);
915  }
916 
917  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
918  kDoubleRegsSize;
919 
920  // When calling new_deoptimizer_function we need to pass the last argument
921  // on the stack on windows and in r8 on linux. The remaining arguments are
922  // all passed in registers (different ones on linux and windows though).
923 
924 #ifdef _WIN64
925  Register arg4 = r9;
926  Register arg3 = r8;
927  Register arg2 = rdx;
928  Register arg1 = rcx;
929 #else
930  Register arg4 = rcx;
931  Register arg3 = rdx;
932  Register arg2 = rsi;
933  Register arg1 = rdi;
934 #endif
935 
936  // We use this to keep the value of the fifth argument temporarily.
937  // Unfortunately we can't store it directly in r8 (used for passing
938  // this on linux), since it is another parameter passing register on windows.
939  Register arg5 = r11;
940 
941  // Get the bailout id from the stack.
942  __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
943 
944  // Get the address of the location in the code object if possible
945  // and compute the fp-to-sp delta in register arg5.
946  if (type() == EAGER) {
947  __ Set(arg4, 0);
948  __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
949  } else {
950  __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
951  __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
952  }
953 
954  __ subq(arg5, rbp);
955  __ neg(arg5);
956 
957  // Allocate a new deoptimizer object.
958  __ PrepareCallCFunction(6);
960  __ movq(arg1, rax);
961  __ Set(arg2, type());
962  // Args 3 and 4 are already in the right registers.
963 
964  // On windows put the arguments on the stack (PrepareCallCFunction
965  // has created space for this). On linux pass the arguments in r8 and r9.
966 #ifdef _WIN64
967  __ movq(Operand(rsp, 4 * kPointerSize), arg5);
968  __ LoadAddress(arg5, ExternalReference::isolate_address());
969  __ movq(Operand(rsp, 5 * kPointerSize), arg5);
970 #else
971  __ movq(r8, arg5);
972  __ LoadAddress(r9, ExternalReference::isolate_address());
973 #endif
974 
975  Isolate* isolate = masm()->isolate();
976 
977  {
978  AllowExternalCallThatCantCauseGC scope(masm());
979  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
980  }
981  // Preserve deoptimizer object in register rax and get the input
982  // frame descriptor pointer.
983  __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
984 
985  // Fill in the input registers.
986  for (int i = kNumberOfRegisters -1; i >= 0; i--) {
987  int offset = (i * kPointerSize) + FrameDescription::registers_offset();
988  __ pop(Operand(rbx, offset));
989  }
990 
991  // Fill in the double input registers.
992  int double_regs_offset = FrameDescription::double_registers_offset();
993  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
994  int dst_offset = i * kDoubleSize + double_regs_offset;
995  __ pop(Operand(rbx, dst_offset));
996  }
997 
998  // Remove the bailout id from the stack.
999  if (type() == EAGER) {
1000  __ addq(rsp, Immediate(kPointerSize));
1001  } else {
1002  __ addq(rsp, Immediate(2 * kPointerSize));
1003  }
1004 
1005  // Compute a pointer to the unwinding limit in register rcx; that is
1006  // the first stack slot not part of the input frame.
1007  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
1008  __ addq(rcx, rsp);
1009 
1010  // Unwind the stack down to - but not including - the unwinding
1011  // limit and copy the contents of the activation frame to the input
1012  // frame description.
1014  Label pop_loop;
1015  __ bind(&pop_loop);
1016  __ pop(Operand(rdx, 0));
1017  __ addq(rdx, Immediate(sizeof(intptr_t)));
1018  __ cmpq(rcx, rsp);
1019  __ j(not_equal, &pop_loop);
1020 
1021  // Compute the output frame in the deoptimizer.
1022  __ push(rax);
1023  __ PrepareCallCFunction(2);
1024  __ movq(arg1, rax);
1025  __ LoadAddress(arg2, ExternalReference::isolate_address());
1026  {
1027  AllowExternalCallThatCantCauseGC scope(masm());
1028  __ CallCFunction(
1029  ExternalReference::compute_output_frames_function(isolate), 2);
1030  }
1031  __ pop(rax);
1032 
1033  // Replace the current frame with the output frames.
1034  Label outer_push_loop, inner_push_loop;
1035  // Outer loop state: rax = current FrameDescription**, rdx = one past the
1036  // last FrameDescription**.
1037  __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
1038  __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
1039  __ lea(rdx, Operand(rax, rdx, times_8, 0));
1040  __ bind(&outer_push_loop);
1041  // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
1042  __ movq(rbx, Operand(rax, 0));
1043  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
1044  __ bind(&inner_push_loop);
1045  __ subq(rcx, Immediate(sizeof(intptr_t)));
1047  __ testq(rcx, rcx);
1048  __ j(not_zero, &inner_push_loop);
1049  __ addq(rax, Immediate(kPointerSize));
1050  __ cmpq(rax, rdx);
1051  __ j(below, &outer_push_loop);
1052 
1053  // In case of OSR, we have to restore the XMM registers.
1054  if (type() == OSR) {
1055  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
1056  XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
1057  int src_offset = i * kDoubleSize + double_regs_offset;
1058  __ movsd(xmm_reg, Operand(rbx, src_offset));
1059  }
1060  }
1061 
1062  // Push state, pc, and continuation from the last output frame.
1063  if (type() != OSR) {
1064  __ push(Operand(rbx, FrameDescription::state_offset()));
1065  }
1066  __ push(Operand(rbx, FrameDescription::pc_offset()));
1067  __ push(Operand(rbx, FrameDescription::continuation_offset()));
1068 
1069  // Push the registers from the last output frame.
1070  for (int i = 0; i < kNumberOfRegisters; i++) {
1071  int offset = (i * kPointerSize) + FrameDescription::registers_offset();
1072  __ push(Operand(rbx, offset));
1073  }
1074 
1075  // Restore the registers from the stack.
1076  for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
1077  Register r = Register::from_code(i);
1078  // Do not restore rsp, simply pop the value into the next register
1079  // and overwrite this afterwards.
1080  if (r.is(rsp)) {
1081  ASSERT(i > 0);
1082  r = Register::from_code(i - 1);
1083  }
1084  __ pop(r);
1085  }
1086 
1087  // Set up the roots register.
1088  __ InitializeRootRegister();
1089  __ InitializeSmiConstantRegister();
1090 
1091  // Return to the continuation point.
1092  __ ret(0);
1093 }
1094 
1095 
1097  // Create a sequence of deoptimization entries.
1098  Label done;
1099  for (int i = 0; i < count(); i++) {
1100  int start = masm()->pc_offset();
1101  USE(start);
1102  __ push_imm32(i);
1103  __ jmp(&done);
1104  ASSERT(masm()->pc_offset() - start == table_entry_size_);
1105  }
1106  __ bind(&done);
1107 }
1108 
1109 #undef __
1110 
1111 
1112 } } // namespace v8::internal
1113 
1114 #endif // V8_TARGET_ARCH_X64
byte * Address
Definition: globals.h:157
const Register rdx
Code * builtin(Name name)
Definition: builtins.h:320
static DeoptimizationOutputData * cast(Object *obj)
#define V8PRIxPTR
Definition: globals.h:189
void PrintF(const char *format,...)
Definition: v8utils.cc:40
const Register r11
unsigned stack_slots()
Definition: objects-inl.h:3318
static Smi * FromInt(int value)
Definition: objects-inl.h:981
const Register rbp
void SetFrameSlot(unsigned offset, intptr_t value)
Definition: deoptimizer.h:424
const Register rsi
Builtins * builtins()
Definition: isolate.h:924
static XMMRegister FromAllocationIndex(int index)
static const int kNumAllocatableRegisters
static const int kCallInstructionLength
#define ASSERT(condition)
Definition: checks.h:270
static void DeoptimizeFunction(JSFunction *function)
intptr_t GetContext() const
Definition: deoptimizer.h:457
void SetFrameType(StackFrame::Type type)
Definition: deoptimizer.h:466
const int kIntSize
Definition: globals.h:217
static const int kNumRegisters
Definition: assembler-arm.h:73
static int double_registers_offset()
Definition: deoptimizer.h:484
uint8_t byte
Definition: globals.h:156
#define UNREACHABLE()
Definition: checks.h:50
static int output_offset()
Definition: deoptimizer.h:240
const int kDoubleSize
Definition: globals.h:218
const Register r9
const int kPointerSize
Definition: globals.h:220
static void set_target_address_at(Address pc, Address target)
const Register rbx
const Register rsp
#define __
void SetRegister(unsigned n, intptr_t value)
Definition: deoptimizer.h:438
static unsigned decode(uint32_t value)
Definition: utils.h:273
const Register pc
friend class DeoptimizingCodeListNode
Definition: deoptimizer.h:370
static Register from_code(int code)
const Register rax
const Register rdi
static int GetOutputInfo(DeoptimizationOutputData *data, BailoutId node_id, SharedFunctionInfo *shared)
Definition: deoptimizer.cc:498
static void ReplaceCodeForRelatedFunctions(JSFunction *function, Code *code)
uint32_t GetFrameSize() const
Definition: deoptimizer.h:394
void SetContinuation(intptr_t pc)
Definition: deoptimizer.h:463
static int output_count_offset()
Definition: deoptimizer.h:237
static const int kNumAllocatableRegisters
static void RevertStackCheckCodeAt(Code *unoptimized_code, Address pc_after, Code *check_code, Code *replacement_code)
static Address target_address_at(Address pc)
intptr_t GetFrameSlot(unsigned offset)
Definition: deoptimizer.h:403
static Address GetDeoptimizationEntry(int id, BailoutType type)
Definition: deoptimizer.cc:457
const Register r8
const Register rcx
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
friend class FrameDescription
Definition: deoptimizer.h:369
static uint64_t & uint64_at(Address addr)
Definition: v8memory.h:55
void USE(T)
Definition: globals.h:289
virtual void GeneratePrologue()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
intptr_t GetRegister(unsigned n) const
Definition: deoptimizer.h:428
void SetDoubleRegister(unsigned n, double value)
Definition: deoptimizer.h:443
static void PatchStackCheckCodeAt(Code *unoptimized_code, Address pc_after, Code *check_code, Code *replacement_code)
static DeoptimizationInputData * cast(Object *obj)
static JSFunction * cast(Object *obj)