v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
deoptimizer-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

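// Each entry in the deoptimization entry table emitted by
// TableEntryGenerator::GeneratePrologue() below is four ARM instructions,
// i.e. 16 bytes.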
const int Deoptimizer::table_entry_size_ = 16;


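// Size of the code patched over a lazy bailout site by DeoptimizeFunction():
// a call sequence of at most three instructions.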
int Deoptimizer::patch_size() {
  const int kCallInstructionSizeInWords = 3;
  return kCallInstructionSizeInWords * Assembler::kInstrSize;
}


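// Force deoptimization of an optimized function: patch every lazy bailout
// point in its code with a call to the corresponding lazy deoptimization
// entry and add the code object to the isolate's list of deoptimizing code.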
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  HandleScope scope;
  AssertNoAllocation no_allocation;

  if (!function->IsOptimized()) return;

  // The optimized code is going to be patched, so we cannot use it
  // any more.  Play safe and reset the whole cache.
  function->shared()->ClearOptimizedCodeMap();

  // Get the optimized code.
  Code* code = function->code();
  Address code_start_address = code->instruction_start();

  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
    // We need calls to have a predictable size in the unoptimized code, but
    // this is optimized code, so we don't have to have a predictable size.
    int call_size_in_bytes =
        MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
                                                       RelocInfo::NONE);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
    ASSERT(call_size_in_bytes <= patch_size());
    CodePatcher patcher(call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  Isolate* isolate = code->GetIsolate();

  // Add the deoptimizing code to the list.
  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
  DeoptimizerData* data = isolate->deoptimizer_data();
  node->set_next(data->deoptimizing_code_list_);
  data->deoptimizing_code_list_ = node;

  // We might be in the middle of incremental marking with compaction.
  // Tell collector to treat this code object in a special way and
  // ignore all slots that might have been recorded on it.
  isolate->heap()->mark_compact_collector()->InvalidateCode(code);

  ReplaceCodeForRelatedFunctions(function, code);

  if (FLAG_trace_deopt) {
    PrintF("[forced deoptimization: ");
    function->PrintName();
    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
  }
}


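// Encodings of the conditional branch that precedes the stack-guard /
// interrupt check call in unoptimized code. PatchStackCheckCodeAt()
// replaces the branch with a NOP; RevertStackCheckCodeAt() restores it.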
static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt =  0x5a000004;


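// Patch the stack check (or interrupt check) in unoptimized code so that it
// calls the on-stack replacement stub instead of the stack check stub.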
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* check_code,
                                        Code* replacement_code) {
  const int kInstrSize = Assembler::kInstrSize;
  // The call of the stack guard check has the following form:
  //  e1 5d 00 0c       cmp sp, <limit>
  //  2a 00 00 01       bcs ok
  //  e5 9f c? ??       ldr ip, [pc, <stack guard address>]
  //  e1 2f ff 3c       blx ip
  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
  ASSERT(Assembler::IsLdrPcImmediateOffset(
      Assembler::instr_at(pc_after - 2 * kInstrSize)));
  if (FLAG_count_based_interrupts) {
    ASSERT_EQ(kBranchBeforeInterrupt,
              Memory::int32_at(pc_after - 3 * kInstrSize));
  } else {
    ASSERT_EQ(kBranchBeforeStackCheck,
              Memory::int32_at(pc_after - 3 * kInstrSize));
  }

  // We patch the code to the following form:
  //  e1 5d 00 0c       cmp sp, <limit>
  //  e1 a0 00 00       mov r0, r0 (NOP)
  //  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
  //  e1 2f ff 3c       blx ip
  // and overwrite the constant containing the
  // address of the stack check stub.

  // Replace conditional jump with NOP.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  patcher.masm()->nop();

  // Replace the stack check address in the constant pool
  // with the entry address of the replacement code.
  uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
      2 * kInstrSize) & 0xfff;
  Address stack_check_address_pointer = pc_after + stack_check_address_offset;
  ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
         reinterpret_cast<uint32_t>(check_code->entry()));
  Memory::uint32_at(stack_check_address_pointer) =
      reinterpret_cast<uint32_t>(replacement_code->entry());

  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}


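// Undo the patching done by PatchStackCheckCodeAt: restore the conditional
// branch and point the constant pool slot back at the stack check stub.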
void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
  const int kInstrSize = Assembler::kInstrSize;
  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
  ASSERT(Assembler::IsLdrPcImmediateOffset(
      Assembler::instr_at(pc_after - 2 * kInstrSize)));

  // Replace NOP with conditional jump.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  if (FLAG_count_based_interrupts) {
    patcher.masm()->b(+16, pl);
    ASSERT_EQ(kBranchBeforeInterrupt,
              Memory::int32_at(pc_after - 3 * kInstrSize));
  } else {
    patcher.masm()->b(+4, cs);
    ASSERT_EQ(kBranchBeforeStackCheck,
              Memory::int32_at(pc_after - 3 * kInstrSize));
  }

  // Restore the stack check address in the constant pool
  // to the entry address of the stack check code.
  uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
      2 * kInstrSize) & 0xfff;
  Address stack_check_address_pointer = pc_after + stack_check_address_offset;
  ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
         reinterpret_cast<uint32_t>(replacement_code->entry()));
  Memory::uint32_at(stack_check_address_pointer) =
      reinterpret_cast<uint32_t>(check_code->entry());

  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, check_code);
}


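// Find the index of the deoptimization data entry whose AST id matches the
// given bailout id and whose translation describes exactly one frame.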
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
  ByteArray* translations = data->TranslationByteArray();
  int length = data->DeoptCount();
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == ast_id) {
      TranslationIterator it(translations, data->TranslationIndex(i)->value());
      int value = it.Next();
      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
      // Read the number of frames.
      value = it.Next();
      if (value == 1) return i;
    }
  }
  UNREACHABLE();
  return -1;
}


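// Build the single unoptimized frame used to resume execution after
// on-stack replacement. The frame is reconstructed from the translation
// recorded for the OSR entry; if any translation command fails, the input
// frame is reused unchanged.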
void Deoptimizer::DoComputeOsrOutputFrame() {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned ast_id = data->OsrAstId()->value();

  int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
  ByteArray* translations = data->TranslationByteArray();

  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
  int count = iterator.Next();
  iterator.Skip(1);  // Drop JS frame count.
  ASSERT(count == 1);
  USE(count);

  opcode = static_cast<Translation::Opcode>(iterator.Next());
  USE(opcode);
  ASSERT(Translation::JS_FRAME == opcode);
  unsigned node_id = iterator.Next();
  USE(node_id);
  ASSERT(node_id == ast_id);
  int closure_id = iterator.Next();
  USE(closure_id);
  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
  unsigned height = iterator.Next();
  unsigned height_in_bytes = height * kPointerSize;
  USE(height_in_bytes);

  unsigned fixed_size = ComputeFixedSize(function_);
  unsigned input_frame_size = input_->GetFrameSize();
  ASSERT(fixed_size + height_in_bytes == input_frame_size);

  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
  unsigned outgoing_size = outgoing_height * kPointerSize;
  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" => node=%u, frame=%d->%d]\n",
           ast_id,
           input_frame_size,
           output_frame_size);
  }

  // There's only one output frame in the OSR case.
  output_count_ = 1;
  output_ = new FrameDescription*[1];
  output_[0] = new(output_frame_size) FrameDescription(
      output_frame_size, function_);
  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);

  // Clear the incoming parameters in the optimized frame to avoid
  // confusing the garbage collector.
  unsigned output_offset = output_frame_size - kPointerSize;
  int parameter_count = function_->shared()->formal_parameter_count() + 1;
  for (int i = 0; i < parameter_count; ++i) {
    output_[0]->SetFrameSlot(output_offset, 0);
    output_offset -= kPointerSize;
  }

  // Translate the incoming parameters. This may overwrite some of the
  // incoming argument slots we've just cleared.
  int input_offset = input_frame_size - kPointerSize;
  bool ok = true;
  int limit = input_offset - (parameter_count * kPointerSize);
  while (ok && input_offset > limit) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Set them up explicitly.
  for (int i = StandardFrameConstants::kCallerPCOffset;
       ok && i >= StandardFrameConstants::kMarkerOffset;
       i -= kPointerSize) {
    uint32_t input_value = input_->GetFrameSlot(input_offset);
    if (FLAG_trace_osr) {
      const char* name = "UNKNOWN";
      switch (i) {
        case StandardFrameConstants::kCallerPCOffset:
          name = "caller's pc";
          break;
        case StandardFrameConstants::kCallerFPOffset:
          name = "fp";
          break;
        case StandardFrameConstants::kContextOffset:
          name = "context";
          break;
        case StandardFrameConstants::kMarkerOffset:
          name = "function";
          break;
      }
      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
             output_offset,
             input_value,
             input_offset,
             name);
    }

    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
    input_offset -= kPointerSize;
    output_offset -= kPointerSize;
  }

  // Translate the rest of the frame.
  while (ok && input_offset >= 0) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // If translation of any command failed, continue using the input frame.
  if (!ok) {
    delete output_[0];
    output_[0] = input_;
    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
  } else {
    // Set up the frame pointer and the context pointer.
    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));

    unsigned pc_offset = data->OsrPcOffset()->value();
    uint32_t pc = reinterpret_cast<uint32_t>(
        optimized_code_->entry() + pc_offset);
    output_[0]->SetPc(pc);
  }
  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
  output_[0]->SetContinuation(
      reinterpret_cast<uint32_t>(continuation->entry()));

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
           ok ? "finished" : "aborted",
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
  }
}


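// Build an arguments adaptor frame. Its fixed part holds the caller's pc
// and fp, an adaptor sentinel in place of the context, the adapted
// function, and the argument count.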
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
                                                 int frame_index) {
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);

  // Arguments adaptor can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // A marker value is used in place of the context.
  output_offset -= kPointerSize;
  intptr_t context = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  output_frame->SetFrameSlot(output_offset, context);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
           top_address + output_offset, output_offset, context);
  }

  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  ASSERT(0 == output_offset);

  Builtins* builtins = isolate_->builtins();
  Code* adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  uint32_t pc = reinterpret_cast<uint32_t>(
      adaptor_trampoline->instruction_start() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}


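// Build an internal frame for the generic construct stub. The eight fixed
// slots are the caller's pc, the caller's fp, the context, the CONSTRUCT
// frame sentinel, the construct stub code object, the argument count, the
// constructor function, and the newly allocated receiver.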
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                              int frame_index) {
  Builtins* builtins = isolate_->builtins();
  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = 8 * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::CONSTRUCT);

  // Construct stub can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
           top_address + output_offset, output_offset, value);
  }

  // The output frame reflects a JSConstructStubGeneric frame.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(construct_stub);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  // Constructor function being invoked by the stub.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
           top_address + output_offset, output_offset, value);
  }

  // The newly allocated object was passed as receiver in the artificial
  // constructor stub environment created by HEnvironment::CopyForInlining().
  output_offset -= kPointerSize;
  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
           top_address + output_offset, output_offset, value);
  }

  ASSERT(0 == output_offset);

  uint32_t pc = reinterpret_cast<uint32_t>(
      construct_stub->instruction_start() +
      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}


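// Build a StackFrame::INTERNAL frame for a getter or setter stub. The
// receiver stays in a register, so only the fixed part of the frame (plus,
// for a setter, the stored value) is materialized here.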
void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                             int frame_index,
                                             bool is_setter_stub_frame) {
  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
  // The receiver (and the implicit return value, if any) are expected in
  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
  // frame. This means that we have to use a height of 0.
  unsigned height = 0;
  unsigned height_in_bytes = height * kPointerSize;
  const char* kind = is_setter_stub_frame ? "setter" : "getter";
  if (FLAG_trace_deopt) {
    PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
  }

  // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
  // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
  // need one additional entry for the implicit return value, see
  // StoreStubCompiler::CompileStoreViaSetter.
  unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, accessor);
  output_frame->SetFrameType(StackFrame::INTERNAL);

  // A frame for an accessor stub can not be the topmost or bottommost one.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  unsigned output_offset = output_frame_size;

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; function (%s sentinel)\n",
           top_address + output_offset, output_offset, value, kind);
  }

  // Get Code object from accessor stub.
  output_offset -= kPointerSize;
  Builtins::Name name = is_setter_stub_frame ?
      Builtins::kStoreIC_Setter_ForDeopt :
      Builtins::kLoadIC_Getter_ForDeopt;
  Code* accessor_stub = isolate_->builtins()->builtin(name);
  value = reinterpret_cast<intptr_t>(accessor_stub);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; code object\n",
           top_address + output_offset, output_offset, value);
  }

  // Skip receiver.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  iterator->Skip(Translation::NumberOfOperandsFor(opcode));

  if (is_setter_stub_frame) {
    // The implicit return value was part of the artificial setter stub
    // environment.
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  ASSERT(0 == output_offset);

  Smi* offset = is_setter_stub_frame ?
      isolate_->heap()->setter_stub_deopt_pc_offset() :
      isolate_->heap()->getter_stub_deopt_pc_offset();
  intptr_t pc = reinterpret_cast<intptr_t>(
      accessor_stub->instruction_start() + offset->value());
  output_frame->SetPc(pc);
}


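// Build a full JavaScript frame from the translation: incoming parameters,
// caller's pc and fp, context, function, and the locals/expression stack,
// then compute the pc in the unoptimized code at which execution resumes.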
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
                                   int frame_index) {
  // Read the ast node id, function, and frame height for this output frame.
  BailoutId node_id = BailoutId(iterator->Next());
  JSFunction* function;
  if (frame_index != 0) {
    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  } else {
    int closure_id = iterator->Next();
    USE(closure_id);
    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
    function = function_;
  }
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating ");
    function->PrintName();
    PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeFixedSize(function);
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);

  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  ASSERT(frame_index >= 0 && frame_index < output_count_);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address for the bottommost output frame can be computed from
  // the input frame pointer and the output frame's height.  For all
  // subsequent output frames, it can be computed from the previous one's
  // top address and the current frame's size.
  uint32_t top_address;
  if (is_bottommost) {
    // 2 = context and function in the frame.
    top_address =
        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned output_offset = output_frame_size;
  unsigned input_offset = input_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  input_offset -= (parameter_count * kPointerSize);

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function.  Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame.  For all subsequent output frames, it can be read from the
  // previous one.  This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  intptr_t value;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, value);
  }

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame.  For all subsequent output frames, it can be
  // read from the previous one.  Also compute and set this frame's frame
  // pointer.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    output_frame->SetRegister(fp.code(), fp_value);
  }
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // For the bottommost output frame the context can be gotten from the input
  // frame. For all subsequent output frames it can be gotten from the function
  // so long as we don't inline functions that need local contexts.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = reinterpret_cast<intptr_t>(function->context());
  }
  output_frame->SetFrameSlot(output_offset, value);
  output_frame->SetContext(value);
  if (is_topmost) output_frame->SetRegister(cp.code(), value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(function);
  // The function for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  ASSERT(0 == output_offset);

  // Compute this frame's PC, state, and continuation.
  Code* non_optimized_code = function->shared()->code();
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
  output_frame->SetPc(pc_value);
  if (is_topmost) {
    output_frame->SetRegister(pc.code(), pc_value);
  }

  FullCodeGenerator::State state =
      FullCodeGenerator::StateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(state));

  // Set the continuation for the topmost frame.
  if (is_topmost && bailout_type_ != DEBUGGER) {
    Builtins* builtins = isolate_->builtins();
    Code* continuation = (bailout_type_ == EAGER)
        ? builtins->builtin(Builtins::kNotifyDeoptimized)
        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<uint32_t>(continuation->entry()));
  }
}


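// Initialize the input FrameDescription from the live optimized frame:
// dummy values for the general purpose registers (only sp and fp matter),
// zeroed double registers, and a copy of the frame's contents.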
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


#define __ masm()->

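// Entry code reached from the deoptimization table. It saves all registers,
// allocates a Deoptimizer object, copies the optimized frame into the input
// FrameDescription, has the output frames computed in C++, materializes them
// on the stack, and finally restores the registers and jumps to the
// continuation.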
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  Isolate* isolate = masm()->isolate();

  CpuFeatures::Scope scope(VFP3);
  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  // Everything but pc, lr and ip which will be saved but not restored.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  const int kDoubleRegsSize =
      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;

  // Save all VFP registers before messing with them.
  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
  DwVfpRegister last =
      DwVfpRegister::FromAllocationIndex(
          DwVfpRegister::kNumAllocatableRegisters - 1);
  ASSERT(last.code() > first.code());
  ASSERT((last.code() - first.code()) ==
         (DwVfpRegister::kNumAllocatableRegisters - 1));
#ifdef DEBUG
  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
  }
#endif
  __ vstm(db_w, sp, first, last);

  // Push all 16 registers (needed to populate FrameDescription::registers_).
  // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
  // handle this a bit differently.
  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object if possible (r3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r4.
  if (type() == EAGER) {
    __ mov(r3, Operand(0));
    // Correct one word for bailout id.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else if (type() == OSR) {
    __ mov(r3, lr);
    // Correct one word for bailout id.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ mov(r3, lr);
    // Correct two words for bailout id and return address.
    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }
  __ sub(r4, fp, r4);

  // Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3 and fifth argument on stack.
  __ PrepareCallCFunction(6, r5);
  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(r1, Operand(type()));  // bailout type,
  // r2: bailout id already loaded.
  // r3: code address or 0 already loaded.
  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
  __ mov(r5, Operand(ExternalReference::isolate_address()));
  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
  }

  // Preserve "deoptimizer" object in register r0 and get the input
  // frame descriptor pointer to r1 (deoptimizer->input_);
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r2, MemOperand(sp, i * kPointerSize));
    __ str(r2, MemOperand(r1, offset));
  }

  // Copy VFP registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ vldr(d0, sp, src_offset);
    __ vstr(d0, r1, dst_offset);
  }

  // Remove the bailout id, eventually return address, and the saved registers
  // from the stack.
  if (type() == EAGER || type() == OSR) {
    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }

  // Compute a pointer to the unwinding limit in register r2; that is
  // the first stack slot not part of the input frame.
  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
  __ add(r2, r2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(r4);
  __ str(r4, MemOperand(r3, 0));
  __ add(r3, r3, Operand(sizeof(uint32_t)));
  __ cmp(r2, sp);
  __ b(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r0);  // Preserve deoptimizer object across call.
  // r0: deoptimizer object; r1: scratch.
  __ PrepareCallCFunction(1, r1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate), 1);
  }
  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop;
  // Outer loop state: r0 = current "FrameDescription** output_",
  // r1 = one past the last FrameDescription**.
  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
  __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
  __ add(r1, r0, Operand(r1, LSL, 2));
  __ bind(&outer_push_loop);
  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
  __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
  __ bind(&inner_push_loop);
  __ sub(r3, r3, Operand(sizeof(uint32_t)));
  __ add(r6, r2, Operand(r3));
  __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
  __ push(r7);
  __ cmp(r3, Operand(0));
  __ b(ne, &inner_push_loop);  // test for gt?
  __ add(r0, r0, Operand(kPointerSize));
  __ cmp(r0, r1);
  __ b(lt, &outer_push_loop);

  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
    __ push(r6);
  }

  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
  __ push(r6);

  // Push the registers from the last output frame.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r6, MemOperand(r2, offset));
    __ push(r6);
  }

  // Restore the registers from the stack.
  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.
  __ pop(ip);  // remove sp
  __ pop(ip);  // remove lr

  __ InitializeRootRegister();

  __ pop(ip);  // remove pc
  __ pop(r7);  // get continuation, leave pc on stack
  __ pop(lr);
  __ Jump(r7);
  __ stop("Unreachable.");
}


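// Generate the table of deoptimization entries: count() entries of
// table_entry_size_ bytes each. Every entry pushes its id and branches to a
// common 'done' label, which falls through into the code emitted by
// EntryGenerator::Generate().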
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries. Note that any
  // registers may be still live.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    if (type() == EAGER) {
      __ nop();
    } else {
      // Emulate ia32 like call by pushing return address to stack.
      __ push(lr);
    }
    __ mov(ip, Operand(i));
    __ push(ip);
    __ b(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal