v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
deoptimizer-mips.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "codegen.h"
31 #include "deoptimizer.h"
32 #include "full-codegen.h"
33 #include "safepoint-table.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
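// Size in bytes of the code patched in at each lazy-deopt call site: a
// four-instruction call (load of the deopt entry address into t9 via lui/ori,
// jalr, and the branch-delay-slot nop); the same call pattern appears in
// PatchStackCheckCodeAt below.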
39 int Deoptimizer::patch_size() {
40  const int kCallInstructionSizeInWords = 4;
41  return kCallInstructionSizeInWords * Assembler::kInstrSize;
42 }
43 
44 
45 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
46  HandleScope scope;
47  AssertNoAllocation no_allocation;
48 
49  if (!function->IsOptimized()) return;
50 
51  // The optimized code is going to be patched, so we cannot use it
52  // any more. Play safe and reset the whole cache.
53  function->shared()->ClearOptimizedCodeMap();
54 
55  // Get the optimized code.
56  Code* code = function->code();
57  Address code_start_address = code->instruction_start();
58 
59  // Invalidate the relocation information, as it will become invalid by the
60  // code patching below, and is not needed any more.
61  code->InvalidateRelocation();
62 
63  // For each LLazyBailout instruction insert a call to the corresponding
64  // deoptimization entry.
65  DeoptimizationInputData* deopt_data =
66  DeoptimizationInputData::cast(code->deoptimization_data());
67 #ifdef DEBUG
68  Address prev_call_address = NULL;
69 #endif
70  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
71  if (deopt_data->Pc(i)->value() == -1) continue;
72  Address call_address = code_start_address + deopt_data->Pc(i)->value();
73  Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
74  int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
75  RelocInfo::NONE);
76  int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
77  ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
78  ASSERT(call_size_in_bytes <= patch_size());
79  CodePatcher patcher(call_address, call_size_in_words);
80  patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
81  ASSERT(prev_call_address == NULL ||
82  call_address >= prev_call_address + patch_size());
83  ASSERT(call_address + patch_size() <= code->instruction_end());
84 
85 #ifdef DEBUG
86  prev_call_address = call_address;
87 #endif
88  }
89 
90  Isolate* isolate = code->GetIsolate();
91 
92  // Add the deoptimizing code to the list.
93  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
94  DeoptimizerData* data = isolate->deoptimizer_data();
95  node->set_next(data->deoptimizing_code_list_);
96  data->deoptimizing_code_list_ = node;
97 
98  // We might be in the middle of incremental marking with compaction.
99  // Tell collector to treat this code object in a special way and
100  // ignore all slots that might have been recorded on it.
101  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
102 
103  ReplaceCodeForRelatedFunctions(function, code);
104 
105  if (FLAG_trace_deopt) {
106  PrintF("[forced deoptimization: ");
107  function->PrintName();
108  PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
109 #ifdef DEBUG
110  if (FLAG_print_code) {
111  code->PrintLn();
112  }
113 #endif
114  }
115 }
116 
117 
118 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
119  Address pc_after,
120  Code* check_code,
121  Code* replacement_code) {
122  const int kInstrSize = Assembler::kInstrSize;
123  // This structure comes from FullCodeGenerator::EmitStackCheck.
124  // The call of the stack guard check has the following form:
125  // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
126  // beq at, zero_reg, ok
127  // lui t9, <stack guard address> upper
128  // ori t9, <stack guard address> lower
129  // jalr t9
130  // nop
131  // ----- pc_after points here
132 
133  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
134 
135  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
136  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
137  patcher.masm()->addiu(at, zero_reg, 1);
138 
139  // Replace the stack check address in the load-immediate (lui/ori pair)
140  // with the entry address of the replacement code.
141  ASSERT(reinterpret_cast<uint32_t>(
142  Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
143  reinterpret_cast<uint32_t>(check_code->entry()));
144  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
145  replacement_code->entry());
146 
147  // We patched the code to the following form:
148  // addiu at, zero_reg, 1
149  // beq at, zero_reg, ok ;; Not changed
150  // lui t9, <on-stack replacement address> upper
151  // ori t9, <on-stack replacement address> lower
152  // jalr t9 ;; Not changed
153  // nop ;; Not changed
154  // ----- pc_after points here
155 
156  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
157  unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
158 }
159 
160 
161 void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
162  Address pc_after,
163  Code* check_code,
164  Code* replacement_code) {
165  // Exact opposite of the function above.
166  const int kInstrSize = Assembler::kInstrSize;
167  ASSERT(Assembler::IsAddImmediate(
168  Assembler::instr_at(pc_after - 6 * kInstrSize)));
169  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
170 
171  // Restore the sltu instruction so beq can be taken again.
172  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
173  if (FLAG_count_based_interrupts) {
174  patcher.masm()->slt(at, a3, zero_reg);
175  } else {
176  patcher.masm()->sltu(at, sp, t0);
177  }
178 
179  // Replace the on-stack replacement address in the load-immediate (lui/ori
180  // pair) with the entry address of the normal stack-check code.
181  ASSERT(reinterpret_cast<uint32_t>(
182  Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
183  reinterpret_cast<uint32_t>(replacement_code->entry()));
184  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
185  check_code->entry());
186 
187  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
188  unoptimized_code, pc_after - 4 * kInstrSize, check_code);
189 }
190 
191 
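// Returns the index of the deoptimization entry whose AST id equals ast_id and
// whose translation describes exactly one frame, i.e. the bailout point is not
// inside an inlined function. Used by DoComputeOsrOutputFrame below.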
192 static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
193  ByteArray* translations = data->TranslationByteArray();
194  int length = data->DeoptCount();
195  for (int i = 0; i < length; i++) {
196  if (data->AstId(i) == ast_id) {
197  TranslationIterator it(translations, data->TranslationIndex(i)->value());
198  int value = it.Next();
199  ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
200  // Read the number of frames.
201  value = it.Next();
202  if (value == 1) return i;
203  }
204  }
205  UNREACHABLE();
206  return -1;
207 }
208 
209 
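// Builds the single output frame used for on-stack replacement (OSR). The
// unoptimized frame is reconstructed slot by slot from the optimized input
// frame via DoOsrTranslateCommand; if any translation command fails, the
// input frame is kept as-is and OSR is abandoned.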
210 void Deoptimizer::DoComputeOsrOutputFrame() {
211  DeoptimizationInputData* data = DeoptimizationInputData::cast(
212  optimized_code_->deoptimization_data());
213  unsigned ast_id = data->OsrAstId()->value();
214 
215  int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
216  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
217  ByteArray* translations = data->TranslationByteArray();
218 
219  TranslationIterator iterator(translations, translation_index);
220  Translation::Opcode opcode =
221  static_cast<Translation::Opcode>(iterator.Next());
222  ASSERT(Translation::BEGIN == opcode);
223  USE(opcode);
224  int count = iterator.Next();
225  iterator.Skip(1); // Drop JS frame count.
226  ASSERT(count == 1);
227  USE(count);
228 
229  opcode = static_cast<Translation::Opcode>(iterator.Next());
230  USE(opcode);
231  ASSERT(Translation::JS_FRAME == opcode);
232  unsigned node_id = iterator.Next();
233  USE(node_id);
234  ASSERT(node_id == ast_id);
235  int closure_id = iterator.Next();
236  USE(closure_id);
237  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
238  unsigned height = iterator.Next();
239  unsigned height_in_bytes = height * kPointerSize;
240  USE(height_in_bytes);
241 
242  unsigned fixed_size = ComputeFixedSize(function_);
243  unsigned input_frame_size = input_->GetFrameSize();
244  ASSERT(fixed_size + height_in_bytes == input_frame_size);
245 
246  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
247  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
248  unsigned outgoing_size = outgoing_height * kPointerSize;
249  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
250  ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
251 
252  if (FLAG_trace_osr) {
253  PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
254  reinterpret_cast<intptr_t>(function_));
255  function_->PrintName();
256  PrintF(" => node=%u, frame=%d->%d]\n",
257  ast_id,
258  input_frame_size,
259  output_frame_size);
260  }
261 
262  // There's only one output frame in the OSR case.
263  output_count_ = 1;
264  output_ = new FrameDescription*[1];
265  output_[0] = new(output_frame_size) FrameDescription(
266  output_frame_size, function_);
267  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
268 
269  // Clear the incoming parameters in the optimized frame to avoid
270  // confusing the garbage collector.
271  unsigned output_offset = output_frame_size - kPointerSize;
272  int parameter_count = function_->shared()->formal_parameter_count() + 1;
273  for (int i = 0; i < parameter_count; ++i) {
274  output_[0]->SetFrameSlot(output_offset, 0);
275  output_offset -= kPointerSize;
276  }
277 
278  // Translate the incoming parameters. This may overwrite some of the
279  // incoming argument slots we've just cleared.
280  int input_offset = input_frame_size - kPointerSize;
281  bool ok = true;
282  int limit = input_offset - (parameter_count * kPointerSize);
283  while (ok && input_offset > limit) {
284  ok = DoOsrTranslateCommand(&iterator, &input_offset);
285  }
286 
287  // There are no translation commands for the caller's pc and fp, the
288  // context, and the function. Set them up explicitly.
289  for (int i = StandardFrameConstants::kCallerPCOffset;
290  ok && i >= StandardFrameConstants::kMarkerOffset;
291  i -= kPointerSize) {
292  uint32_t input_value = input_->GetFrameSlot(input_offset);
293  if (FLAG_trace_osr) {
294  const char* name = "UNKNOWN";
295  switch (i) {
296  case StandardFrameConstants::kCallerPCOffset:
297  name = "caller's pc";
298  break;
299  case StandardFrameConstants::kCallerFPOffset:
300  name = "fp";
301  break;
302  case StandardFrameConstants::kContextOffset:
303  name = "context";
304  break;
305  case StandardFrameConstants::kMarkerOffset:
306  name = "function";
307  break;
308  }
309  PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
310  output_offset,
311  input_value,
312  input_offset,
313  name);
314  }
315 
316  output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
317  input_offset -= kPointerSize;
318  output_offset -= kPointerSize;
319  }
320 
321  // Translate the rest of the frame.
322  while (ok && input_offset >= 0) {
323  ok = DoOsrTranslateCommand(&iterator, &input_offset);
324  }
325 
326  // If translation of any command failed, continue using the input frame.
327  if (!ok) {
328  delete output_[0];
329  output_[0] = input_;
330  output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
331  } else {
332  // Set up the frame pointer and the context pointer.
333  output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
334  output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
335 
336  unsigned pc_offset = data->OsrPcOffset()->value();
337  uint32_t pc = reinterpret_cast<uint32_t>(
338  optimized_code_->entry() + pc_offset);
339  output_[0]->SetPc(pc);
340  }
341  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
342  output_[0]->SetContinuation(
343  reinterpret_cast<uint32_t>(continuation->entry()));
344 
345  if (FLAG_trace_osr) {
346  PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
347  ok ? "finished" : "aborted",
348  reinterpret_cast<intptr_t>(function_));
349  function_->PrintName();
350  PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
351  }
352 }
353 
354 
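// Materializes an arguments adaptor frame for a deoptimized inlined call site
// where the actual argument count differs from the callee's formal parameter
// count. The frame is written top down: translated arguments, caller's pc,
// caller's fp, the adaptor sentinel, the function, and the argument count.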
355 void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
356  int frame_index) {
357  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
358  unsigned height = iterator->Next();
359  unsigned height_in_bytes = height * kPointerSize;
360  if (FLAG_trace_deopt) {
361  PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
362  }
363 
364  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
365  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
366 
367  // Allocate and store the output frame description.
368  FrameDescription* output_frame =
369  new(output_frame_size) FrameDescription(output_frame_size, function);
370  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
371 
372  // Arguments adaptor can not be topmost or bottommost.
373  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
374  ASSERT(output_[frame_index] == NULL);
375  output_[frame_index] = output_frame;
376 
377  // The top address of the frame is computed from the previous
378  // frame's top and this frame's size.
379  uint32_t top_address;
380  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
381  output_frame->SetTop(top_address);
382 
383  // Compute the incoming parameter translation.
384  int parameter_count = height;
385  unsigned output_offset = output_frame_size;
386  for (int i = 0; i < parameter_count; ++i) {
387  output_offset -= kPointerSize;
388  DoTranslateCommand(iterator, frame_index, output_offset);
389  }
390 
391  // Read caller's PC from the previous frame.
392  output_offset -= kPointerSize;
393  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
394  output_frame->SetFrameSlot(output_offset, callers_pc);
395  if (FLAG_trace_deopt) {
396  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
397  top_address + output_offset, output_offset, callers_pc);
398  }
399 
400  // Read caller's FP from the previous frame, and set this frame's FP.
401  output_offset -= kPointerSize;
402  intptr_t value = output_[frame_index - 1]->GetFp();
403  output_frame->SetFrameSlot(output_offset, value);
404  intptr_t fp_value = top_address + output_offset;
405  output_frame->SetFp(fp_value);
406  if (FLAG_trace_deopt) {
407  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
408  fp_value, output_offset, value);
409  }
410 
411  // A marker value is used in place of the context.
412  output_offset -= kPointerSize;
413  intptr_t context = reinterpret_cast<intptr_t>(
414  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
415  output_frame->SetFrameSlot(output_offset, context);
416  if (FLAG_trace_deopt) {
417  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
418  top_address + output_offset, output_offset, context);
419  }
420 
421  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
422  output_offset -= kPointerSize;
423  value = reinterpret_cast<intptr_t>(function);
424  output_frame->SetFrameSlot(output_offset, value);
425  if (FLAG_trace_deopt) {
426  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
427  top_address + output_offset, output_offset, value);
428  }
429 
430  // Number of incoming arguments.
431  output_offset -= kPointerSize;
432  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
433  output_frame->SetFrameSlot(output_offset, value);
434  if (FLAG_trace_deopt) {
435  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
436  top_address + output_offset, output_offset, value, height - 1);
437  }
438 
439  ASSERT(0 == output_offset);
440 
441  Builtins* builtins = isolate_->builtins();
442  Code* adaptor_trampoline =
443  builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
444  uint32_t pc = reinterpret_cast<uint32_t>(
445  adaptor_trampoline->instruction_start() +
446  isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
447  output_frame->SetPc(pc);
448 }
449 
450 
451 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
452  int frame_index) {
453  Builtins* builtins = isolate_->builtins();
454  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
455  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
456  unsigned height = iterator->Next();
457  unsigned height_in_bytes = height * kPointerSize;
458  if (FLAG_trace_deopt) {
459  PrintF(" translating construct stub => height=%d\n", height_in_bytes);
460  }
461 
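 // The fixed part of a construct stub frame holds eight words, stored below in
 // this order: caller's pc, caller's fp, context, the CONSTRUCT frame sentinel,
 // the construct stub code object, the argument count, the constructor
 // function, and the allocated receiver.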
462  unsigned fixed_frame_size = 8 * kPointerSize;
463  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
464 
465  // Allocate and store the output frame description.
466  FrameDescription* output_frame =
467  new(output_frame_size) FrameDescription(output_frame_size, function);
468  output_frame->SetFrameType(StackFrame::CONSTRUCT);
469 
470  // Construct stub can not be topmost or bottommost.
471  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
472  ASSERT(output_[frame_index] == NULL);
473  output_[frame_index] = output_frame;
474 
475  // The top address of the frame is computed from the previous
476  // frame's top and this frame's size.
477  uint32_t top_address;
478  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
479  output_frame->SetTop(top_address);
480 
481  // Compute the incoming parameter translation.
482  int parameter_count = height;
483  unsigned output_offset = output_frame_size;
484  for (int i = 0; i < parameter_count; ++i) {
485  output_offset -= kPointerSize;
486  DoTranslateCommand(iterator, frame_index, output_offset);
487  }
488 
489  // Read caller's PC from the previous frame.
490  output_offset -= kPointerSize;
491  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
492  output_frame->SetFrameSlot(output_offset, callers_pc);
493  if (FLAG_trace_deopt) {
494  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
495  top_address + output_offset, output_offset, callers_pc);
496  }
497 
498  // Read caller's FP from the previous frame, and set this frame's FP.
499  output_offset -= kPointerSize;
500  intptr_t value = output_[frame_index - 1]->GetFp();
501  output_frame->SetFrameSlot(output_offset, value);
502  intptr_t fp_value = top_address + output_offset;
503  output_frame->SetFp(fp_value);
504  if (FLAG_trace_deopt) {
505  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
506  fp_value, output_offset, value);
507  }
508 
509  // The context can be gotten from the previous frame.
510  output_offset -= kPointerSize;
511  value = output_[frame_index - 1]->GetContext();
512  output_frame->SetFrameSlot(output_offset, value);
513  if (FLAG_trace_deopt) {
514  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
515  top_address + output_offset, output_offset, value);
516  }
517 
518  // A marker value is used in place of the function.
519  output_offset -= kPointerSize;
520  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
521  output_frame->SetFrameSlot(output_offset, value);
522  if (FLAG_trace_deopt) {
523  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
524  top_address + output_offset, output_offset, value);
525  }
526 
527  // The output frame reflects a JSConstructStubGeneric frame.
528  output_offset -= kPointerSize;
529  value = reinterpret_cast<intptr_t>(construct_stub);
530  output_frame->SetFrameSlot(output_offset, value);
531  if (FLAG_trace_deopt) {
532  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
533  top_address + output_offset, output_offset, value);
534  }
535 
536  // Number of incoming arguments.
537  output_offset -= kPointerSize;
538  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
539  output_frame->SetFrameSlot(output_offset, value);
540  if (FLAG_trace_deopt) {
541  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
542  top_address + output_offset, output_offset, value, height - 1);
543  }
544 
545  // Constructor function being invoked by the stub.
546  output_offset -= kPointerSize;
547  value = reinterpret_cast<intptr_t>(function);
548  output_frame->SetFrameSlot(output_offset, value);
549  if (FLAG_trace_deopt) {
550  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
551  top_address + output_offset, output_offset, value);
552  }
553 
554  // The newly allocated object was passed as receiver in the artificial
555  // constructor stub environment created by HEnvironment::CopyForInlining().
556  output_offset -= kPointerSize;
557  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
558  output_frame->SetFrameSlot(output_offset, value);
559  if (FLAG_trace_deopt) {
560  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
561  top_address + output_offset, output_offset, value);
562  }
563 
564  ASSERT(0 == output_offset);
565 
566  uint32_t pc = reinterpret_cast<uint32_t>(
567  construct_stub->instruction_start() +
568  isolate_->heap()->construct_stub_deopt_pc_offset()->value());
569  output_frame->SetPc(pc);
570 }
571 
572 
573 void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
574  int frame_index,
575  bool is_setter_stub_frame) {
576  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
577  // The receiver (and the implicit return value, if any) are expected in
578  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
579  // frame. This means that we have to use a height of 0.
580  unsigned height = 0;
581  unsigned height_in_bytes = height * kPointerSize;
582  const char* kind = is_setter_stub_frame ? "setter" : "getter";
583  if (FLAG_trace_deopt) {
584  PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
585  }
586 
587  // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
588  // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
589  // need one additional entry for the implicit return value, see
590  // StoreStubCompiler::CompileStoreViaSetter.
591  unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
592  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
593  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
594 
595  // Allocate and store the output frame description.
596  FrameDescription* output_frame =
597  new(output_frame_size) FrameDescription(output_frame_size, accessor);
598  output_frame->SetFrameType(StackFrame::INTERNAL);
599 
600  // A frame for an accessor stub can not be the topmost or bottommost one.
601  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
602  ASSERT(output_[frame_index] == NULL);
603  output_[frame_index] = output_frame;
604 
605  // The top address of the frame is computed from the previous frame's top and
606  // this frame's size.
607  uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
608  output_frame->SetTop(top_address);
609 
610  unsigned output_offset = output_frame_size;
611 
612  // Read caller's PC from the previous frame.
613  output_offset -= kPointerSize;
614  intptr_t value = output_[frame_index - 1]->GetPc();
615  output_frame->SetFrameSlot(output_offset, value);
616  if (FLAG_trace_deopt) {
617  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
618  " ; caller's pc\n",
619  top_address + output_offset, output_offset, value);
620  }
621 
622  // Read caller's FP from the previous frame, and set this frame's FP.
623  output_offset -= kPointerSize;
624  value = output_[frame_index - 1]->GetFp();
625  output_frame->SetFrameSlot(output_offset, value);
626  intptr_t fp_value = top_address + output_offset;
627  output_frame->SetFp(fp_value);
628  if (FLAG_trace_deopt) {
629  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
630  " ; caller's fp\n",
631  fp_value, output_offset, value);
632  }
633 
634  // The context can be gotten from the previous frame.
635  output_offset -= kPointerSize;
636  value = output_[frame_index - 1]->GetContext();
637  output_frame->SetFrameSlot(output_offset, value);
638  if (FLAG_trace_deopt) {
639  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
640  " ; context\n",
641  top_address + output_offset, output_offset, value);
642  }
643 
644  // A marker value is used in place of the function.
645  output_offset -= kPointerSize;
646  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
647  output_frame->SetFrameSlot(output_offset, value);
648  if (FLAG_trace_deopt) {
649  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
650  " ; function (%s sentinel)\n",
651  top_address + output_offset, output_offset, value, kind);
652  }
653 
654  // Get Code object from accessor stub.
655  output_offset -= kPointerSize;
656  Builtins::Name name = is_setter_stub_frame ?
657  Builtins::kStoreIC_Setter_ForDeopt :
658  Builtins::kLoadIC_Getter_ForDeopt;
659  Code* accessor_stub = isolate_->builtins()->builtin(name);
660  value = reinterpret_cast<intptr_t>(accessor_stub);
661  output_frame->SetFrameSlot(output_offset, value);
662  if (FLAG_trace_deopt) {
663  PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
664  " ; code object\n",
665  top_address + output_offset, output_offset, value);
666  }
667 
668  // Skip receiver.
669  Translation::Opcode opcode =
670  static_cast<Translation::Opcode>(iterator->Next());
671  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
672 
673  if (is_setter_stub_frame) {
674  // The implicit return value was part of the artificial setter stub
675  // environment.
676  output_offset -= kPointerSize;
677  DoTranslateCommand(iterator, frame_index, output_offset);
678  }
679 
680  ASSERT(0 == output_offset);
681 
682  Smi* offset = is_setter_stub_frame ?
683  isolate_->heap()->setter_stub_deopt_pc_offset() :
684  isolate_->heap()->getter_stub_deopt_pc_offset();
685  intptr_t pc = reinterpret_cast<intptr_t>(
686  accessor_stub->instruction_start() + offset->value());
687  output_frame->SetPc(pc);
688 }
689 
690 
691 // This code is very similar to ia32/arm code, but relies on register names
692 // (fp, sp) and how the frame is laid out.
693 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
694  int frame_index) {
695  // Read the ast node id, function, and frame height for this output frame.
696  BailoutId node_id = BailoutId(iterator->Next());
697  JSFunction* function;
698  if (frame_index != 0) {
699  function = JSFunction::cast(ComputeLiteral(iterator->Next()));
700  } else {
701  int closure_id = iterator->Next();
702  USE(closure_id);
703  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
704  function = function_;
705  }
706  unsigned height = iterator->Next();
707  unsigned height_in_bytes = height * kPointerSize;
708  if (FLAG_trace_deopt) {
709  PrintF(" translating ");
710  function->PrintName();
711  PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
712  }
713 
714  // The 'fixed' part of the frame consists of the incoming parameters and
715  // the part described by JavaScriptFrameConstants.
716  unsigned fixed_frame_size = ComputeFixedSize(function);
717  unsigned input_frame_size = input_->GetFrameSize();
718  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
719 
720  // Allocate and store the output frame description.
721  FrameDescription* output_frame =
722  new(output_frame_size) FrameDescription(output_frame_size, function);
723  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
724 
725  bool is_bottommost = (0 == frame_index);
726  bool is_topmost = (output_count_ - 1 == frame_index);
727  ASSERT(frame_index >= 0 && frame_index < output_count_);
728  ASSERT(output_[frame_index] == NULL);
729  output_[frame_index] = output_frame;
730 
731  // The top address for the bottommost output frame can be computed from
732  // the input frame pointer and the output frame's height. For all
733  // subsequent output frames, it can be computed from the previous one's
734  // top address and the current frame's size.
735  uint32_t top_address;
736  if (is_bottommost) {
737  // 2 = context and function in the frame.
738  top_address =
739  input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
740  } else {
741  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
742  }
743  output_frame->SetTop(top_address);
744 
745  // Compute the incoming parameter translation.
746  int parameter_count = function->shared()->formal_parameter_count() + 1;
747  unsigned output_offset = output_frame_size;
748  unsigned input_offset = input_frame_size;
749  for (int i = 0; i < parameter_count; ++i) {
750  output_offset -= kPointerSize;
751  DoTranslateCommand(iterator, frame_index, output_offset);
752  }
753  input_offset -= (parameter_count * kPointerSize);
754 
755  // There are no translation commands for the caller's pc and fp, the
756  // context, and the function. Synthesize their values and set them up
757  // explicitly.
758  //
759  // The caller's pc for the bottommost output frame is the same as in the
760  // input frame. For all subsequent output frames, it can be read from the
761  // previous one. This frame's pc can be computed from the non-optimized
762  // function code and AST id of the bailout.
763  output_offset -= kPointerSize;
764  input_offset -= kPointerSize;
765  intptr_t value;
766  if (is_bottommost) {
767  value = input_->GetFrameSlot(input_offset);
768  } else {
769  value = output_[frame_index - 1]->GetPc();
770  }
771  output_frame->SetFrameSlot(output_offset, value);
772  if (FLAG_trace_deopt) {
773  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
774  top_address + output_offset, output_offset, value);
775  }
776 
777  // The caller's frame pointer for the bottommost output frame is the same
778  // as in the input frame. For all subsequent output frames, it can be
779  // read from the previous one. Also compute and set this frame's frame
780  // pointer.
781  output_offset -= kPointerSize;
782  input_offset -= kPointerSize;
783  if (is_bottommost) {
784  value = input_->GetFrameSlot(input_offset);
785  } else {
786  value = output_[frame_index - 1]->GetFp();
787  }
788  output_frame->SetFrameSlot(output_offset, value);
789  intptr_t fp_value = top_address + output_offset;
790  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
791  output_frame->SetFp(fp_value);
792  if (is_topmost) {
793  output_frame->SetRegister(fp.code(), fp_value);
794  }
795  if (FLAG_trace_deopt) {
796  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
797  fp_value, output_offset, value);
798  }
799 
800  // For the bottommost output frame the context can be gotten from the input
801  // frame. For all subsequent output frames it can be gotten from the function
802  // so long as we don't inline functions that need local contexts.
803  output_offset -= kPointerSize;
804  input_offset -= kPointerSize;
805  if (is_bottommost) {
806  value = input_->GetFrameSlot(input_offset);
807  } else {
808  value = reinterpret_cast<intptr_t>(function->context());
809  }
810  output_frame->SetFrameSlot(output_offset, value);
811  output_frame->SetContext(value);
812  if (is_topmost) output_frame->SetRegister(cp.code(), value);
813  if (FLAG_trace_deopt) {
814  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
815  top_address + output_offset, output_offset, value);
816  }
817 
818  // The function was mentioned explicitly in the BEGIN_FRAME.
819  output_offset -= kPointerSize;
820  input_offset -= kPointerSize;
821  value = reinterpret_cast<uint32_t>(function);
822  // The function for the bottommost output frame should also agree with the
823  // input frame.
824  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
825  output_frame->SetFrameSlot(output_offset, value);
826  if (FLAG_trace_deopt) {
827  PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
828  top_address + output_offset, output_offset, value);
829  }
830 
831  // Translate the rest of the frame.
832  for (unsigned i = 0; i < height; ++i) {
833  output_offset -= kPointerSize;
834  DoTranslateCommand(iterator, frame_index, output_offset);
835  }
836  ASSERT(0 == output_offset);
837 
838  // Compute this frame's PC, state, and continuation.
839  Code* non_optimized_code = function->shared()->code();
840  FixedArray* raw_data = non_optimized_code->deoptimization_data();
841  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
842  Address start = non_optimized_code->instruction_start();
843  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
844  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
845  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
846  output_frame->SetPc(pc_value);
847 
847 
848  FullCodeGenerator::State state =
849  FullCodeGenerator::StateField::decode(pc_and_state);
850  output_frame->SetState(Smi::FromInt(state));
851 
852 
853  // Set the continuation for the topmost frame.
854  if (is_topmost && bailout_type_ != DEBUGGER) {
855  Builtins* builtins = isolate_->builtins();
856  Code* continuation = (bailout_type_ == EAGER)
857  ? builtins->builtin(Builtins::kNotifyDeoptimized)
858  : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
859  output_frame->SetContinuation(
860  reinterpret_cast<uint32_t>(continuation->entry()));
861  }
862 }
863 
864 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
865  // Set the register values. The values are not important as there are no
866  // callee saved registers in JavaScript frames, so all registers are
867  // spilled. Registers fp and sp are set to the correct values though.
868 
869  for (int i = 0; i < Register::kNumRegisters; i++) {
870  input_->SetRegister(i, i * 4);
871  }
872  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
873  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
874  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
875  input_->SetDoubleRegister(i, 0.0);
876  }
877 
878  // Fill the frame content from the actual data on the frame.
879  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
880  input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
881  }
882 }
883 
884 
885 #define __ masm()->
886 
887 
888 // This code tries to be close to ia32 code so that any changes can be
889 // easily ported.
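// Entry code reached from every deoptimization table entry. It saves the
// register state, calls Deoptimizer::New() in C++, copies the register values
// and the activation frame into the input FrameDescription, calls
// ComputeOutputFrames(), materializes the output frames on the stack, restores
// the registers, and jumps to the continuation.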
890 void Deoptimizer::EntryGenerator::Generate() {
891  GeneratePrologue();
892 
893  Isolate* isolate = masm()->isolate();
894 
895  CpuFeatures::Scope scope(FPU);
896  // Unlike on ARM we don't save all the registers, just the useful ones.
897  // For the rest, there are gaps on the stack, so the offsets remain the same.
898  const int kNumberOfRegisters = Register::kNumRegisters;
899 
900  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
901  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
902 
903  const int kDoubleRegsSize =
904  kDoubleSize * FPURegister::kNumAllocatableRegisters;
905 
906  // Save all FPU registers before messing with them.
907  __ Subu(sp, sp, Operand(kDoubleRegsSize));
908  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
909  FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
910  int offset = i * kDoubleSize;
911  __ sdc1(fpu_reg, MemOperand(sp, offset));
912  }
913 
914  // Push saved_regs (needed to populate FrameDescription::registers_).
915  // Leave gaps for other registers.
916  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
917  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
918  if ((saved_regs & (1 << i)) != 0) {
919  __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
920  }
921  }
922 
923  const int kSavedRegistersAreaSize =
924  (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
925 
926  // Get the bailout id from the stack.
927  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
928 
929  // Get the address of the location in the code object if possible (a3) (return
930  // address for lazy deoptimization) and compute the fp-to-sp delta in
931  // register t0.
932  if (type() == EAGER) {
933  __ mov(a3, zero_reg);
934  // Correct one word for bailout id.
935  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
936  } else if (type() == OSR) {
937  __ mov(a3, ra);
938  // Correct one word for bailout id.
939  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
940  } else {
941  __ mov(a3, ra);
942  // Correct two words for bailout id and return address.
943  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
944  }
945 
946  __ Subu(t0, fp, t0);
947 
948  // Allocate a new deoptimizer object.
949  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
950  __ PrepareCallCFunction(6, t1);
951  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
952  __ li(a1, Operand(type())); // bailout type,
953  // a2: bailout id already loaded.
954  // a3: code address or 0 already loaded.
955  __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
956  __ li(t1, Operand(ExternalReference::isolate_address()));
957  __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
958  // Call Deoptimizer::New().
959  {
960  AllowExternalCallThatCantCauseGC scope(masm());
961  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
962  }
963 
964  // Preserve "deoptimizer" object in register v0 and get the input
965  // frame descriptor pointer to a1 (deoptimizer->input_);
966  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
967  __ mov(a0, v0);
968  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
969 
970  // Copy core registers into FrameDescription::registers_[kNumRegisters].
971  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
972  for (int i = 0; i < kNumberOfRegisters; i++) {
973  int offset = (i * kPointerSize) + FrameDescription::registers_offset();
974  if ((saved_regs & (1 << i)) != 0) {
975  __ lw(a2, MemOperand(sp, i * kPointerSize));
976  __ sw(a2, MemOperand(a1, offset));
977  } else if (FLAG_debug_code) {
978  __ li(a2, kDebugZapValue);
979  __ sw(a2, MemOperand(a1, offset));
980  }
981  }
982 
983  // Copy FPU registers to
984  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
985  int double_regs_offset = FrameDescription::double_registers_offset();
986  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
987  int dst_offset = i * kDoubleSize + double_regs_offset;
988  int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
989  __ ldc1(f0, MemOperand(sp, src_offset));
990  __ sdc1(f0, MemOperand(a1, dst_offset));
991  }
992 
993  // Remove the bailout id, eventually return address, and the saved registers
994  // from the stack.
995  if (type() == EAGER || type() == OSR) {
996  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
997  } else {
998  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
999  }
1000 
1001  // Compute a pointer to the unwinding limit in register a2; that is
1002  // the first stack slot not part of the input frame.
1003  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
1004  __ Addu(a2, a2, sp);
1005 
1006  // Unwind the stack down to - but not including - the unwinding
1007  // limit and copy the contents of the activation frame to the input
1008  // frame description.
1009  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
1010  Label pop_loop;
1011  __ bind(&pop_loop);
1012  __ pop(t0);
1013  __ sw(t0, MemOperand(a3, 0));
1014  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
1015  __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
1016 
1017  // Compute the output frame in the deoptimizer.
1018  __ push(a0); // Preserve deoptimizer object across call.
1019  // a0: deoptimizer object; a1: scratch.
1020  __ PrepareCallCFunction(1, a1);
1021  // Call Deoptimizer::ComputeOutputFrames().
1022  {
1023  AllowExternalCallThatCantCauseGC scope(masm());
1024  __ CallCFunction(
1025  ExternalReference::compute_output_frames_function(isolate), 1);
1026  }
1027  __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
1028 
1029  // Replace the current (input) frame with the output frames.
1030  Label outer_push_loop, inner_push_loop;
1031  // Outer loop state: a0 = current "FrameDescription** output_",
1032  // a1 = one past the last FrameDescription**.
1033  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
1034  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
1035  __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
1036  __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
1037  __ bind(&outer_push_loop);
1038  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
1039  __ lw(a2, MemOperand(a0, 0)); // output_[ix]
1040  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
1041  __ bind(&inner_push_loop);
1042  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
1043  __ Addu(t2, a2, Operand(a3));
1044  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
1045  __ push(t3);
1046  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
1047 
1048  __ Addu(a0, a0, Operand(kPointerSize));
1049  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
1050 
1051 
1052  // Push state, pc, and continuation from the last output frame.
1053  if (type() != OSR) {
1054  __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
1055  __ push(t2);
1056  }
1057 
1058  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
1059  __ push(t2);
1060  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
1061  __ push(t2);
1062 
1063 
1064  // Technically restoring 'at' should work unless zero_reg is also restored
1065  // but it's safer to check for this.
1066  ASSERT(!(at.bit() & restored_regs));
1067  // Restore the registers from the last output frame.
1068  __ mov(at, a2);
1069  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
1070  int offset = (i * kPointerSize) + FrameDescription::registers_offset();
1071  if ((restored_regs & (1 << i)) != 0) {
1072  __ lw(ToRegister(i), MemOperand(at, offset));
1073  }
1074  }
1075 
1076  __ InitializeRootRegister();
1077 
1078  __ pop(at); // Get continuation, leave pc on stack.
1079  __ pop(ra);
1080  __ Jump(at);
1081  __ stop("Unreachable.");
1082 }
1083 
1084 
1085 // Maximum size of a table entry generated below.
1086 const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
1087 
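// Emits the deoptimization entry table: count() entries of exactly
// table_entry_size_ bytes each (padded with nops). Every entry pushes its own
// index (and, for lazy deoptimizations, the return address), then jumps over
// the remaining entries to the common entry code that follows the table.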
1088 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
1089  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
1090 
1091  // Create a sequence of deoptimization entries. Note that any
1092  // registers may be still live.
1093  Label table_start;
1094  __ bind(&table_start);
1095  for (int i = 0; i < count(); i++) {
1096  Label start;
1097  __ bind(&start);
1098  if (type() != EAGER) {
1099  // Emulate ia32 like call by pushing return address to stack.
1100  __ addiu(sp, sp, -2 * kPointerSize);
1101  __ sw(ra, MemOperand(sp, 1 * kPointerSize));
1102  } else {
1103  __ addiu(sp, sp, -1 * kPointerSize);
1104  }
1105  // Jump over the remaining deopt entries (including this one).
1106  // This code is always reached by calling Jump, which puts the target (label
1107  // start) into t9.
1108  const int remaining_entries = (count() - i) * table_entry_size_;
1109  __ Addu(t9, t9, remaining_entries);
1110  // 'at' was clobbered so we can only load the current entry value here.
1111  __ li(at, i);
1112  __ jr(t9); // Expose delay slot.
1113  __ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
1114 
1115  // Pad the rest of the code.
1116  while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
1117  __ nop();
1118  }
1119 
1120  ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
1121  }
1122 
1123  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
1124  count() * table_entry_size_);
1125 }
1126 
1127 #undef __
1128 
1129 
1130 } } // namespace v8::internal