V8 3.25.30 (Node.js 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
deoptimizer.cc
Go to the documentation of this file.
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "accessors.h"
31 #include "codegen.h"
32 #include "deoptimizer.h"
33 #include "disasm.h"
34 #include "full-codegen.h"
35 #include "global-handles.h"
36 #include "macro-assembler.h"
37 #include "prettyprinter.h"
38 
39 
40 namespace v8 {
41 namespace internal {
42 
// Allocates a single memory chunk large enough to hold the entire
// deoptimization entry table (see Deoptimizer::GetMaxDeoptTableSize()).
// NOTE(review): original lines 45 and 49 are missing from this listing
// (presumably the chunk-alignment argument and the NOT_EXECUTABLE branch
// for the Native Client build) — restore from upstream deoptimizer.cc.
43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
44  return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
46 #if defined(__native_client__)
47  // The Native Client port of V8 uses an interpreter,
48  // so code pages don't need PROT_EXEC.
50 #else
51  EXECUTABLE,
52 #endif
53  NULL);
54 }
55 
56 
// DeoptimizerData constructor: pre-allocates one code chunk per bailout
// type and marks every entry-count slot as "not yet generated" (-1).
// NOTE(review): original line 57 (the constructor's signature line,
// DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)) is missing
// from this listing.
58  : allocator_(allocator),
59 #ifdef ENABLE_DEBUGGER_SUPPORT
60  deoptimized_frame_info_(NULL),
61 #endif
62  current_(NULL) {
63  for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
64  deopt_entry_code_entries_[i] = -1;
65  deopt_entry_code_[i] = AllocateCodeChunk(allocator);
66  }
67 }
68 
69 
// DeoptimizerData destructor: returns every per-bailout-type code chunk to
// the allocator.
// NOTE(review): original line 70 (the destructor's signature line) is
// missing from this listing.
71  for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
72  allocator_->Free(deopt_entry_code_[i]);
73  deopt_entry_code_[i] = NULL;
74  }
75 }
76 
77 
#ifdef ENABLE_DEBUGGER_SUPPORT
// GC support: forwards the visitor to the retained debugger frame info (if
// any) so the heap objects it references stay alive.
void DeoptimizerData::Iterate(ObjectVisitor* v) {
  DeoptimizedFrameInfo* info = deoptimized_frame_info_;
  if (info == NULL) return;
  info->Iterate(v);
}
#endif
85 
86 
87 Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
88  if (function_->IsHeapObject()) {
89  // Search all deoptimizing code in the native context of the function.
90  Context* native_context = function_->context()->native_context();
91  Object* element = native_context->DeoptimizedCodeListHead();
92  while (!element->IsUndefined()) {
93  Code* code = Code::cast(element);
94  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
95  if (code->contains(addr)) return code;
96  element = code->next_code_link();
97  }
98  }
99  return NULL;
100 }
101 
102 
103 // We rely on this function not causing a GC. It is called from generated code
104 // without having a real stack frame in place.
// Creates a Deoptimizer and installs it as the isolate's single "current"
// deoptimizer; the matching Grab() call hands it back and clears the slot.
// NOTE(review): original line 105 (the Deoptimizer::New signature line) is
// missing from this listing — restore from upstream deoptimizer.cc.
106  BailoutType type,
107  unsigned bailout_id,
108  Address from,
109  int fp_to_sp_delta,
110  Isolate* isolate) {
// NULL here is the optimized_code argument: EAGER/SOFT/LAZY bailouts find
// the code via FindOptimizedCode() instead of receiving it explicitly.
111  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
112  function,
113  type,
114  bailout_id,
115  from,
116  fp_to_sp_delta,
117  NULL);
// Only one deoptimizer may be in flight per isolate at a time.
118  ASSERT(isolate->deoptimizer_data()->current_ == NULL);
119  isolate->deoptimizer_data()->current_ = deoptimizer;
120  return deoptimizer;
121 }
122 
123 
124 // No larger than 2K on all platforms
// Upper bound on the epilogue code emitted after the deopt entry table;
// used below when sizing the committed region for the table.
125 static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
126 
127 
// Computes the committed size needed for the full deopt entry table plus
// epilogue, rounded up to whole commit pages (with one extra page of slack).
// NOTE(review): original line 128 (the Deoptimizer::GetMaxDeoptTableSize()
// signature line) is missing from this listing.
129  int entries_size =
130  Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
131  int commit_page_size = static_cast<int>(OS::CommitPageSize());
132  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
133  commit_page_size) + 1;
134  return static_cast<size_t>(commit_page_size * page_count);
135 }
136 
137 
// Retrieves the isolate's current deoptimizer (installed by New()), frees
// its GC-unsafe frame descriptions, and clears the per-isolate slot.
// NOTE(review): original line 138 (the Deoptimizer::Grab signature line) is
// missing from this listing.
139  Deoptimizer* result = isolate->deoptimizer_data()->current_;
140  ASSERT(result != NULL);
141  result->DeleteFrameDescriptions();
142  isolate->deoptimizer_data()->current_ = NULL;
143  return result;
144 }
145 
146 
// Translates an index counting only JavaScript frames into an index into
// output_[], which also contains adaptor/construct/stub frames.
// NOTE(review): original line 147 (the signature line,
// Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index)) is
// missing from this listing.
148  if (jsframe_index == 0) return 0;
149 
150  int frame_index = 0;
// Scan forward until the (jsframe_index+1)-th JAVA_SCRIPT frame has been
// passed; relies on output_ containing enough JS frames (no bounds check).
151  while (jsframe_index >= 0) {
152  FrameDescription* frame = output_[frame_index];
153  if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
154  jsframe_index--;
155  }
156  frame_index++;
157  }
158 
159  return frame_index - 1;
160 }
161 
162 
163 #ifdef ENABLE_DEBUGGER_SUPPORT
// Builds a GC-safe snapshot (DeoptimizedFrameInfo) of one inlined JS frame
// of an optimized frame so the debugger can inspect it. Runs a DEBUGGER-type
// deoptimization internally, copies the interesting slots out, then throws
// the deoptimizer away.
// NOTE(review): this listing is missing original lines 185, 189, 198, 210
// and 218 (the "+ 2 * kPointerSize" delta term, the Deoptimizer::DEBUGGER
// bailout-type argument, the DoComputeOutputFrames() call, the
// StackFrame::ARGUMENTS_ADAPTOR comparison, and the DeoptimizedFrameInfo
// allocation line) — restore from upstream deoptimizer.cc.
164 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
165  JavaScriptFrame* frame,
166  int jsframe_index,
167  Isolate* isolate) {
168  ASSERT(frame->is_optimized());
169  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
170 
171  // Get the function and code from the frame.
172  JSFunction* function = frame->function();
173  Code* code = frame->LookupCode();
174 
175  // Locate the deoptimization point in the code. As we are at a call the
176  // return address must be at a place in the code with deoptimization support.
177  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
178  int deoptimization_index = safepoint_entry.deoptimization_index();
179  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
180 
181  // Always use the actual stack slots when calculating the fp to sp
182  // delta adding two for the function and context.
183  unsigned stack_slots = code->stack_slots();
184  unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
186 
187  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
188  function,
190  deoptimization_index,
191  frame->pc(),
192  fp_to_sp_delta,
193  code);
194  Address tos = frame->fp() - fp_to_sp_delta;
195  deoptimizer->FillInputFrame(tos, frame);
196 
197  // Calculate the output frames.
199 
200  // Create the GC safe output frame information and register it for GC
201  // handling.
202  ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
203 
204  // Convert JS frame index into frame index.
205  int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
206 
207  bool has_arguments_adaptor =
208  frame_index > 0 &&
209  deoptimizer->output_[frame_index - 1]->GetFrameType() ==
211 
212  int construct_offset = has_arguments_adaptor ? 2 : 1;
213  bool has_construct_stub =
214  frame_index >= construct_offset &&
215  deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
216  StackFrame::CONSTRUCT;
217 
219  frame_index,
220  has_arguments_adaptor,
221  has_construct_stub);
222  isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
223 
224  // Get the "simulated" top and size for the requested frame.
// Parameters live in the caller's (adaptor's) frame when one is present.
225  FrameDescription* parameters_frame =
226  deoptimizer->output_[
227  has_arguments_adaptor ? (frame_index - 1) : frame_index];
228 
// +1 accounts for the receiver alongside the formal parameters.
229  uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
230  Address parameters_top = reinterpret_cast<Address>(
231  parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
232  parameters_size));
233 
234  uint32_t expressions_size = info->expression_count() * kPointerSize;
235  Address expressions_top = reinterpret_cast<Address>(
236  deoptimizer->output_[frame_index]->GetTop());
237 
238  // Done with the GC-unsafe frame descriptions. This re-enables allocation.
239  deoptimizer->DeleteFrameDescriptions();
240 
241  // Allocate a heap number for the doubles belonging to this frame.
242  deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
243  parameters_top, parameters_size, expressions_top, expressions_size, info);
244 
245  // Finished using the deoptimizer instance.
246  delete deoptimizer;
247 
248  return info;
249 }
250 
251 
252 void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
253  Isolate* isolate) {
254  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
255  delete info;
256  isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
257 }
258 #endif
259 
260 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
261  int count,
262  BailoutType type) {
263  TableEntryGenerator generator(masm, type, count);
264  generator.Generate();
265 }
266 
267 
268 void Deoptimizer::VisitAllOptimizedFunctionsForContext(
269  Context* context, OptimizedFunctionVisitor* visitor) {
270  DisallowHeapAllocation no_allocation;
271 
272  ASSERT(context->IsNativeContext());
273 
274  visitor->EnterContext(context);
275 
276  // Visit the list of optimized functions, removing elements that
277  // no longer refer to optimized code.
278  JSFunction* prev = NULL;
279  Object* element = context->OptimizedFunctionsListHead();
280  while (!element->IsUndefined()) {
281  JSFunction* function = JSFunction::cast(element);
282  Object* next = function->next_function_link();
283  if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
284  (visitor->VisitFunction(function),
285  function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
286  // The function no longer refers to optimized code, or the visitor
287  // changed the code to which it refers to no longer be optimized code.
288  // Remove the function from this list.
289  if (prev != NULL) {
290  prev->set_next_function_link(next);
291  } else {
292  context->SetOptimizedFunctionsListHead(next);
293  }
294  // The visitor should not alter the link directly.
295  ASSERT(function->next_function_link() == next);
296  // Set the next function link to undefined to indicate it is no longer
297  // in the optimized functions list.
298  function->set_next_function_link(context->GetHeap()->undefined_value());
299  } else {
300  // The visitor should not alter the link directly.
301  ASSERT(function->next_function_link() == next);
302  // preserve this element.
303  prev = function;
304  }
305  element = next;
306  }
307 
308  visitor->LeaveContext(context);
309 }
310 
311 
// Applies |visitor| to the optimized functions of every native context in
// the isolate, delegating per-context work to
// VisitAllOptimizedFunctionsForContext().
// NOTE(review): original line 312 (the signature line,
// void Deoptimizer::VisitAllOptimizedFunctions(...)) is missing from this
// listing.
313  Isolate* isolate,
314  OptimizedFunctionVisitor* visitor) {
315  DisallowHeapAllocation no_allocation;
316 
317  // Run through the list of all native contexts.
318  Object* context = isolate->heap()->native_contexts_list();
319  while (!context->IsUndefined()) {
320  VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
321  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
322  }
323 }
324 
325 
326 // Unlink functions referring to code marked for deoptimization, then move
327 // marked code from the optimized code list to the deoptimized code list,
328 // and patch code for lazy deopt.
329 void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
330  DisallowHeapAllocation no_allocation;
331 
332  // A "closure" that unlinks optimized code that is going to be
333  // deoptimized from the functions that refer to it.
334  class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
335  public:
336  virtual void EnterContext(Context* context) { } // Don't care.
337  virtual void LeaveContext(Context* context) { } // Don't care.
338  virtual void VisitFunction(JSFunction* function) {
339  Code* code = function->code();
340  if (!code->marked_for_deoptimization()) return;
341 
342  // Unlink this function and evict from optimized code map.
// Fall back to the shared (unoptimized) code for this function.
343  SharedFunctionInfo* shared = function->shared();
344  function->set_code(shared->code());
345 
346  if (FLAG_trace_deopt) {
347  CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
348  PrintF(scope.file(), "[deoptimizer unlinked: ");
349  function->PrintName(scope.file());
350  PrintF(scope.file(),
351  " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
352  }
353  }
354  };
355 
356  // Unlink all functions that refer to marked code.
357  SelectedCodeUnlinker unlinker;
358  VisitAllOptimizedFunctionsForContext(context, &unlinker);
359 
360  Isolate* isolate = context->GetHeap()->isolate();
361 #ifdef DEBUG
362  Code* topmost_optimized_code = NULL;
363  bool safe_to_deopt_topmost_optimized_code = false;
364  // Make sure all activations of optimized code can deopt at their current PC.
365  // The topmost optimized code has special handling because it cannot be
366  // deoptimized due to weak object dependency.
367  for (StackFrameIterator it(isolate, isolate->thread_local_top());
368  !it.done(); it.Advance()) {
369  StackFrame::Type type = it.frame()->type();
370  if (type == StackFrame::OPTIMIZED) {
371  Code* code = it.frame()->LookupCode();
372  if (FLAG_trace_deopt) {
373  JSFunction* function =
374  static_cast<OptimizedFrame*>(it.frame())->function();
375  CodeTracer::Scope scope(isolate->GetCodeTracer());
376  PrintF(scope.file(), "[deoptimizer found activation of function: ");
377  function->PrintName(scope.file());
378  PrintF(scope.file(),
379  " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
380  }
// A frame is safe to deopt when its current PC sits on a safepoint that
// carries a deoptimization index.
381  SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
382  int deopt_index = safepoint.deoptimization_index();
383  bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
384  CHECK(topmost_optimized_code == NULL || safe_to_deopt);
385  if (topmost_optimized_code == NULL) {
386  topmost_optimized_code = code;
387  safe_to_deopt_topmost_optimized_code = safe_to_deopt;
388  }
389  }
390  }
391 #endif
392 
393  // Move marked code from the optimized code list to the deoptimized
394  // code list, collecting them into a ZoneList.
395  Zone zone(isolate);
396  ZoneList<Code*> codes(10, &zone);
397 
398  // Walk over all optimized code objects in this native context.
399  Code* prev = NULL;
400  Object* element = context->OptimizedCodeListHead();
401  while (!element->IsUndefined()) {
402  Code* code = Code::cast(element);
403  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
404  Object* next = code->next_code_link();
405  if (code->marked_for_deoptimization()) {
406  // Put the code into the list for later patching.
407  codes.Add(code, &zone);
408 
409  if (prev != NULL) {
410  // Skip this code in the optimized code list.
411  prev->set_next_code_link(next);
412  } else {
413  // There was no previous node, the next node is the new head.
414  context->SetOptimizedCodeListHead(next);
415  }
416 
417  // Move the code to the _deoptimized_ code list.
418  code->set_next_code_link(context->DeoptimizedCodeListHead());
419  context->SetDeoptimizedCodeListHead(code);
420  } else {
421  // Not marked; preserve this element.
422  prev = code;
423  }
424  element = next;
425  }
426 
427  // TODO(titzer): we need a handle scope only because of the macro assembler,
428  // which is only used in EnsureCodeForDeoptimizationEntry.
429  HandleScope scope(isolate);
430 
431  // Now patch all the codes for deoptimization.
432  for (int i = 0; i < codes.length(); i++) {
433 #ifdef DEBUG
434  if (codes[i] == topmost_optimized_code) {
435  ASSERT(safe_to_deopt_topmost_optimized_code);
436  }
437 #endif
438  // It is finally time to die, code object.
439  // Do platform-specific patching to force any activations to lazy deopt.
440  PatchCodeForDeoptimization(isolate, codes[i]);
441 
442  // We might be in the middle of incremental marking with compaction.
443  // Tell collector to treat this code object in a special way and
444  // ignore all slots that might have been recorded on it.
445  isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
446  }
447 }
448 
449 
// Marks and then deoptimizes ALL optimized code in every native context of
// the isolate.
// NOTE(review): original line 450 (the signature line,
// void Deoptimizer::DeoptimizeAll(Isolate* isolate)) is missing from this
// listing.
451  if (FLAG_trace_deopt) {
452  CodeTracer::Scope scope(isolate->GetCodeTracer());
453  PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
454  }
455  DisallowHeapAllocation no_allocation;
456  // For all contexts, mark all code, then deoptimize.
457  Object* context = isolate->heap()->native_contexts_list();
458  while (!context->IsUndefined()) {
459  Context* native_context = Context::cast(context);
460  MarkAllCodeForContext(native_context);
461  DeoptimizeMarkedCodeForContext(native_context);
462  context = native_context->get(Context::NEXT_CONTEXT_LINK);
463  }
464 }
465 
466 
// Deoptimizes only code that was previously flagged via
// set_marked_for_deoptimization(), in every native context.
// NOTE(review): original line 467 (the signature line,
// void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate)) is missing from
// this listing.
468  if (FLAG_trace_deopt) {
469  CodeTracer::Scope scope(isolate->GetCodeTracer());
470  PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
471  }
472  DisallowHeapAllocation no_allocation;
473  // For all contexts, deoptimize code already marked.
474  Object* context = isolate->heap()->native_contexts_list();
475  while (!context->IsUndefined()) {
476  Context* native_context = Context::cast(context);
477  DeoptimizeMarkedCodeForContext(native_context);
478  context = native_context->get(Context::NEXT_CONTEXT_LINK);
479  }
480 }
481 
482 
// Deoptimizes all optimized code belonging to the native context of the
// given global object (resolving a global proxy to its prototype first).
// NOTE(review): original line 483 (the signature line,
// void Deoptimizer::DeoptimizeGlobalObject(JSObject* object)) is missing
// from this listing.
484  if (FLAG_trace_deopt) {
485  CodeTracer::Scope scope(object->GetHeap()->isolate()->GetCodeTracer());
486  PrintF(scope.file(), "[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
487  reinterpret_cast<intptr_t>(object));
488  }
// A JSGlobalProxy delegates to the real global object via its prototype.
489  if (object->IsJSGlobalProxy()) {
490  Object* proto = object->GetPrototype();
491  ASSERT(proto->IsJSGlobalObject());
492  Context* native_context = GlobalObject::cast(proto)->native_context();
493  MarkAllCodeForContext(native_context);
494  DeoptimizeMarkedCodeForContext(native_context);
495  } else if (object->IsGlobalObject()) {
496  Context* native_context = GlobalObject::cast(object)->native_context();
497  MarkAllCodeForContext(native_context);
498  DeoptimizeMarkedCodeForContext(native_context);
499  }
500 }
501 
502 
503 void Deoptimizer::MarkAllCodeForContext(Context* context) {
504  Object* element = context->OptimizedCodeListHead();
505  while (!element->IsUndefined()) {
506  Code* code = Code::cast(element);
507  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
508  code->set_marked_for_deoptimization(true);
509  element = code->next_code_link();
510  }
511 }
512 
513 
514 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
515  Code* code = function->code();
516  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
517  // Mark the code for deoptimization and unlink any functions that also
518  // refer to that code. The code cannot be shared across native contexts,
519  // so we only need to search one.
520  code->set_marked_for_deoptimization(true);
521  DeoptimizeMarkedCodeForContext(function->context()->native_context());
522  }
523 }
524 
525 
// Static entry point: delegates to DoComputeOutputFrames() on the given
// deoptimizer instance.
// NOTE(review): original line 526 (the signature line, presumably
// void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer)) is
// missing from this listing — confirm against upstream.
527  deoptimizer->DoComputeOutputFrames();
528 }
529 
530 
// Decides whether deopt tracing is enabled for a given bailout type and
// frame type: stub frames follow --trace-stub-failures, everything else
// follows --trace-deopt.
// NOTE(review): original line 531 (the signature line, presumably
// bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, ...)) is
// missing from this listing.
532  StackFrame::Type frame_type) {
533  switch (deopt_type) {
534  case EAGER:
535  case SOFT:
536  case LAZY:
537  case DEBUGGER:
538  return (frame_type == StackFrame::STUB)
539  ? FLAG_trace_stub_failures
540  : FLAG_trace_deopt;
541  }
// All enum values are handled above; reaching here is a programming error.
542  UNREACHABLE();
543  return false;
544 }
545 
546 
// Maps a bailout type to the short label used in trace output
// (e.g. "[deoptimizing (DEOPT eager): ...]").
// NOTE(review): original line 547 (the signature line, presumably
// const char* Deoptimizer::MessageFor(BailoutType type)) is missing from
// this listing.
548  switch (type) {
549  case EAGER: return "eager";
550  case SOFT: return "soft";
551  case LAZY: return "lazy";
552  case DEBUGGER: return "debugger";
553  }
554  UNREACHABLE();
555  return NULL;
556 }
557 
558 
// Deoptimizer constructor: records the bailout parameters, updates per-
// function deopt counters, locates the optimized code being abandoned, and
// allocates the (GC-unsafe) input frame description.
// NOTE(review): original line 615 (presumably "? StackFrame::STUB" in the
// frame_type conditional below) is missing from this listing.
559 Deoptimizer::Deoptimizer(Isolate* isolate,
560  JSFunction* function,
561  BailoutType type,
562  unsigned bailout_id,
563  Address from,
564  int fp_to_sp_delta,
565  Code* optimized_code)
566  : isolate_(isolate),
567  function_(function),
568  bailout_id_(bailout_id),
569  bailout_type_(type),
570  from_(from),
571  fp_to_sp_delta_(fp_to_sp_delta),
572  has_alignment_padding_(0),
573  input_(NULL),
574  output_count_(0),
575  jsframe_count_(0),
576  output_(NULL),
577  deferred_objects_tagged_values_(0),
578  deferred_objects_double_values_(0),
579  deferred_objects_(0),
580  deferred_heap_numbers_(0),
581  jsframe_functions_(0),
582  jsframe_has_adapted_arguments_(0),
583  materialized_values_(NULL),
584  materialized_objects_(NULL),
585  materialization_value_index_(0),
586  materialization_object_index_(0),
587  trace_scope_(NULL) {
588  // For COMPILED_STUBs called from builtins, the function pointer is a SMI
589  // indicating an internal frame.
590  if (function->IsSmi()) {
591  function = NULL;
592  }
593  ASSERT(from != NULL);
594  if (function != NULL && function->IsOptimized()) {
595  function->shared()->increment_deopt_count();
596  if (bailout_type_ == Deoptimizer::SOFT) {
597  isolate->counters()->soft_deopts_executed()->Increment();
598  // Soft deopts shouldn't count against the overall re-optimization count
599  // that can eventually lead to disabling optimization for a function.
600  int opt_count = function->shared()->opt_count();
601  if (opt_count > 0) opt_count--;
602  function->shared()->set_opt_count(opt_count);
603  }
604  }
605  compiled_code_ = FindOptimizedCode(function, optimized_code);
606 
607 #if DEBUG
608  ASSERT(compiled_code_ != NULL);
609  if (type == EAGER || type == SOFT || type == LAZY) {
610  ASSERT(compiled_code_->kind() != Code::FUNCTION);
611  }
612 #endif
613 
614  StackFrame::Type frame_type = function == NULL
616  : StackFrame::JAVA_SCRIPT;
// Tracing is decided once here; trace_scope_ stays NULL when disabled.
617  trace_scope_ = TraceEnabledFor(type, frame_type) ?
618  new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
619 #ifdef DEBUG
620  CHECK(AllowHeapAllocation::IsAllowed());
621  disallow_heap_allocation_ = new DisallowHeapAllocation();
622 #endif // DEBUG
623  unsigned size = ComputeInputFrameSize();
624  input_ = new(size) FrameDescription(size, function);
625  input_->SetFrameType(frame_type);
626 }
627 
628 
// Locates the optimized Code object being deoptimized. For EAGER/SOFT/LAZY
// bailouts the code is found from the return address |from_|; for DEBUGGER
// bailouts the caller supplies it explicitly.
// NOTE(review): original line 640 (presumably "case Deoptimizer::DEBUGGER:")
// is missing from this listing — the ASSERT/return below belong to that
// case.
629 Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
630  Code* optimized_code) {
631  switch (bailout_type_) {
632  case Deoptimizer::SOFT:
633  case Deoptimizer::EAGER:
634  case Deoptimizer::LAZY: {
635  Code* compiled_code = FindDeoptimizingCode(from_);
636  return (compiled_code == NULL)
637  ? static_cast<Code*>(isolate_->FindCodeObject(from_))
638  : compiled_code;
639  }
641  ASSERT(optimized_code->contains(from_));
642  return optimized_code;
643  }
644  UNREACHABLE();
645  return NULL;
646 }
647 
648 
649 void Deoptimizer::PrintFunctionName() {
650  if (function_->IsJSFunction()) {
651  function_->PrintName(trace_scope_->file());
652  } else {
653  PrintF(trace_scope_->file(),
654  "%s", Code::Kind2String(compiled_code_->kind()));
655  }
656 }
657 
658 
// Deoptimizer destructor: by this point DeleteFrameDescriptions() must have
// freed the frame descriptions and the DEBUG allocation guard.
// NOTE(review): original line 659 (the destructor's signature line,
// Deoptimizer::~Deoptimizer()) is missing from this listing.
660  ASSERT(input_ == NULL && output_ == NULL);
661  ASSERT(disallow_heap_allocation_ == NULL);
662  delete trace_scope_;
663 }
664 
665 
666 void Deoptimizer::DeleteFrameDescriptions() {
667  delete input_;
668  for (int i = 0; i < output_count_; ++i) {
669  if (output_[i] != input_) delete output_[i];
670  }
671  delete[] output_;
672  input_ = NULL;
673  output_ = NULL;
674 #ifdef DEBUG
675  CHECK(!AllowHeapAllocation::IsAllowed());
676  CHECK(disallow_heap_allocation_ != NULL);
677  delete disallow_heap_allocation_;
678  disallow_heap_allocation_ = NULL;
679 #endif // DEBUG
680 }
681 
682 
// Returns the address of deopt table entry |id| for the given bailout
// type, optionally generating the entry code first.
// NOTE(review): this listing is missing original lines 683 (the signature
// line, Address Deoptimizer::GetDeoptimizationEntry(Isolate*, ...)), 692
// and 695 (presumably ASSERTs in the else branch / before the lookup) —
// restore from upstream deoptimizer.cc.
684  int id,
685  BailoutType type,
686  GetEntryMode mode) {
687  ASSERT(id >= 0);
688  if (id >= kMaxNumberOfEntries) return NULL;
689  if (mode == ENSURE_ENTRY_CODE) {
690  EnsureCodeForDeoptimizationEntry(isolate, type, id);
691  } else {
693  }
694  DeoptimizerData* data = isolate->deoptimizer_data();
696  MemoryChunk* base = data->deopt_entry_code_[type];
// Entries are laid out back-to-back, each table_entry_size_ bytes long.
697  return base->area_start() + (id * table_entry_size_);
698 }
699 
700 
// Maps an address inside the deopt entry table back to its entry id.
// NOTE(review): this listing is missing original lines 701 (the signature
// line, int Deoptimizer::GetDeoptimizationId(Isolate*, ...)) and 710
// (presumably "return kNotDeoptimizationEntry;" inside the if).
// NOTE(review): |base| is dereferenced via area_start() BEFORE the
// "base == NULL" check below, so the NULL guard cannot take effect —
// confirm against upstream whether base can actually be NULL here.
702  Address addr,
703  BailoutType type) {
704  DeoptimizerData* data = isolate->deoptimizer_data();
705  MemoryChunk* base = data->deopt_entry_code_[type];
706  Address start = base->area_start();
707  if (base == NULL ||
708  addr < start ||
709  addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
711  }
// A valid entry address must be exactly on an entry boundary.
712  ASSERT_EQ(0,
713  static_cast<int>(addr - start) % table_entry_size_);
714  return static_cast<int>(addr - start) / table_entry_size_;
715 }
716 
717 
// Looks up the pc-and-state word for the given AST (bailout) id in the
// deoptimization output data; fatals with diagnostics when absent.
// NOTE(review): original line 718 (the signature line, presumably
// int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, ...)) is
// missing from this listing.
719  BailoutId id,
720  SharedFunctionInfo* shared) {
721  // TODO(kasperl): For now, we do a simple linear search for the PC
722  // offset associated with the given node id. This should probably be
723  // changed to a binary search.
724  int length = data->DeoptPoints();
725  for (int i = 0; i < length; i++) {
726  if (data->AstId(i) == id) {
727  return data->PcAndState(i)->value();
728  }
729  }
// Lookup failed: dump as much context as possible before aborting, since
// this is an unrecoverable internal error.
730  PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
731  PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
732  // Print the source code if available.
733  HeapStringAllocator string_allocator;
734  StringStream stream(&string_allocator);
735  shared->SourceCodePrint(&stream, -1);
736  PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
737 
738  FATAL("unable to find pc offset during deoptimization");
739  return -1;
740 }
741 
742 
// Counts the total number of Code objects currently on the deoptimized-code
// lists of all native contexts in the isolate.
// NOTE(review): original line 743 (the signature line, presumably
// int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate)) is missing
// from this listing.
744  int length = 0;
745  // Count all entries in the deoptimizing code list of every context.
746  Object* context = isolate->heap()->native_contexts_list();
747  while (!context->IsUndefined()) {
748  Context* native_context = Context::cast(context);
749  Object* element = native_context->DeoptimizedCodeListHead();
750  while (!element->IsUndefined()) {
751  Code* code = Code::cast(element);
752  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
753  length++;
754  element = code->next_code_link();
755  }
756  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
757  }
758  return length;
759 }
760 
761 
762 // We rely on this function not causing a GC. It is called from generated code
763 // without having a real stack frame in place.
// Drives the whole input-frame to output-frame(s) translation: reads the
// translation records for this bailout id, allocates one FrameDescription
// per output frame, and dispatches to the per-frame-type builders.
// NOTE(review): this listing is missing original lines 847 (presumably
// "case Translation::REGISTER:") and 879 (presumably the
// "FullCodeGenerator::State2String(" call line) — restore from upstream.
764 void Deoptimizer::DoComputeOutputFrames() {
765  // Print some helpful diagnostic information.
766  if (FLAG_log_timer_events &&
767  compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
768  LOG(isolate(), CodeDeoptEvent(compiled_code_));
769  }
770  ElapsedTimer timer;
771 
772  // Determine basic deoptimization information. The optimized frame is
773  // described by the input data.
774  DeoptimizationInputData* input_data =
775  DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
776 
777  if (trace_scope_ != NULL) {
778  timer.Start();
779  PrintF(trace_scope_->file(),
780  "[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
781  MessageFor(bailout_type_),
782  reinterpret_cast<intptr_t>(function_));
783  PrintFunctionName();
784  PrintF(trace_scope_->file(),
785  " (opt #%d) @%d, FP to SP delta: %d]\n",
786  input_data->OptimizationId()->value(),
787  bailout_id_,
788  fp_to_sp_delta_);
789  if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
790  compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
791  }
792  }
793 
794  BailoutId node_id = input_data->AstId(bailout_id_);
795  ByteArray* translations = input_data->TranslationByteArray();
796  unsigned translation_index =
797  input_data->TranslationIndex(bailout_id_)->value();
798 
799  // Do the input frame to output frame(s) translation.
800  TranslationIterator iterator(translations, translation_index);
// Every translation stream starts with a BEGIN record.
801  Translation::Opcode opcode =
802  static_cast<Translation::Opcode>(iterator.Next());
803  ASSERT(Translation::BEGIN == opcode);
804  USE(opcode);
805  // Read the number of output frames and allocate an array for their
806  // descriptions.
807  int count = iterator.Next();
808  iterator.Next(); // Drop JS frames count.
809  ASSERT(output_ == NULL);
810  output_ = new FrameDescription*[count];
811  for (int i = 0; i < count; ++i) {
812  output_[i] = NULL;
813  }
814  output_count_ = count;
815 
816  Register fp_reg = JavaScriptFrame::fp_register();
817  stack_fp_ = reinterpret_cast<Address>(
818  input_->GetRegister(fp_reg.code()) +
819  has_alignment_padding_ * kPointerSize);
820 
821  // Translate each output frame.
822  for (int i = 0; i < count; ++i) {
823  // Read the ast node id, function, and frame height for this output frame.
824  Translation::Opcode opcode =
825  static_cast<Translation::Opcode>(iterator.Next());
826  switch (opcode) {
827  case Translation::JS_FRAME:
828  DoComputeJSFrame(&iterator, i);
829  jsframe_count_++;
830  break;
831  case Translation::ARGUMENTS_ADAPTOR_FRAME:
832  DoComputeArgumentsAdaptorFrame(&iterator, i);
833  break;
834  case Translation::CONSTRUCT_STUB_FRAME:
835  DoComputeConstructStubFrame(&iterator, i);
836  break;
837  case Translation::GETTER_STUB_FRAME:
838  DoComputeAccessorStubFrame(&iterator, i, false);
839  break;
840  case Translation::SETTER_STUB_FRAME:
841  DoComputeAccessorStubFrame(&iterator, i, true);
842  break;
843  case Translation::COMPILED_STUB_FRAME:
844  DoComputeCompiledStubFrame(&iterator, i);
845  break;
// Value records never appear at frame level; treat them as errors.
846  case Translation::BEGIN:
848  case Translation::INT32_REGISTER:
849  case Translation::UINT32_REGISTER:
850  case Translation::DOUBLE_REGISTER:
851  case Translation::STACK_SLOT:
852  case Translation::INT32_STACK_SLOT:
853  case Translation::UINT32_STACK_SLOT:
854  case Translation::DOUBLE_STACK_SLOT:
855  case Translation::LITERAL:
856  case Translation::ARGUMENTS_OBJECT:
857  default:
858  UNREACHABLE();
859  break;
860  }
861  }
862 
863  // Print some helpful diagnostic information.
864  if (trace_scope_ != NULL) {
865  double ms = timer.Elapsed().InMillisecondsF();
866  int index = output_count_ - 1; // Index of the topmost frame.
867  JSFunction* function = output_[index]->GetFunction();
868  PrintF(trace_scope_->file(),
869  "[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
870  MessageFor(bailout_type_),
871  reinterpret_cast<intptr_t>(function));
872  PrintFunctionName();
873  PrintF(trace_scope_->file(),
874  " @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
875  " took %0.3f ms]\n",
876  bailout_id_,
877  node_id.ToInt(),
878  output_[index]->GetPc(),
880  static_cast<FullCodeGenerator::State>(
881  output_[index]->GetState()->value())),
882  has_alignment_padding_ ? "with padding" : "no padding",
883  ms);
884  }
885 }
886 
887 
888 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
889  int frame_index) {
890  BailoutId node_id = BailoutId(iterator->Next());
891  JSFunction* function;
892  if (frame_index != 0) {
893  function = JSFunction::cast(ComputeLiteral(iterator->Next()));
894  } else {
895  int closure_id = iterator->Next();
896  USE(closure_id);
897  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
898  function = function_;
899  }
900  unsigned height = iterator->Next();
901  unsigned height_in_bytes = height * kPointerSize;
902  if (trace_scope_ != NULL) {
903  PrintF(trace_scope_->file(), " translating ");
904  function->PrintName(trace_scope_->file());
905  PrintF(trace_scope_->file(),
906  " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
907  }
908 
909  // The 'fixed' part of the frame consists of the incoming parameters and
910  // the part described by JavaScriptFrameConstants.
911  unsigned fixed_frame_size = ComputeFixedSize(function);
912  unsigned input_frame_size = input_->GetFrameSize();
913  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
914 
915  // Allocate and store the output frame description.
916  FrameDescription* output_frame =
917  new(output_frame_size) FrameDescription(output_frame_size, function);
918  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
919 
920  bool is_bottommost = (0 == frame_index);
921  bool is_topmost = (output_count_ - 1 == frame_index);
922  ASSERT(frame_index >= 0 && frame_index < output_count_);
923  ASSERT(output_[frame_index] == NULL);
924  output_[frame_index] = output_frame;
925 
926  // The top address for the bottommost output frame can be computed from
927  // the input frame pointer and the output frame's height. For all
928  // subsequent output frames, it can be computed from the previous one's
929  // top address and the current frame's size.
930  Register fp_reg = JavaScriptFrame::fp_register();
931  intptr_t top_address;
932  if (is_bottommost) {
933  // Determine whether the input frame contains alignment padding.
934  has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
935  // 2 = context and function in the frame.
936  // If the optimized frame had alignment padding, adjust the frame pointer
937  // to point to the new position of the old frame pointer after padding
938  // is removed. Subtract 2 * kPointerSize for the context and function slots.
939  top_address = input_->GetRegister(fp_reg.code()) -
941  height_in_bytes + has_alignment_padding_ * kPointerSize;
942  } else {
943  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
944  }
945  output_frame->SetTop(top_address);
946 
947  // Compute the incoming parameter translation.
948  int parameter_count = function->shared()->formal_parameter_count() + 1;
949  unsigned output_offset = output_frame_size;
950  unsigned input_offset = input_frame_size;
951  for (int i = 0; i < parameter_count; ++i) {
952  output_offset -= kPointerSize;
953  DoTranslateCommand(iterator, frame_index, output_offset);
954  }
955  input_offset -= (parameter_count * kPointerSize);
956 
957  // There are no translation commands for the caller's pc and fp, the
958  // context, and the function. Synthesize their values and set them up
959  // explicitly.
960  //
961  // The caller's pc for the bottommost output frame is the same as in the
962  // input frame. For all subsequent output frames, it can be read from the
963  // previous one. This frame's pc can be computed from the non-optimized
964  // function code and AST id of the bailout.
965  output_offset -= kPCOnStackSize;
966  input_offset -= kPCOnStackSize;
967  intptr_t value;
968  if (is_bottommost) {
969  value = input_->GetFrameSlot(input_offset);
970  } else {
971  value = output_[frame_index - 1]->GetPc();
972  }
973  output_frame->SetCallerPc(output_offset, value);
974  if (trace_scope_ != NULL) {
975  PrintF(trace_scope_->file(),
976  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
977  V8PRIxPTR " ; caller's pc\n",
978  top_address + output_offset, output_offset, value);
979  }
980 
981  // The caller's frame pointer for the bottommost output frame is the same
982  // as in the input frame. For all subsequent output frames, it can be
983  // read from the previous one. Also compute and set this frame's frame
984  // pointer.
985  output_offset -= kFPOnStackSize;
986  input_offset -= kFPOnStackSize;
987  if (is_bottommost) {
988  value = input_->GetFrameSlot(input_offset);
989  } else {
990  value = output_[frame_index - 1]->GetFp();
991  }
992  output_frame->SetCallerFp(output_offset, value);
993  intptr_t fp_value = top_address + output_offset;
994  ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
995  has_alignment_padding_ * kPointerSize) == fp_value);
996  output_frame->SetFp(fp_value);
997  if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
998  if (trace_scope_ != NULL) {
999  PrintF(trace_scope_->file(),
1000  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1001  V8PRIxPTR " ; caller's fp\n",
1002  fp_value, output_offset, value);
1003  }
1004  ASSERT(!is_bottommost || !has_alignment_padding_ ||
1005  (fp_value & kPointerSize) != 0);
1006 
1007  if (FLAG_enable_ool_constant_pool) {
1008  // For the bottommost output frame the constant pool pointer can be gotten
1009  // from the input frame. For subsequent output frames, it can be read from
1010  // the previous frame.
1011  output_offset -= kPointerSize;
1012  input_offset -= kPointerSize;
1013  if (is_bottommost) {
1014  value = input_->GetFrameSlot(input_offset);
1015  } else {
1016  value = output_[frame_index - 1]->GetConstantPool();
1017  }
1018  output_frame->SetCallerConstantPool(output_offset, value);
1019  if (trace_scope_) {
1020  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1021  V8PRIxPTR "; caller's constant_pool\n",
1022  top_address + output_offset, output_offset, value);
1023  }
1024  }
1025 
1026  // For the bottommost output frame the context can be gotten from the input
1027  // frame. For all subsequent output frames it can be gotten from the function
1028  // so long as we don't inline functions that need local contexts.
1029  Register context_reg = JavaScriptFrame::context_register();
1030  output_offset -= kPointerSize;
1031  input_offset -= kPointerSize;
1032  if (is_bottommost) {
1033  value = input_->GetFrameSlot(input_offset);
1034  } else {
1035  value = reinterpret_cast<intptr_t>(function->context());
1036  }
1037  output_frame->SetFrameSlot(output_offset, value);
1038  output_frame->SetContext(value);
1039  if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
1040  if (trace_scope_ != NULL) {
1041  PrintF(trace_scope_->file(),
1042  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1043  V8PRIxPTR "; context\n",
1044  top_address + output_offset, output_offset, value);
1045  }
1046 
1047  // The function was mentioned explicitly in the BEGIN_FRAME.
1048  output_offset -= kPointerSize;
1049  input_offset -= kPointerSize;
1050  value = reinterpret_cast<intptr_t>(function);
1051  // The function for the bottommost output frame should also agree with the
1052  // input frame.
1053  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
1054  output_frame->SetFrameSlot(output_offset, value);
1055  if (trace_scope_ != NULL) {
1056  PrintF(trace_scope_->file(),
1057  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1058  V8PRIxPTR "; function\n",
1059  top_address + output_offset, output_offset, value);
1060  }
1061 
1062  // Translate the rest of the frame.
1063  for (unsigned i = 0; i < height; ++i) {
1064  output_offset -= kPointerSize;
1065  DoTranslateCommand(iterator, frame_index, output_offset);
1066  }
1067  ASSERT(0 == output_offset);
1068 
1069  // Compute this frame's PC, state, and continuation.
1070  Code* non_optimized_code = function->shared()->code();
1071  FixedArray* raw_data = non_optimized_code->deoptimization_data();
1072  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
1073  Address start = non_optimized_code->instruction_start();
1074  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
1075  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
1076  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
1077  output_frame->SetPc(pc_value);
1078 
1079  // Update constant pool.
1080  if (FLAG_enable_ool_constant_pool) {
1081  intptr_t constant_pool_value =
1082  reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
1083  output_frame->SetConstantPool(constant_pool_value);
1084  if (is_topmost) {
1085  Register constant_pool_reg =
1087  output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
1088  }
1089  }
1090 
1091  FullCodeGenerator::State state =
1093  output_frame->SetState(Smi::FromInt(state));
1094 
1095  // Set the continuation for the topmost frame.
1096  if (is_topmost && bailout_type_ != DEBUGGER) {
1097  Builtins* builtins = isolate_->builtins();
1098  Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
1099  if (bailout_type_ == LAZY) {
1100  continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
1101  } else if (bailout_type_ == SOFT) {
1102  continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
1103  } else {
1104  ASSERT(bailout_type_ == EAGER);
1105  }
1106  output_frame->SetContinuation(
1107  reinterpret_cast<intptr_t>(continuation->entry()));
1108  }
1109 }
1110 
1111 
1112 void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
1113  int frame_index) {
1114  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
1115  unsigned height = iterator->Next();
1116  unsigned height_in_bytes = height * kPointerSize;
1117  if (trace_scope_ != NULL) {
1118  PrintF(trace_scope_->file(),
1119  " translating arguments adaptor => height=%d\n", height_in_bytes);
1120  }
1121 
1122  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
1123  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
1124 
1125  // Allocate and store the output frame description.
1126  FrameDescription* output_frame =
1127  new(output_frame_size) FrameDescription(output_frame_size, function);
1128  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
1129 
1130  // Arguments adaptor can not be topmost or bottommost.
1131  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
1132  ASSERT(output_[frame_index] == NULL);
1133  output_[frame_index] = output_frame;
1134 
1135  // The top address of the frame is computed from the previous
1136  // frame's top and this frame's size.
1137  intptr_t top_address;
1138  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
1139  output_frame->SetTop(top_address);
1140 
1141  // Compute the incoming parameter translation.
1142  int parameter_count = height;
1143  unsigned output_offset = output_frame_size;
1144  for (int i = 0; i < parameter_count; ++i) {
1145  output_offset -= kPointerSize;
1146  DoTranslateCommand(iterator, frame_index, output_offset);
1147  }
1148 
1149  // Read caller's PC from the previous frame.
1150  output_offset -= kPCOnStackSize;
1151  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
1152  output_frame->SetCallerPc(output_offset, callers_pc);
1153  if (trace_scope_ != NULL) {
1154  PrintF(trace_scope_->file(),
1155  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1156  V8PRIxPTR " ; caller's pc\n",
1157  top_address + output_offset, output_offset, callers_pc);
1158  }
1159 
1160  // Read caller's FP from the previous frame, and set this frame's FP.
1161  output_offset -= kFPOnStackSize;
1162  intptr_t value = output_[frame_index - 1]->GetFp();
1163  output_frame->SetCallerFp(output_offset, value);
1164  intptr_t fp_value = top_address + output_offset;
1165  output_frame->SetFp(fp_value);
1166  if (trace_scope_ != NULL) {
1167  PrintF(trace_scope_->file(),
1168  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1169  V8PRIxPTR " ; caller's fp\n",
1170  fp_value, output_offset, value);
1171  }
1172 
1173  if (FLAG_enable_ool_constant_pool) {
1174  // Read the caller's constant pool from the previous frame.
1175  output_offset -= kPointerSize;
1176  value = output_[frame_index - 1]->GetConstantPool();
1177  output_frame->SetCallerConstantPool(output_offset, value);
1178  if (trace_scope_) {
1179  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1180  V8PRIxPTR "; caller's constant_pool\n",
1181  top_address + output_offset, output_offset, value);
1182  }
1183  }
1184 
1185  // A marker value is used in place of the context.
1186  output_offset -= kPointerSize;
1187  intptr_t context = reinterpret_cast<intptr_t>(
1189  output_frame->SetFrameSlot(output_offset, context);
1190  if (trace_scope_ != NULL) {
1191  PrintF(trace_scope_->file(),
1192  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1193  V8PRIxPTR " ; context (adaptor sentinel)\n",
1194  top_address + output_offset, output_offset, context);
1195  }
1196 
1197  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
1198  output_offset -= kPointerSize;
1199  value = reinterpret_cast<intptr_t>(function);
1200  output_frame->SetFrameSlot(output_offset, value);
1201  if (trace_scope_ != NULL) {
1202  PrintF(trace_scope_->file(),
1203  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1204  V8PRIxPTR " ; function\n",
1205  top_address + output_offset, output_offset, value);
1206  }
1207 
1208  // Number of incoming arguments.
1209  output_offset -= kPointerSize;
1210  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
1211  output_frame->SetFrameSlot(output_offset, value);
1212  if (trace_scope_ != NULL) {
1213  PrintF(trace_scope_->file(),
1214  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1215  V8PRIxPTR " ; argc (%d)\n",
1216  top_address + output_offset, output_offset, value, height - 1);
1217  }
1218 
1219  ASSERT(0 == output_offset);
1220 
1221  Builtins* builtins = isolate_->builtins();
1222  Code* adaptor_trampoline =
1223  builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
1224  intptr_t pc_value = reinterpret_cast<intptr_t>(
1225  adaptor_trampoline->instruction_start() +
1226  isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
1227  output_frame->SetPc(pc_value);
1228  if (FLAG_enable_ool_constant_pool) {
1229  intptr_t constant_pool_value =
1230  reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
1231  output_frame->SetConstantPool(constant_pool_value);
1232  }
1233 }
1234 
1235 
1236 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
1237  int frame_index) {
1238  Builtins* builtins = isolate_->builtins();
1239  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
1240  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
1241  unsigned height = iterator->Next();
1242  unsigned height_in_bytes = height * kPointerSize;
1243  if (trace_scope_ != NULL) {
1244  PrintF(trace_scope_->file(),
1245  " translating construct stub => height=%d\n", height_in_bytes);
1246  }
1247 
1248  unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
1249  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
1250 
1251  // Allocate and store the output frame description.
1252  FrameDescription* output_frame =
1253  new(output_frame_size) FrameDescription(output_frame_size, function);
1254  output_frame->SetFrameType(StackFrame::CONSTRUCT);
1255 
1256  // Construct stub can not be topmost or bottommost.
1257  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
1258  ASSERT(output_[frame_index] == NULL);
1259  output_[frame_index] = output_frame;
1260 
1261  // The top address of the frame is computed from the previous
1262  // frame's top and this frame's size.
1263  intptr_t top_address;
1264  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
1265  output_frame->SetTop(top_address);
1266 
1267  // Compute the incoming parameter translation.
1268  int parameter_count = height;
1269  unsigned output_offset = output_frame_size;
1270  for (int i = 0; i < parameter_count; ++i) {
1271  output_offset -= kPointerSize;
1272  int deferred_object_index = deferred_objects_.length();
1273  DoTranslateCommand(iterator, frame_index, output_offset);
1274  // The allocated receiver of a construct stub frame is passed as the
1275  // receiver parameter through the translation. It might be encoding
1276  // a captured object, patch the slot address for a captured object.
1277  if (i == 0 && deferred_objects_.length() > deferred_object_index) {
1278  ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
1279  deferred_objects_[deferred_object_index].patch_slot_address(top_address);
1280  }
1281  }
1282 
1283  // Read caller's PC from the previous frame.
1284  output_offset -= kPCOnStackSize;
1285  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
1286  output_frame->SetCallerPc(output_offset, callers_pc);
1287  if (trace_scope_ != NULL) {
1288  PrintF(trace_scope_->file(),
1289  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1290  V8PRIxPTR " ; caller's pc\n",
1291  top_address + output_offset, output_offset, callers_pc);
1292  }
1293 
1294  // Read caller's FP from the previous frame, and set this frame's FP.
1295  output_offset -= kFPOnStackSize;
1296  intptr_t value = output_[frame_index - 1]->GetFp();
1297  output_frame->SetCallerFp(output_offset, value);
1298  intptr_t fp_value = top_address + output_offset;
1299  output_frame->SetFp(fp_value);
1300  if (trace_scope_ != NULL) {
1301  PrintF(trace_scope_->file(),
1302  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1303  V8PRIxPTR " ; caller's fp\n",
1304  fp_value, output_offset, value);
1305  }
1306 
1307  if (FLAG_enable_ool_constant_pool) {
1308  // Read the caller's constant pool from the previous frame.
1309  output_offset -= kPointerSize;
1310  value = output_[frame_index - 1]->GetConstantPool();
1311  output_frame->SetCallerConstantPool(output_offset, value);
1312  if (trace_scope_) {
1313  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1314  V8PRIxPTR " ; caller's constant pool\n",
1315  top_address + output_offset, output_offset, value);
1316  }
1317  }
1318 
1319  // The context can be gotten from the previous frame.
1320  output_offset -= kPointerSize;
1321  value = output_[frame_index - 1]->GetContext();
1322  output_frame->SetFrameSlot(output_offset, value);
1323  if (trace_scope_ != NULL) {
1324  PrintF(trace_scope_->file(),
1325  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1326  V8PRIxPTR " ; context\n",
1327  top_address + output_offset, output_offset, value);
1328  }
1329 
1330  // A marker value is used in place of the function.
1331  output_offset -= kPointerSize;
1332  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
1333  output_frame->SetFrameSlot(output_offset, value);
1334  if (trace_scope_ != NULL) {
1335  PrintF(trace_scope_->file(),
1336  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1337  V8PRIxPTR " ; function (construct sentinel)\n",
1338  top_address + output_offset, output_offset, value);
1339  }
1340 
1341  // The output frame reflects a JSConstructStubGeneric frame.
1342  output_offset -= kPointerSize;
1343  value = reinterpret_cast<intptr_t>(construct_stub);
1344  output_frame->SetFrameSlot(output_offset, value);
1345  if (trace_scope_ != NULL) {
1346  PrintF(trace_scope_->file(),
1347  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1348  V8PRIxPTR " ; code object\n",
1349  top_address + output_offset, output_offset, value);
1350  }
1351 
1352  // Number of incoming arguments.
1353  output_offset -= kPointerSize;
1354  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
1355  output_frame->SetFrameSlot(output_offset, value);
1356  if (trace_scope_ != NULL) {
1357  PrintF(trace_scope_->file(),
1358  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1359  V8PRIxPTR " ; argc (%d)\n",
1360  top_address + output_offset, output_offset, value, height - 1);
1361  }
1362 
1363  // Constructor function being invoked by the stub (only present on some
1364  // architectures, indicated by kConstructorOffset).
1366  output_offset -= kPointerSize;
1367  value = reinterpret_cast<intptr_t>(function);
1368  output_frame->SetFrameSlot(output_offset, value);
1369  if (trace_scope_ != NULL) {
1370  PrintF(trace_scope_->file(),
1371  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1372  V8PRIxPTR " ; constructor function\n",
1373  top_address + output_offset, output_offset, value);
1374  }
1375  }
1376 
1377  // The newly allocated object was passed as receiver in the artificial
1378  // constructor stub environment created by HEnvironment::CopyForInlining().
1379  output_offset -= kPointerSize;
1380  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
1381  output_frame->SetFrameSlot(output_offset, value);
1382  if (trace_scope_ != NULL) {
1383  PrintF(trace_scope_->file(),
1384  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1385  V8PRIxPTR " ; allocated receiver\n",
1386  top_address + output_offset, output_offset, value);
1387  }
1388 
1389  ASSERT(0 == output_offset);
1390 
1391  intptr_t pc = reinterpret_cast<intptr_t>(
1392  construct_stub->instruction_start() +
1393  isolate_->heap()->construct_stub_deopt_pc_offset()->value());
1394  output_frame->SetPc(pc);
1395  if (FLAG_enable_ool_constant_pool) {
1396  intptr_t constant_pool_value =
1397  reinterpret_cast<intptr_t>(construct_stub->constant_pool());
1398  output_frame->SetConstantPool(constant_pool_value);
1399  }
1400 }
1401 
1402 
1403 void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
1404  int frame_index,
1405  bool is_setter_stub_frame) {
1406  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
1407  // The receiver (and the implicit return value, if any) are expected in
1408  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
1409  // frame. This means that we have to use a height of 0.
1410  unsigned height = 0;
1411  unsigned height_in_bytes = height * kPointerSize;
1412  const char* kind = is_setter_stub_frame ? "setter" : "getter";
1413  if (trace_scope_ != NULL) {
1414  PrintF(trace_scope_->file(),
1415  " translating %s stub => height=%u\n", kind, height_in_bytes);
1416  }
1417 
1418  // We need 1 stack entry for the return address and enough entries for the
1419  // StackFrame::INTERNAL (FP, context, frame type, code object and constant
1420  // pool (if FLAG_enable_ool_constant_pool)- see MacroAssembler::EnterFrame).
1421  // For a setter stub frame we need one additional entry for the implicit
1422  // return value, see StoreStubCompiler::CompileStoreViaSetter.
1423  unsigned fixed_frame_entries =
1425  (is_setter_stub_frame ? 1 : 0);
1426  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
1427  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
1428 
1429  // Allocate and store the output frame description.
1430  FrameDescription* output_frame =
1431  new(output_frame_size) FrameDescription(output_frame_size, accessor);
1432  output_frame->SetFrameType(StackFrame::INTERNAL);
1433 
1434  // A frame for an accessor stub can not be the topmost or bottommost one.
1435  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
1436  ASSERT(output_[frame_index] == NULL);
1437  output_[frame_index] = output_frame;
1438 
1439  // The top address of the frame is computed from the previous frame's top and
1440  // this frame's size.
1441  intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
1442  output_frame->SetTop(top_address);
1443 
1444  unsigned output_offset = output_frame_size;
1445 
1446  // Read caller's PC from the previous frame.
1447  output_offset -= kPCOnStackSize;
1448  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
1449  output_frame->SetCallerPc(output_offset, callers_pc);
1450  if (trace_scope_ != NULL) {
1451  PrintF(trace_scope_->file(),
1452  " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
1453  " ; caller's pc\n",
1454  top_address + output_offset, output_offset, callers_pc);
1455  }
1456 
1457  // Read caller's FP from the previous frame, and set this frame's FP.
1458  output_offset -= kFPOnStackSize;
1459  intptr_t value = output_[frame_index - 1]->GetFp();
1460  output_frame->SetCallerFp(output_offset, value);
1461  intptr_t fp_value = top_address + output_offset;
1462  output_frame->SetFp(fp_value);
1463  if (trace_scope_ != NULL) {
1464  PrintF(trace_scope_->file(),
1465  " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
1466  " ; caller's fp\n",
1467  fp_value, output_offset, value);
1468  }
1469 
1470  if (FLAG_enable_ool_constant_pool) {
1471  // Read the caller's constant pool from the previous frame.
1472  output_offset -= kPointerSize;
1473  value = output_[frame_index - 1]->GetConstantPool();
1474  output_frame->SetCallerConstantPool(output_offset, value);
1475  if (trace_scope_) {
1476  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1477  V8PRIxPTR " ; caller's constant pool\n",
1478  top_address + output_offset, output_offset, value);
1479  }
1480  }
1481 
1482  // The context can be gotten from the previous frame.
1483  output_offset -= kPointerSize;
1484  value = output_[frame_index - 1]->GetContext();
1485  output_frame->SetFrameSlot(output_offset, value);
1486  if (trace_scope_ != NULL) {
1487  PrintF(trace_scope_->file(),
1488  " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
1489  " ; context\n",
1490  top_address + output_offset, output_offset, value);
1491  }
1492 
1493  // A marker value is used in place of the function.
1494  output_offset -= kPointerSize;
1495  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
1496  output_frame->SetFrameSlot(output_offset, value);
1497  if (trace_scope_ != NULL) {
1498  PrintF(trace_scope_->file(),
1499  " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
1500  " ; function (%s sentinel)\n",
1501  top_address + output_offset, output_offset, value, kind);
1502  }
1503 
1504  // Get Code object from accessor stub.
1505  output_offset -= kPointerSize;
1506  Builtins::Name name = is_setter_stub_frame ?
1507  Builtins::kStoreIC_Setter_ForDeopt :
1508  Builtins::kLoadIC_Getter_ForDeopt;
1509  Code* accessor_stub = isolate_->builtins()->builtin(name);
1510  value = reinterpret_cast<intptr_t>(accessor_stub);
1511  output_frame->SetFrameSlot(output_offset, value);
1512  if (trace_scope_ != NULL) {
1513  PrintF(trace_scope_->file(),
1514  " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
1515  " ; code object\n",
1516  top_address + output_offset, output_offset, value);
1517  }
1518 
1519  // Skip receiver.
1520  Translation::Opcode opcode =
1521  static_cast<Translation::Opcode>(iterator->Next());
1522  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
1523 
1524  if (is_setter_stub_frame) {
1525  // The implicit return value was part of the artificial setter stub
1526  // environment.
1527  output_offset -= kPointerSize;
1528  DoTranslateCommand(iterator, frame_index, output_offset);
1529  }
1530 
1531  ASSERT(0 == output_offset);
1532 
1533  Smi* offset = is_setter_stub_frame ?
1534  isolate_->heap()->setter_stub_deopt_pc_offset() :
1535  isolate_->heap()->getter_stub_deopt_pc_offset();
1536  intptr_t pc = reinterpret_cast<intptr_t>(
1537  accessor_stub->instruction_start() + offset->value());
1538  output_frame->SetPc(pc);
1539  if (FLAG_enable_ool_constant_pool) {
1540  intptr_t constant_pool_value =
1541  reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
1542  output_frame->SetConstantPool(constant_pool_value);
1543  }
1544 }
1545 
1546 
1547 void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
1548  int frame_index) {
1549  //
1550  // FROM TO
1551  // | .... | | .... |
1552  // +-------------------------+ +-------------------------+
1553  // | JSFunction continuation | | JSFunction continuation |
1554  // +-------------------------+ +-------------------------+
1555  // | | saved frame (FP) | | saved frame (FP) |
1556  // | +=========================+<-fpreg +=========================+<-fpreg
1557  // | |constant pool (if ool_cp)| |constant pool (if ool_cp)|
1558  // | +-------------------------+ +-------------------------|
1559  // | | JSFunction context | | JSFunction context |
1560  // v +-------------------------+ +-------------------------|
1561  // | COMPILED_STUB marker | | STUB_FAILURE marker |
1562  // +-------------------------+ +-------------------------+
1563  // | | | caller args.arguments_ |
1564  // | ... | +-------------------------+
1565  // | | | caller args.length_ |
1566  // |-------------------------|<-spreg +-------------------------+
1567  // | caller args pointer |
1568  // +-------------------------+
1569  // | caller stack param 1 |
1570  // parameters in registers +-------------------------+
1571  // and spilled to stack | .... |
1572  // +-------------------------+
1573  // | caller stack param n |
1574  // +-------------------------+<-spreg
1575  // reg = number of parameters
1576  // reg = failure handler address
1577  // reg = saved frame
1578  // reg = JSFunction context
1579  //
1580 
1581  ASSERT(compiled_code_->is_crankshafted() &&
1582  compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
1583  int major_key = compiled_code_->major_key();
1584  CodeStubInterfaceDescriptor* descriptor =
1585  isolate_->code_stub_interface_descriptor(major_key);
1586 
1587  // The output frame must have room for all pushed register parameters
1588  // and the standard stack frame slots. Include space for an argument
1589  // object to the callee and optionally the space to pass the argument
1590  // object to the stub failure handler.
1591  ASSERT(descriptor->register_param_count_ >= 0);
1592  int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
1593  sizeof(Arguments) + kPointerSize;
1594  int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
1595  int input_frame_size = input_->GetFrameSize();
1596  int output_frame_size = height_in_bytes + fixed_frame_size;
1597  if (trace_scope_ != NULL) {
1598  PrintF(trace_scope_->file(),
1599  " translating %s => StubFailureTrampolineStub, height=%d\n",
1600  CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
1601  height_in_bytes);
1602  }
1603 
1604  // The stub failure trampoline is a single frame.
1605  FrameDescription* output_frame =
1606  new(output_frame_size) FrameDescription(output_frame_size, NULL);
1607  output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
1608  ASSERT(frame_index == 0);
1609  output_[frame_index] = output_frame;
1610 
1611  // The top address for the output frame can be computed from the input
1612  // frame pointer and the output frame's height. Subtract space for the
1613  // context and function slots.
1614  Register fp_reg = StubFailureTrampolineFrame::fp_register();
1615  intptr_t top_address = input_->GetRegister(fp_reg.code()) -
1617  output_frame->SetTop(top_address);
1618 
1619  // Read caller's PC (JSFunction continuation) from the input frame.
1620  unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
1621  unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
1622  intptr_t value = input_->GetFrameSlot(input_frame_offset);
1623  output_frame->SetCallerPc(output_frame_offset, value);
1624  if (trace_scope_ != NULL) {
1625  PrintF(trace_scope_->file(),
1626  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1627  V8PRIxPTR " ; caller's pc\n",
1628  top_address + output_frame_offset, output_frame_offset, value);
1629  }
1630 
1631  // Read caller's FP from the input frame, and set this frame's FP.
1632  input_frame_offset -= kFPOnStackSize;
1633  value = input_->GetFrameSlot(input_frame_offset);
1634  output_frame_offset -= kFPOnStackSize;
1635  output_frame->SetCallerFp(output_frame_offset, value);
1636  intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
1637  output_frame->SetRegister(fp_reg.code(), frame_ptr);
1638  output_frame->SetFp(frame_ptr);
1639  if (trace_scope_ != NULL) {
1640  PrintF(trace_scope_->file(),
1641  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1642  V8PRIxPTR " ; caller's fp\n",
1643  top_address + output_frame_offset, output_frame_offset, value);
1644  }
1645 
1646  if (FLAG_enable_ool_constant_pool) {
1647  // Read the caller's constant pool from the input frame.
1648  input_frame_offset -= kPointerSize;
1649  value = input_->GetFrameSlot(input_frame_offset);
1650  output_frame_offset -= kPointerSize;
1651  output_frame->SetCallerConstantPool(output_frame_offset, value);
1652  if (trace_scope_) {
1653  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1654  V8PRIxPTR " ; caller's constant_pool\n",
1655  top_address + output_frame_offset, output_frame_offset, value);
1656  }
1657  }
1658 
1659  // The context can be gotten from the input frame.
1660  Register context_reg = StubFailureTrampolineFrame::context_register();
1661  input_frame_offset -= kPointerSize;
1662  value = input_->GetFrameSlot(input_frame_offset);
1663  output_frame->SetRegister(context_reg.code(), value);
1664  output_frame_offset -= kPointerSize;
1665  output_frame->SetFrameSlot(output_frame_offset, value);
1666  ASSERT(reinterpret_cast<Object*>(value)->IsContext());
1667  if (trace_scope_ != NULL) {
1668  PrintF(trace_scope_->file(),
1669  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1670  V8PRIxPTR " ; context\n",
1671  top_address + output_frame_offset, output_frame_offset, value);
1672  }
1673 
1674  // A marker value is used in place of the function.
1675  output_frame_offset -= kPointerSize;
1676  value = reinterpret_cast<intptr_t>(
1677  Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
1678  output_frame->SetFrameSlot(output_frame_offset, value);
1679  if (trace_scope_ != NULL) {
1680  PrintF(trace_scope_->file(),
1681  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1682  V8PRIxPTR " ; function (stub failure sentinel)\n",
1683  top_address + output_frame_offset, output_frame_offset, value);
1684  }
1685 
1686  intptr_t caller_arg_count = 0;
1687  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
1688 
1689  // Build the Arguments object for the caller's parameters and a pointer to it.
1690  output_frame_offset -= kPointerSize;
1691  int args_arguments_offset = output_frame_offset;
1692  intptr_t the_hole = reinterpret_cast<intptr_t>(
1693  isolate_->heap()->the_hole_value());
1694  if (arg_count_known) {
1695  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
1696  (caller_arg_count - 1) * kPointerSize;
1697  } else {
1698  value = the_hole;
1699  }
1700 
1701  output_frame->SetFrameSlot(args_arguments_offset, value);
1702  if (trace_scope_ != NULL) {
1703  PrintF(trace_scope_->file(),
1704  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1705  V8PRIxPTR " ; args.arguments %s\n",
1706  top_address + args_arguments_offset, args_arguments_offset, value,
1707  arg_count_known ? "" : "(the hole)");
1708  }
1709 
1710  output_frame_offset -= kPointerSize;
1711  int length_frame_offset = output_frame_offset;
1712  value = arg_count_known ? caller_arg_count : the_hole;
1713  output_frame->SetFrameSlot(length_frame_offset, value);
1714  if (trace_scope_ != NULL) {
1715  PrintF(trace_scope_->file(),
1716  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1717  V8PRIxPTR " ; args.length %s\n",
1718  top_address + length_frame_offset, length_frame_offset, value,
1719  arg_count_known ? "" : "(the hole)");
1720  }
1721 
1722  output_frame_offset -= kPointerSize;
1723  value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
1724  (output_frame_size - output_frame_offset) + kPointerSize;
1725  output_frame->SetFrameSlot(output_frame_offset, value);
1726  if (trace_scope_ != NULL) {
1727  PrintF(trace_scope_->file(),
1728  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1729  V8PRIxPTR " ; args*\n",
1730  top_address + output_frame_offset, output_frame_offset, value);
1731  }
1732 
1733  // Copy the register parameters to the failure frame.
1734  int arguments_length_offset = -1;
1735  for (int i = 0; i < descriptor->register_param_count_; ++i) {
1736  output_frame_offset -= kPointerSize;
1737  DoTranslateCommand(iterator, 0, output_frame_offset);
1738 
1739  if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
1740  arguments_length_offset = output_frame_offset;
1741  }
1742  }
1743 
1744  ASSERT(0 == output_frame_offset);
1745 
1746  if (!arg_count_known) {
1747  ASSERT(arguments_length_offset >= 0);
1748  // We know it's a smi because 1) the code stub guarantees the stack
1749  // parameter count is in smi range, and 2) the DoTranslateCommand in the
1750  // parameter loop above translated that to a tagged value.
1751  Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
1752  output_frame->GetFrameSlot(arguments_length_offset));
1753  caller_arg_count = smi_caller_arg_count->value();
1754  output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
1755  if (trace_scope_ != NULL) {
1756  PrintF(trace_scope_->file(),
1757  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1758  V8PRIxPTR " ; args.length\n",
1759  top_address + length_frame_offset, length_frame_offset,
1760  caller_arg_count);
1761  }
1762  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
1763  (caller_arg_count - 1) * kPointerSize;
1764  output_frame->SetFrameSlot(args_arguments_offset, value);
1765  if (trace_scope_ != NULL) {
1766  PrintF(trace_scope_->file(),
1767  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
1768  V8PRIxPTR " ; args.arguments\n",
1769  top_address + args_arguments_offset, args_arguments_offset,
1770  value);
1771  }
1772  }
1773 
1774  // Copy the double registers from the input into the output frame.
1775  CopyDoubleRegisters(output_frame);
1776 
1777  // Fill registers containing handler and number of parameters.
1778  SetPlatformCompiledStubRegisters(output_frame, descriptor);
1779 
1780  // Compute this frame's PC, state, and continuation.
1781  Code* trampoline = NULL;
1782  StubFunctionMode function_mode = descriptor->function_mode_;
1783  StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
1784  isolate_);
1785  ASSERT(trampoline != NULL);
1786  output_frame->SetPc(reinterpret_cast<intptr_t>(
1787  trampoline->instruction_start()));
1788  if (FLAG_enable_ool_constant_pool) {
1789  Register constant_pool_reg =
1791  intptr_t constant_pool_value =
1792  reinterpret_cast<intptr_t>(trampoline->constant_pool());
1793  output_frame->SetConstantPool(constant_pool_value);
1794  output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
1795  }
1796  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
1797  Code* notify_failure = NotifyStubFailureBuiltin();
1798  output_frame->SetContinuation(
1799  reinterpret_cast<intptr_t>(notify_failure->entry()));
1800 }
1801 
1802 
1803 Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
1804  int object_index = materialization_object_index_++;
1805  ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
1806  const int length = desc.object_length();
1807 
1808  if (desc.duplicate_object() >= 0) {
1809  // Found a previously materialized object by de-duplication.
1810  object_index = desc.duplicate_object();
1811  materialized_objects_->Add(Handle<Object>());
1812  } else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
1813  // Use the arguments adapter frame we just built to materialize the
1814  // arguments object. FunctionGetArguments can't throw an exception.
1815  Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
1816  Handle<JSObject> arguments = Handle<JSObject>::cast(
1818  materialized_objects_->Add(arguments);
1819  // To keep consistent object counters, we still materialize the
1820  // nested values (but we throw them away).
1821  for (int i = 0; i < length; ++i) {
1822  MaterializeNextValue();
1823  }
1824  } else if (desc.is_arguments()) {
1825  // Construct an arguments object and copy the parameters to a newly
1826  // allocated arguments object backing store.
1827  Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
1828  Handle<JSObject> arguments =
1829  isolate_->factory()->NewArgumentsObject(function, length);
1830  Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
1831  ASSERT(array->length() == length);
1832  arguments->set_elements(*array);
1833  materialized_objects_->Add(arguments);
1834  for (int i = 0; i < length; ++i) {
1835  Handle<Object> value = MaterializeNextValue();
1836  array->set(i, *value);
1837  }
1838  } else {
1839  // Dispatch on the instance type of the object to be materialized.
1840  // We also need to make sure that the representation of all fields
1841  // in the given object are general enough to hold a tagged value.
1843  Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
1844  switch (map->instance_type()) {
1845  case HEAP_NUMBER_TYPE: {
1846  // Reuse the HeapNumber value directly as it is already properly
1847  // tagged and skip materializing the HeapNumber explicitly.
1848  Handle<Object> object = MaterializeNextValue();
1849  if (object_index < prev_materialized_count_) {
1850  materialized_objects_->Add(Handle<Object>(
1851  previously_materialized_objects_->get(object_index), isolate_));
1852  } else {
1853  materialized_objects_->Add(object);
1854  }
1855  materialization_value_index_ += kDoubleSize / kPointerSize - 1;
1856  break;
1857  }
1858  case JS_OBJECT_TYPE: {
1859  Handle<JSObject> object =
1860  isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
1861  if (object_index < prev_materialized_count_) {
1862  materialized_objects_->Add(Handle<Object>(
1863  previously_materialized_objects_->get(object_index), isolate_));
1864  } else {
1865  materialized_objects_->Add(object);
1866  }
1867  Handle<Object> properties = MaterializeNextValue();
1868  Handle<Object> elements = MaterializeNextValue();
1869  object->set_properties(FixedArray::cast(*properties));
1870  object->set_elements(FixedArrayBase::cast(*elements));
1871  for (int i = 0; i < length - 3; ++i) {
1872  Handle<Object> value = MaterializeNextValue();
1873  object->FastPropertyAtPut(i, *value);
1874  }
1875  break;
1876  }
1877  case JS_ARRAY_TYPE: {
1878  Handle<JSArray> object =
1879  isolate_->factory()->NewJSArray(0, map->elements_kind());
1880  if (object_index < prev_materialized_count_) {
1881  materialized_objects_->Add(Handle<Object>(
1882  previously_materialized_objects_->get(object_index), isolate_));
1883  } else {
1884  materialized_objects_->Add(object);
1885  }
1886  Handle<Object> properties = MaterializeNextValue();
1887  Handle<Object> elements = MaterializeNextValue();
1888  Handle<Object> length = MaterializeNextValue();
1889  object->set_properties(FixedArray::cast(*properties));
1890  object->set_elements(FixedArrayBase::cast(*elements));
1891  object->set_length(*length);
1892  break;
1893  }
1894  default:
1895  PrintF(stderr,
1896  "[couldn't handle instance type %d]\n", map->instance_type());
1897  UNREACHABLE();
1898  }
1899  }
1900 
1901  return materialized_objects_->at(object_index);
1902 }
1903 
1904 
1905 Handle<Object> Deoptimizer::MaterializeNextValue() {
1906  int value_index = materialization_value_index_++;
1907  Handle<Object> value = materialized_values_->at(value_index);
1908  if (*value == isolate_->heap()->arguments_marker()) {
1909  value = MaterializeNextHeapObject();
1910  }
1911  return value;
1912 }
1913 
1914 
// Materializes all heap numbers and arguments/captured objects that were
// deferred during frame translation, writing them into the output frame
// slots recorded in their descriptors. The phases below are
// order-sensitive: values must be handlified before any allocation, and
// heap numbers must exist before objects that embed them are built.
// Not used for debugger-initiated deopts (see the DEBUGGER variant below).
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
  ASSERT_NE(DEBUGGER, bailout_type_);

  // Fetch any objects materialized for this frame by a previous deopt so
  // they can be reused instead of rebuilt.
  MaterializedObjectStore* materialized_store =
      isolate_->materialized_object_store();
  previously_materialized_objects_ = materialized_store->Get(stack_fp_);
  prev_materialized_count_ = previously_materialized_objects_.is_null() ?
      0 : previously_materialized_objects_->length();

  // Walk all JavaScript output frames with the given frame iterator.
  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
    if (frame_index != 0) it->Advance();
    JavaScriptFrame* frame = it->frame();
    jsframe_functions_.Add(handle(frame->function(), isolate_));
    jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
  }

  // Handlify all tagged object values before triggering any allocation.
  List<Handle<Object> > values(deferred_objects_tagged_values_.length());
  for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
    values.Add(Handle<Object>(deferred_objects_tagged_values_[i], isolate_));
  }

  // Play it safe and clear all unhandlified values before we continue.
  deferred_objects_tagged_values_.Clear();

  // Materialize all heap numbers before looking at arguments because when the
  // output frames are used to materialize arguments objects later on they need
  // to already contain valid heap numbers.
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    if (trace_scope_ != NULL) {
      PrintF(trace_scope_->file(),
             "Materialized a new heap number %p [%e] in slot %p\n",
             reinterpret_cast<void*>(*num),
             d.value(),
             d.destination());
    }
    // d.destination() is a raw frame-slot address here.
    Memory::Object_at(d.destination()) = *num;
  }

  // Materialize all heap numbers required for arguments/captured objects.
  for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
    HeapNumberMaterializationDescriptor<int> d =
        deferred_objects_double_values_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    if (trace_scope_ != NULL) {
      PrintF(trace_scope_->file(),
             "Materialized a new heap number %p [%e] for object at %d\n",
             reinterpret_cast<void*>(*num),
             d.value(),
             d.destination());
    }
    // For this descriptor flavor, d.destination() is an index into
    // 'values'; the slot was pre-filled with the hole as a placeholder.
    ASSERT(values.at(d.destination())->IsTheHole());
    values.Set(d.destination(), num);
  }

  // Play it safe and clear all object double values before we continue.
  deferred_objects_double_values_.Clear();

  // Materialize arguments/captured objects.
  if (!deferred_objects_.is_empty()) {
    // materialized_objects_/materialized_values_ point at stack-local
    // lists and are only valid for the duration of this loop.
    List<Handle<Object> > materialized_objects(deferred_objects_.length());
    materialized_objects_ = &materialized_objects;
    materialized_values_ = &values;

    while (materialization_object_index_ < deferred_objects_.length()) {
      int object_index = materialization_object_index_;
      ObjectMaterializationDescriptor descriptor =
          deferred_objects_.at(object_index);

      // Find a previously materialized object by de-duplication or
      // materialize a new instance of the object if necessary. Store
      // the materialized object into the frame slot.
      Handle<Object> object = MaterializeNextHeapObject();
      Memory::Object_at(descriptor.slot_address()) = *object;
      if (trace_scope_ != NULL) {
        if (descriptor.is_arguments()) {
          PrintF(trace_scope_->file(),
                 "Materialized %sarguments object of length %d for %p: ",
                 ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
                 Handle<JSObject>::cast(object)->elements()->length(),
                 reinterpret_cast<void*>(descriptor.slot_address()));
        } else {
          PrintF(trace_scope_->file(),
                 "Materialized captured object of size %d for %p: ",
                 Handle<HeapObject>::cast(object)->Size(),
                 reinterpret_cast<void*>(descriptor.slot_address()));
        }
        object->ShortPrint(trace_scope_->file());
        PrintF(trace_scope_->file(), "\n");
      }
    }

    // Every deferred object and value must have been consumed.
    ASSERT(materialization_object_index_ == materialized_objects_->length());
    ASSERT(materialization_value_index_ == materialized_values_->length());
  }

  // The stored objects have been consumed; drop them from the store.
  if (prev_materialized_count_ > 0) {
    materialized_store->Remove(stack_fp_);
  }
}
2018 
2019 
2020 #ifdef ENABLE_DEBUGGER_SUPPORT
2021 void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
2022  Address parameters_top,
2023  uint32_t parameters_size,
2024  Address expressions_top,
2025  uint32_t expressions_size,
2026  DeoptimizedFrameInfo* info) {
2027  ASSERT_EQ(DEBUGGER, bailout_type_);
2028  Address parameters_bottom = parameters_top + parameters_size;
2029  Address expressions_bottom = expressions_top + expressions_size;
2030  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
2031  HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
2032 
2033  // Check of the heap number to materialize actually belong to the frame
2034  // being extracted.
2035  Address slot = d.destination();
2036  if (parameters_top <= slot && slot < parameters_bottom) {
2037  Handle<Object> num = isolate_->factory()->NewNumber(d.value());
2038 
2039  int index = (info->parameters_count() - 1) -
2040  static_cast<int>(slot - parameters_top) / kPointerSize;
2041 
2042  if (trace_scope_ != NULL) {
2043  PrintF(trace_scope_->file(),
2044  "Materializing a new heap number %p [%e] in slot %p"
2045  "for parameter slot #%d\n",
2046  reinterpret_cast<void*>(*num),
2047  d.value(),
2048  d.destination(),
2049  index);
2050  }
2051 
2052  info->SetParameter(index, *num);
2053  } else if (expressions_top <= slot && slot < expressions_bottom) {
2054  Handle<Object> num = isolate_->factory()->NewNumber(d.value());
2055 
2056  int index = info->expression_count() - 1 -
2057  static_cast<int>(slot - expressions_top) / kPointerSize;
2058 
2059  if (trace_scope_ != NULL) {
2060  PrintF(trace_scope_->file(),
2061  "Materializing a new heap number %p [%e] in slot %p"
2062  "for expression slot #%d\n",
2063  reinterpret_cast<void*>(*num),
2064  d.value(),
2065  d.destination(),
2066  index);
2067  }
2068 
2069  info->SetExpression(index, *num);
2070  }
2071  }
2072 }
2073 #endif
2074 
2075 
// Human-readable tag for deopt trace output: how a translated value was
// encoded into the frame.
static const char* TraceValueType(bool is_smi) {
  return is_smi ? "smi" : "heap number";
}
2083 
2084 
2085 void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
2086  int object_index,
2087  int field_index) {
2088  disasm::NameConverter converter;
2089  Address object_slot = deferred_objects_[object_index].slot_address();
2090 
2091  Translation::Opcode opcode =
2092  static_cast<Translation::Opcode>(iterator->Next());
2093 
2094  switch (opcode) {
2095  case Translation::BEGIN:
2096  case Translation::JS_FRAME:
2097  case Translation::ARGUMENTS_ADAPTOR_FRAME:
2098  case Translation::CONSTRUCT_STUB_FRAME:
2099  case Translation::GETTER_STUB_FRAME:
2100  case Translation::SETTER_STUB_FRAME:
2101  case Translation::COMPILED_STUB_FRAME:
2102  UNREACHABLE();
2103  return;
2104 
2105  case Translation::REGISTER: {
2106  int input_reg = iterator->Next();
2107  intptr_t input_value = input_->GetRegister(input_reg);
2108  if (trace_scope_ != NULL) {
2109  PrintF(trace_scope_->file(),
2110  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2111  reinterpret_cast<intptr_t>(object_slot),
2112  field_index);
2113  PrintF(trace_scope_->file(),
2114  "0x%08" V8PRIxPTR " ; %s ", input_value,
2115  converter.NameOfCPURegister(input_reg));
2116  reinterpret_cast<Object*>(input_value)->ShortPrint(
2117  trace_scope_->file());
2118  PrintF(trace_scope_->file(),
2119  "\n");
2120  }
2121  AddObjectTaggedValue(input_value);
2122  return;
2123  }
2124 
2125  case Translation::INT32_REGISTER: {
2126  int input_reg = iterator->Next();
2127  intptr_t value = input_->GetRegister(input_reg);
2128  bool is_smi = Smi::IsValid(value);
2129  if (trace_scope_ != NULL) {
2130  PrintF(trace_scope_->file(),
2131  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2132  reinterpret_cast<intptr_t>(object_slot),
2133  field_index);
2134  PrintF(trace_scope_->file(),
2135  "%" V8PRIdPTR " ; %s (%s)\n", value,
2136  converter.NameOfCPURegister(input_reg),
2137  TraceValueType(is_smi));
2138  }
2139  if (is_smi) {
2140  intptr_t tagged_value =
2141  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2142  AddObjectTaggedValue(tagged_value);
2143  } else {
2144  double double_value = static_cast<double>(static_cast<int32_t>(value));
2145  AddObjectDoubleValue(double_value);
2146  }
2147  return;
2148  }
2149 
2150  case Translation::UINT32_REGISTER: {
2151  int input_reg = iterator->Next();
2152  uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
2153  bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
2154  if (trace_scope_ != NULL) {
2155  PrintF(trace_scope_->file(),
2156  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2157  reinterpret_cast<intptr_t>(object_slot),
2158  field_index);
2159  PrintF(trace_scope_->file(),
2160  "%" V8PRIdPTR " ; uint %s (%s)\n", value,
2161  converter.NameOfCPURegister(input_reg),
2162  TraceValueType(is_smi));
2163  }
2164  if (is_smi) {
2165  intptr_t tagged_value =
2166  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2167  AddObjectTaggedValue(tagged_value);
2168  } else {
2169  double double_value = static_cast<double>(static_cast<uint32_t>(value));
2170  AddObjectDoubleValue(double_value);
2171  }
2172  return;
2173  }
2174 
2175  case Translation::DOUBLE_REGISTER: {
2176  int input_reg = iterator->Next();
2177  double value = input_->GetDoubleRegister(input_reg);
2178  if (trace_scope_ != NULL) {
2179  PrintF(trace_scope_->file(),
2180  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2181  reinterpret_cast<intptr_t>(object_slot),
2182  field_index);
2183  PrintF(trace_scope_->file(),
2184  "%e ; %s\n", value,
2186  }
2187  AddObjectDoubleValue(value);
2188  return;
2189  }
2190 
2191  case Translation::STACK_SLOT: {
2192  int input_slot_index = iterator->Next();
2193  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2194  intptr_t input_value = input_->GetFrameSlot(input_offset);
2195  if (trace_scope_ != NULL) {
2196  PrintF(trace_scope_->file(),
2197  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2198  reinterpret_cast<intptr_t>(object_slot),
2199  field_index);
2200  PrintF(trace_scope_->file(),
2201  "0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
2202  reinterpret_cast<Object*>(input_value)->ShortPrint(
2203  trace_scope_->file());
2204  PrintF(trace_scope_->file(),
2205  "\n");
2206  }
2207  AddObjectTaggedValue(input_value);
2208  return;
2209  }
2210 
2211  case Translation::INT32_STACK_SLOT: {
2212  int input_slot_index = iterator->Next();
2213  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2214  intptr_t value = input_->GetFrameSlot(input_offset);
2215  bool is_smi = Smi::IsValid(value);
2216  if (trace_scope_ != NULL) {
2217  PrintF(trace_scope_->file(),
2218  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2219  reinterpret_cast<intptr_t>(object_slot),
2220  field_index);
2221  PrintF(trace_scope_->file(),
2222  "%" V8PRIdPTR " ; [sp + %d] (%s)\n",
2223  value, input_offset, TraceValueType(is_smi));
2224  }
2225  if (is_smi) {
2226  intptr_t tagged_value =
2227  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2228  AddObjectTaggedValue(tagged_value);
2229  } else {
2230  double double_value = static_cast<double>(static_cast<int32_t>(value));
2231  AddObjectDoubleValue(double_value);
2232  }
2233  return;
2234  }
2235 
2236  case Translation::UINT32_STACK_SLOT: {
2237  int input_slot_index = iterator->Next();
2238  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2239  uintptr_t value =
2240  static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
2241  bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
2242  if (trace_scope_ != NULL) {
2243  PrintF(trace_scope_->file(),
2244  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2245  reinterpret_cast<intptr_t>(object_slot),
2246  field_index);
2247  PrintF(trace_scope_->file(),
2248  "%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
2249  value, input_offset, TraceValueType(is_smi));
2250  }
2251  if (is_smi) {
2252  intptr_t tagged_value =
2253  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2254  AddObjectTaggedValue(tagged_value);
2255  } else {
2256  double double_value = static_cast<double>(static_cast<uint32_t>(value));
2257  AddObjectDoubleValue(double_value);
2258  }
2259  return;
2260  }
2261 
2262  case Translation::DOUBLE_STACK_SLOT: {
2263  int input_slot_index = iterator->Next();
2264  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2265  double value = input_->GetDoubleFrameSlot(input_offset);
2266  if (trace_scope_ != NULL) {
2267  PrintF(trace_scope_->file(),
2268  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2269  reinterpret_cast<intptr_t>(object_slot),
2270  field_index);
2271  PrintF(trace_scope_->file(),
2272  "%e ; [sp + %d]\n", value, input_offset);
2273  }
2274  AddObjectDoubleValue(value);
2275  return;
2276  }
2277 
2278  case Translation::LITERAL: {
2279  Object* literal = ComputeLiteral(iterator->Next());
2280  if (trace_scope_ != NULL) {
2281  PrintF(trace_scope_->file(),
2282  " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
2283  reinterpret_cast<intptr_t>(object_slot),
2284  field_index);
2285  literal->ShortPrint(trace_scope_->file());
2286  PrintF(trace_scope_->file(),
2287  " ; literal\n");
2288  }
2289  intptr_t value = reinterpret_cast<intptr_t>(literal);
2290  AddObjectTaggedValue(value);
2291  return;
2292  }
2293 
2294  case Translation::DUPLICATED_OBJECT: {
2295  int object_index = iterator->Next();
2296  if (trace_scope_ != NULL) {
2297  PrintF(trace_scope_->file(),
2298  " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
2299  reinterpret_cast<intptr_t>(object_slot),
2300  field_index);
2301  isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
2302  PrintF(trace_scope_->file(),
2303  " ; duplicate of object #%d\n", object_index);
2304  }
2305  // Use the materialization marker value as a sentinel and fill in
2306  // the object after the deoptimized frame is built.
2307  intptr_t value = reinterpret_cast<intptr_t>(
2308  isolate_->heap()->arguments_marker());
2309  AddObjectDuplication(0, object_index);
2310  AddObjectTaggedValue(value);
2311  return;
2312  }
2313 
2314  case Translation::ARGUMENTS_OBJECT:
2315  case Translation::CAPTURED_OBJECT: {
2316  int length = iterator->Next();
2317  bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
2318  if (trace_scope_ != NULL) {
2319  PrintF(trace_scope_->file(),
2320  " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
2321  reinterpret_cast<intptr_t>(object_slot),
2322  field_index);
2323  isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
2324  PrintF(trace_scope_->file(),
2325  " ; object (length = %d, is_args = %d)\n", length, is_args);
2326  }
2327  // Use the materialization marker value as a sentinel and fill in
2328  // the object after the deoptimized frame is built.
2329  intptr_t value = reinterpret_cast<intptr_t>(
2330  isolate_->heap()->arguments_marker());
2331  AddObjectStart(0, length, is_args);
2332  AddObjectTaggedValue(value);
2333  // We save the object values on the side and materialize the actual
2334  // object after the deoptimized frame is built.
2335  int object_index = deferred_objects_.length() - 1;
2336  for (int i = 0; i < length; i++) {
2337  DoTranslateObject(iterator, object_index, i);
2338  }
2339  return;
2340  }
2341  }
2342 }
2343 
2344 
2345 void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
2346  int frame_index,
2347  unsigned output_offset) {
2348  disasm::NameConverter converter;
2349  // A GC-safe temporary placeholder that we can put in the output frame.
2350  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
2351 
2352  Translation::Opcode opcode =
2353  static_cast<Translation::Opcode>(iterator->Next());
2354 
2355  switch (opcode) {
2356  case Translation::BEGIN:
2357  case Translation::JS_FRAME:
2358  case Translation::ARGUMENTS_ADAPTOR_FRAME:
2359  case Translation::CONSTRUCT_STUB_FRAME:
2360  case Translation::GETTER_STUB_FRAME:
2361  case Translation::SETTER_STUB_FRAME:
2362  case Translation::COMPILED_STUB_FRAME:
2363  UNREACHABLE();
2364  return;
2365 
2366  case Translation::REGISTER: {
2367  int input_reg = iterator->Next();
2368  intptr_t input_value = input_->GetRegister(input_reg);
2369  if (trace_scope_ != NULL) {
2370  PrintF(
2371  trace_scope_->file(),
2372  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
2373  output_[frame_index]->GetTop() + output_offset,
2374  output_offset,
2375  input_value,
2376  converter.NameOfCPURegister(input_reg));
2377  reinterpret_cast<Object*>(input_value)->ShortPrint(
2378  trace_scope_->file());
2379  PrintF(trace_scope_->file(), "\n");
2380  }
2381  output_[frame_index]->SetFrameSlot(output_offset, input_value);
2382  return;
2383  }
2384 
2385  case Translation::INT32_REGISTER: {
2386  int input_reg = iterator->Next();
2387  intptr_t value = input_->GetRegister(input_reg);
2388  bool is_smi = Smi::IsValid(value);
2389  if (trace_scope_ != NULL) {
2390  PrintF(
2391  trace_scope_->file(),
2392  " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
2393  output_[frame_index]->GetTop() + output_offset,
2394  output_offset,
2395  value,
2396  converter.NameOfCPURegister(input_reg),
2397  TraceValueType(is_smi));
2398  }
2399  if (is_smi) {
2400  intptr_t tagged_value =
2401  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2402  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2403  } else {
2404  // We save the untagged value on the side and store a GC-safe
2405  // temporary placeholder in the frame.
2406  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2407  static_cast<double>(static_cast<int32_t>(value)));
2408  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2409  }
2410  return;
2411  }
2412 
2413  case Translation::UINT32_REGISTER: {
2414  int input_reg = iterator->Next();
2415  uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
2416  bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
2417  if (trace_scope_ != NULL) {
2418  PrintF(
2419  trace_scope_->file(),
2420  " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
2421  " ; uint %s (%s)\n",
2422  output_[frame_index]->GetTop() + output_offset,
2423  output_offset,
2424  value,
2425  converter.NameOfCPURegister(input_reg),
2426  TraceValueType(is_smi));
2427  }
2428  if (is_smi) {
2429  intptr_t tagged_value =
2430  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2431  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2432  } else {
2433  // We save the untagged value on the side and store a GC-safe
2434  // temporary placeholder in the frame.
2435  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2436  static_cast<double>(static_cast<uint32_t>(value)));
2437  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2438  }
2439  return;
2440  }
2441 
2442  case Translation::DOUBLE_REGISTER: {
2443  int input_reg = iterator->Next();
2444  double value = input_->GetDoubleRegister(input_reg);
2445  if (trace_scope_ != NULL) {
2446  PrintF(trace_scope_->file(),
2447  " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
2448  output_[frame_index]->GetTop() + output_offset,
2449  output_offset,
2450  value,
2452  }
2453  // We save the untagged value on the side and store a GC-safe
2454  // temporary placeholder in the frame.
2455  AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
2456  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2457  return;
2458  }
2459 
2460  case Translation::STACK_SLOT: {
2461  int input_slot_index = iterator->Next();
2462  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2463  intptr_t input_value = input_->GetFrameSlot(input_offset);
2464  if (trace_scope_ != NULL) {
2465  PrintF(trace_scope_->file(),
2466  " 0x%08" V8PRIxPTR ": ",
2467  output_[frame_index]->GetTop() + output_offset);
2468  PrintF(trace_scope_->file(),
2469  "[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
2470  output_offset,
2471  input_value,
2472  input_offset);
2473  reinterpret_cast<Object*>(input_value)->ShortPrint(
2474  trace_scope_->file());
2475  PrintF(trace_scope_->file(), "\n");
2476  }
2477  output_[frame_index]->SetFrameSlot(output_offset, input_value);
2478  return;
2479  }
2480 
2481  case Translation::INT32_STACK_SLOT: {
2482  int input_slot_index = iterator->Next();
2483  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2484  intptr_t value = input_->GetFrameSlot(input_offset);
2485  bool is_smi = Smi::IsValid(value);
2486  if (trace_scope_ != NULL) {
2487  PrintF(trace_scope_->file(),
2488  " 0x%08" V8PRIxPTR ": ",
2489  output_[frame_index]->GetTop() + output_offset);
2490  PrintF(trace_scope_->file(),
2491  "[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
2492  output_offset,
2493  value,
2494  input_offset,
2495  TraceValueType(is_smi));
2496  }
2497  if (is_smi) {
2498  intptr_t tagged_value =
2499  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2500  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2501  } else {
2502  // We save the untagged value on the side and store a GC-safe
2503  // temporary placeholder in the frame.
2504  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2505  static_cast<double>(static_cast<int32_t>(value)));
2506  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2507  }
2508  return;
2509  }
2510 
2511  case Translation::UINT32_STACK_SLOT: {
2512  int input_slot_index = iterator->Next();
2513  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2514  uintptr_t value =
2515  static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
2516  bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
2517  if (trace_scope_ != NULL) {
2518  PrintF(trace_scope_->file(),
2519  " 0x%08" V8PRIxPTR ": ",
2520  output_[frame_index]->GetTop() + output_offset);
2521  PrintF(trace_scope_->file(),
2522  "[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
2523  output_offset,
2524  value,
2525  input_offset,
2526  TraceValueType(is_smi));
2527  }
2528  if (is_smi) {
2529  intptr_t tagged_value =
2530  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
2531  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
2532  } else {
2533  // We save the untagged value on the side and store a GC-safe
2534  // temporary placeholder in the frame.
2535  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
2536  static_cast<double>(static_cast<uint32_t>(value)));
2537  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2538  }
2539  return;
2540  }
2541 
2542  case Translation::DOUBLE_STACK_SLOT: {
2543  int input_slot_index = iterator->Next();
2544  unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
2545  double value = input_->GetDoubleFrameSlot(input_offset);
2546  if (trace_scope_ != NULL) {
2547  PrintF(trace_scope_->file(),
2548  " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
2549  output_[frame_index]->GetTop() + output_offset,
2550  output_offset,
2551  value,
2552  input_offset);
2553  }
2554  // We save the untagged value on the side and store a GC-safe
2555  // temporary placeholder in the frame.
2556  AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
2557  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
2558  return;
2559  }
2560 
2561  case Translation::LITERAL: {
2562  Object* literal = ComputeLiteral(iterator->Next());
2563  if (trace_scope_ != NULL) {
2564  PrintF(trace_scope_->file(),
2565  " 0x%08" V8PRIxPTR ": [top + %d] <- ",
2566  output_[frame_index]->GetTop() + output_offset,
2567  output_offset);
2568  literal->ShortPrint(trace_scope_->file());
2569  PrintF(trace_scope_->file(), " ; literal\n");
2570  }
2571  intptr_t value = reinterpret_cast<intptr_t>(literal);
2572  output_[frame_index]->SetFrameSlot(output_offset, value);
2573  return;
2574  }
2575 
2576  case Translation::DUPLICATED_OBJECT: {
2577  int object_index = iterator->Next();
2578  if (trace_scope_ != NULL) {
2579  PrintF(trace_scope_->file(),
2580  " 0x%08" V8PRIxPTR ": [top + %d] <- ",
2581  output_[frame_index]->GetTop() + output_offset,
2582  output_offset);
2583  isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
2584  PrintF(trace_scope_->file(),
2585  " ; duplicate of object #%d\n", object_index);
2586  }
2587  // Use the materialization marker value as a sentinel and fill in
2588  // the object after the deoptimized frame is built.
2589  intptr_t value = reinterpret_cast<intptr_t>(
2590  isolate_->heap()->arguments_marker());
2591  AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
2592  object_index);
2593  output_[frame_index]->SetFrameSlot(output_offset, value);
2594  return;
2595  }
2596 
2597  case Translation::ARGUMENTS_OBJECT:
2598  case Translation::CAPTURED_OBJECT: {
2599  int length = iterator->Next();
2600  bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
2601  if (trace_scope_ != NULL) {
2602  PrintF(trace_scope_->file(),
2603  " 0x%08" V8PRIxPTR ": [top + %d] <- ",
2604  output_[frame_index]->GetTop() + output_offset,
2605  output_offset);
2606  isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
2607  PrintF(trace_scope_->file(),
2608  " ; object (length = %d, is_args = %d)\n", length, is_args);
2609  }
2610  // Use the materialization marker value as a sentinel and fill in
2611  // the object after the deoptimized frame is built.
2612  intptr_t value = reinterpret_cast<intptr_t>(
2613  isolate_->heap()->arguments_marker());
2614  AddObjectStart(output_[frame_index]->GetTop() + output_offset,
2615  length, is_args);
2616  output_[frame_index]->SetFrameSlot(output_offset, value);
2617  // We save the object values on the side and materialize the actual
2618  // object after the deoptimized frame is built.
2619  int object_index = deferred_objects_.length() - 1;
2620  for (int i = 0; i < length; i++) {
2621  DoTranslateObject(iterator, object_index, i);
2622  }
2623  return;
2624  }
2625  }
2626 }
2627 
2628 
2629 unsigned Deoptimizer::ComputeInputFrameSize() const {
2630  unsigned fixed_size = ComputeFixedSize(function_);
2631  // The fp-to-sp delta already takes the context, constant pool pointer and the
2632  // function into account so we have to avoid double counting them.
2633  unsigned result = fixed_size + fp_to_sp_delta_ -
2635 #ifdef DEBUG
2636  if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
2637  unsigned stack_slots = compiled_code_->stack_slots();
2638  unsigned outgoing_size = ComputeOutgoingArgumentSize();
2639  ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
2640  }
2641 #endif
2642  return result;
2643 }
2644 
2645 
2646 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
2647  // The fixed part of the frame consists of the return address, frame
2648  // pointer, function, context, and all the incoming arguments.
2649  return ComputeIncomingArgumentSize(function) +
2651 }
2652 
2653 
2654 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
2655  // The incoming arguments is the values for formal parameters and
2656  // the receiver. Every slot contains a pointer.
2657  if (function->IsSmi()) {
2659  return 0;
2660  }
2661  unsigned arguments = function->shared()->formal_parameter_count() + 1;
2662  return arguments * kPointerSize;
2663 }
2664 
2665 
2666 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
2667  DeoptimizationInputData* data = DeoptimizationInputData::cast(
2668  compiled_code_->deoptimization_data());
2669  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
2670  return height * kPointerSize;
2671 }
2672 
2673 
2674 Object* Deoptimizer::ComputeLiteral(int index) const {
2675  DeoptimizationInputData* data = DeoptimizationInputData::cast(
2676  compiled_code_->deoptimization_data());
2677  FixedArray* literals = data->LiteralArray();
2678  return literals->get(index);
2679 }
2680 
2681 
2682 void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
2683  ObjectMaterializationDescriptor object_desc(
2684  reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
2685  deferred_objects_.Add(object_desc);
2686 }
2687 
2688 
2689 void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
2690  ObjectMaterializationDescriptor object_desc(
2691  reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
2692  deferred_objects_.Add(object_desc);
2693 }
2694 
2695 
2696 void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
2697  deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
2698 }
2699 
2700 
2701 void Deoptimizer::AddObjectDoubleValue(double value) {
2702  deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
2703  HeapNumberMaterializationDescriptor<int> value_desc(
2704  deferred_objects_tagged_values_.length() - 1, value);
2705  deferred_objects_double_values_.Add(value_desc);
2706 }
2707 
2708 
2709 void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
2710  HeapNumberMaterializationDescriptor<Address> value_desc(
2711  reinterpret_cast<Address>(slot_address), value);
2712  deferred_heap_numbers_.Add(value_desc);
2713 }
2714 
2715 
2717  BailoutType type,
2718  int max_entry_id) {
2719  // We cannot run this if the serializer is enabled because this will
2720  // cause us to emit relocation information for the external
2721  // references. This is fine because the deoptimizer's code section
2722  // isn't meant to be serialized at all.
2723  ASSERT(type == EAGER || type == SOFT || type == LAZY);
2724  DeoptimizerData* data = isolate->deoptimizer_data();
2725  int entry_count = data->deopt_entry_code_entries_[type];
2726  if (max_entry_id < entry_count) return;
2727  entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
2728  while (max_entry_id >= entry_count) entry_count *= 2;
2729  ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
2730 
2731  MacroAssembler masm(isolate, NULL, 16 * KB);
2732  masm.set_emit_debug_code(false);
2733  GenerateDeoptimizationEntries(&masm, entry_count, type);
2734  CodeDesc desc;
2735  masm.GetCode(&desc);
2736  ASSERT(!RelocInfo::RequiresRelocation(desc));
2737 
2738  MemoryChunk* chunk = data->deopt_entry_code_[type];
2739  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
2740  desc.instr_size);
2741  chunk->CommitArea(desc.instr_size);
2742  CopyBytes(chunk->area_start(), desc.buffer,
2743  static_cast<size_t>(desc.instr_size));
2744  CPU::FlushICache(chunk->area_start(), desc.instr_size);
2745 
2746  data->deopt_entry_code_entries_[type] = entry_count;
2747 }
2748 
2749 
2751  JSFunction* function)
2752  : frame_size_(frame_size),
2753  function_(function),
2754  top_(kZapUint32),
2755  pc_(kZapUint32),
2756  fp_(kZapUint32),
2757  context_(kZapUint32),
2758  constant_pool_(kZapUint32) {
2759  // Zap all the registers.
2760  for (int r = 0; r < Register::kNumRegisters; r++) {
2761  // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
2762  // isn't used before the next safepoint, the GC will try to scan it as a
2763  // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
2764  SetRegister(r, kZapUint32);
2765  }
2766 
2767  // Zap all the slots.
2768  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
2769  SetFrameSlot(o, kZapUint32);
2770  }
2771 }
2772 
2773 
2774 int FrameDescription::ComputeFixedSize() {
2776  (ComputeParametersCount() + 1) * kPointerSize;
2777 }
2778 
2779 
2781  if (slot_index >= 0) {
2782  // Local or spill slots. Skip the fixed part of the frame
2783  // including all arguments.
2784  unsigned base = GetFrameSize() - ComputeFixedSize();
2785  return base - ((slot_index + 1) * kPointerSize);
2786  } else {
2787  // Incoming parameter.
2788  int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
2789  unsigned base = GetFrameSize() - arg_size;
2790  return base - ((slot_index + 1) * kPointerSize);
2791  }
2792 }
2793 
2794 
2796  switch (type_) {
2797  case StackFrame::JAVA_SCRIPT:
2798  return function_->shared()->formal_parameter_count();
2800  // Last slot contains number of incomming arguments as a smi.
2801  // Can't use GetExpression(0) because it would cause infinite recursion.
2802  return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
2803  }
2804  case StackFrame::STUB:
2805  return -1; // Minus receiver.
2806  default:
2807  UNREACHABLE();
2808  return 0;
2809  }
2810 }
2811 
2812 
2814  ASSERT(index >= 0);
2815  ASSERT(index < ComputeParametersCount());
2816  // The slot indexes for incoming arguments are negative.
2817  unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
2818  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
2819 }
2820 
2821 
2823  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
2824  unsigned size = GetFrameSize() - ComputeFixedSize();
2825  return size / kPointerSize;
2826 }
2827 
2828 
2830  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
2831  unsigned offset = GetOffsetFromSlotIndex(index);
2832  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
2833 }
2834 
2835 
2836 void TranslationBuffer::Add(int32_t value, Zone* zone) {
2837  // Encode the sign bit in the least significant bit.
2838  bool is_negative = (value < 0);
2839  uint32_t bits = ((is_negative ? -value : value) << 1) |
2840  static_cast<int32_t>(is_negative);
2841  // Encode the individual bytes using the least significant bit of
2842  // each byte to indicate whether or not more bytes follow.
2843  do {
2844  uint32_t next = bits >> 7;
2845  contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
2846  bits = next;
2847  } while (bits != 0);
2848 }
2849 
2850 
2851 int32_t TranslationIterator::Next() {
2852  // Run through the bytes until we reach one with a least significant
2853  // bit of zero (marks the end).
2854  uint32_t bits = 0;
2855  for (int i = 0; true; i += 7) {
2856  ASSERT(HasNext());
2857  uint8_t next = buffer_->get(index_++);
2858  bits |= (next >> 1) << i;
2859  if ((next & 1) == 0) break;
2860  }
2861  // The bits encode the sign in the least significant bit.
2862  bool is_negative = (bits & 1) == 1;
2863  int32_t result = bits >> 1;
2864  return is_negative ? -result : result;
2865 }
2866 
2867 
2868 Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
2869  int length = contents_.length();
2870  Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
2871  OS::MemCopy(
2872  result->GetDataStartAddress(), contents_.ToVector().start(), length);
2873  return result;
2874 }
2875 
2876 
2877 void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
2878  buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
2879  buffer_->Add(literal_id, zone());
2880  buffer_->Add(height, zone());
2881 }
2882 
2883 
2884 void Translation::BeginGetterStubFrame(int literal_id) {
2885  buffer_->Add(GETTER_STUB_FRAME, zone());
2886  buffer_->Add(literal_id, zone());
2887 }
2888 
2889 
2890 void Translation::BeginSetterStubFrame(int literal_id) {
2891  buffer_->Add(SETTER_STUB_FRAME, zone());
2892  buffer_->Add(literal_id, zone());
2893 }
2894 
2895 
2896 void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
2897  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
2898  buffer_->Add(literal_id, zone());
2899  buffer_->Add(height, zone());
2900 }
2901 
2902 
2903 void Translation::BeginJSFrame(BailoutId node_id,
2904  int literal_id,
2905  unsigned height) {
2906  buffer_->Add(JS_FRAME, zone());
2907  buffer_->Add(node_id.ToInt(), zone());
2908  buffer_->Add(literal_id, zone());
2909  buffer_->Add(height, zone());
2910 }
2911 
2912 
2913 void Translation::BeginCompiledStubFrame() {
2914  buffer_->Add(COMPILED_STUB_FRAME, zone());
2915 }
2916 
2917 
2918 void Translation::BeginArgumentsObject(int args_length) {
2919  buffer_->Add(ARGUMENTS_OBJECT, zone());
2920  buffer_->Add(args_length, zone());
2921 }
2922 
2923 
2924 void Translation::BeginCapturedObject(int length) {
2925  buffer_->Add(CAPTURED_OBJECT, zone());
2926  buffer_->Add(length, zone());
2927 }
2928 
2929 
2930 void Translation::DuplicateObject(int object_index) {
2931  buffer_->Add(DUPLICATED_OBJECT, zone());
2932  buffer_->Add(object_index, zone());
2933 }
2934 
2935 
2936 void Translation::StoreRegister(Register reg) {
2937  buffer_->Add(REGISTER, zone());
2938  buffer_->Add(reg.code(), zone());
2939 }
2940 
2941 
2942 void Translation::StoreInt32Register(Register reg) {
2943  buffer_->Add(INT32_REGISTER, zone());
2944  buffer_->Add(reg.code(), zone());
2945 }
2946 
2947 
2948 void Translation::StoreUint32Register(Register reg) {
2949  buffer_->Add(UINT32_REGISTER, zone());
2950  buffer_->Add(reg.code(), zone());
2951 }
2952 
2953 
2954 void Translation::StoreDoubleRegister(DoubleRegister reg) {
2955  buffer_->Add(DOUBLE_REGISTER, zone());
2956  buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
2957 }
2958 
2959 
2960 void Translation::StoreStackSlot(int index) {
2961  buffer_->Add(STACK_SLOT, zone());
2962  buffer_->Add(index, zone());
2963 }
2964 
2965 
2966 void Translation::StoreInt32StackSlot(int index) {
2967  buffer_->Add(INT32_STACK_SLOT, zone());
2968  buffer_->Add(index, zone());
2969 }
2970 
2971 
2972 void Translation::StoreUint32StackSlot(int index) {
2973  buffer_->Add(UINT32_STACK_SLOT, zone());
2974  buffer_->Add(index, zone());
2975 }
2976 
2977 
2978 void Translation::StoreDoubleStackSlot(int index) {
2979  buffer_->Add(DOUBLE_STACK_SLOT, zone());
2980  buffer_->Add(index, zone());
2981 }
2982 
2983 
2984 void Translation::StoreLiteral(int literal_id) {
2985  buffer_->Add(LITERAL, zone());
2986  buffer_->Add(literal_id, zone());
2987 }
2988 
2989 
2990 void Translation::StoreArgumentsObject(bool args_known,
2991  int args_index,
2992  int args_length) {
2993  buffer_->Add(ARGUMENTS_OBJECT, zone());
2994  buffer_->Add(args_known, zone());
2995  buffer_->Add(args_index, zone());
2996  buffer_->Add(args_length, zone());
2997 }
2998 
2999 
3000 int Translation::NumberOfOperandsFor(Opcode opcode) {
3001  switch (opcode) {
3002  case GETTER_STUB_FRAME:
3003  case SETTER_STUB_FRAME:
3004  case DUPLICATED_OBJECT:
3005  case ARGUMENTS_OBJECT:
3006  case CAPTURED_OBJECT:
3007  case REGISTER:
3008  case INT32_REGISTER:
3009  case UINT32_REGISTER:
3010  case DOUBLE_REGISTER:
3011  case STACK_SLOT:
3012  case INT32_STACK_SLOT:
3013  case UINT32_STACK_SLOT:
3014  case DOUBLE_STACK_SLOT:
3015  case LITERAL:
3016  case COMPILED_STUB_FRAME:
3017  return 1;
3018  case BEGIN:
3019  case ARGUMENTS_ADAPTOR_FRAME:
3020  case CONSTRUCT_STUB_FRAME:
3021  return 2;
3022  case JS_FRAME:
3023  return 3;
3024  }
3025  UNREACHABLE();
3026  return -1;
3027 }
3028 
3029 
3030 #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
3031 
3032 const char* Translation::StringFor(Opcode opcode) {
3033 #define TRANSLATION_OPCODE_CASE(item) case item: return #item;
3034  switch (opcode) {
3035  TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
3036  }
3037 #undef TRANSLATION_OPCODE_CASE
3038  UNREACHABLE();
3039  return "";
3040 }
3041 
3042 #endif
3043 
3044 
3045 // We can't intermix stack decoding and allocations because
3046 // deoptimization infrastracture is not GC safe.
3047 // Thus we build a temporary structure in malloced space.
3048 SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
3049  Translation::Opcode opcode,
3050  TranslationIterator* iterator,
3051  DeoptimizationInputData* data,
3052  JavaScriptFrame* frame) {
3053  switch (opcode) {
3054  case Translation::BEGIN:
3055  case Translation::JS_FRAME:
3056  case Translation::ARGUMENTS_ADAPTOR_FRAME:
3057  case Translation::CONSTRUCT_STUB_FRAME:
3058  case Translation::GETTER_STUB_FRAME:
3059  case Translation::SETTER_STUB_FRAME:
3060  // Peeled off before getting here.
3061  break;
3062 
3063  case Translation::DUPLICATED_OBJECT: {
3064  return SlotRef::NewDuplicateObject(iterator->Next());
3065  }
3066 
3067  case Translation::ARGUMENTS_OBJECT:
3068  return SlotRef::NewArgumentsObject(iterator->Next());
3069 
3070  case Translation::CAPTURED_OBJECT: {
3071  return SlotRef::NewDeferredObject(iterator->Next());
3072  }
3073 
3074  case Translation::REGISTER:
3075  case Translation::INT32_REGISTER:
3076  case Translation::UINT32_REGISTER:
3077  case Translation::DOUBLE_REGISTER:
3078  // We are at safepoint which corresponds to call. All registers are
3079  // saved by caller so there would be no live registers at this
3080  // point. Thus these translation commands should not be used.
3081  break;
3082 
3083  case Translation::STACK_SLOT: {
3084  int slot_index = iterator->Next();
3085  Address slot_addr = SlotAddress(frame, slot_index);
3086  return SlotRef(slot_addr, SlotRef::TAGGED);
3087  }
3088 
3089  case Translation::INT32_STACK_SLOT: {
3090  int slot_index = iterator->Next();
3091  Address slot_addr = SlotAddress(frame, slot_index);
3092  return SlotRef(slot_addr, SlotRef::INT32);
3093  }
3094 
3095  case Translation::UINT32_STACK_SLOT: {
3096  int slot_index = iterator->Next();
3097  Address slot_addr = SlotAddress(frame, slot_index);
3098  return SlotRef(slot_addr, SlotRef::UINT32);
3099  }
3100 
3101  case Translation::DOUBLE_STACK_SLOT: {
3102  int slot_index = iterator->Next();
3103  Address slot_addr = SlotAddress(frame, slot_index);
3104  return SlotRef(slot_addr, SlotRef::DOUBLE);
3105  }
3106 
3107  case Translation::LITERAL: {
3108  int literal_index = iterator->Next();
3109  return SlotRef(data->GetIsolate(),
3110  data->LiteralArray()->get(literal_index));
3111  }
3112 
3113  case Translation::COMPILED_STUB_FRAME:
3114  UNREACHABLE();
3115  break;
3116  }
3117 
3118  FATAL("We should never get here - unexpected deopt info.");
3119  return SlotRef();
3120 }
3121 
3122 
3123 SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
3124  int inlined_jsframe_index,
3126  : current_slot_(0), args_length_(-1), first_slot_index_(-1) {
3127  DisallowHeapAllocation no_gc;
3128 
3129  int deopt_index = Safepoint::kNoDeoptimizationIndex;
3130  DeoptimizationInputData* data =
3131  static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
3132  TranslationIterator it(data->TranslationByteArray(),
3133  data->TranslationIndex(deopt_index)->value());
3134  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
3135  ASSERT(opcode == Translation::BEGIN);
3136  it.Next(); // Drop frame count.
3137 
3138  stack_frame_id_ = frame->fp();
3139 
3140  int jsframe_count = it.Next();
3141  USE(jsframe_count);
3142  ASSERT(jsframe_count > inlined_jsframe_index);
3143  int jsframes_to_skip = inlined_jsframe_index;
3144  int number_of_slots = -1; // Number of slots inside our frame (yet unknown)
3145  bool should_deopt = false;
3146  while (number_of_slots != 0) {
3147  opcode = static_cast<Translation::Opcode>(it.Next());
3148  bool processed = false;
3149  if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
3150  if (jsframes_to_skip == 0) {
3151  ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
3152 
3153  it.Skip(1); // literal id
3154  int height = it.Next();
3155 
3156  // Skip the translation command for the receiver.
3157  it.Skip(Translation::NumberOfOperandsFor(
3158  static_cast<Translation::Opcode>(it.Next())));
3159 
3160  // We reached the arguments adaptor frame corresponding to the
3161  // inlined function in question. Number of arguments is height - 1.
3162  first_slot_index_ = slot_refs_.length();
3163  args_length_ = height - 1;
3164  number_of_slots = height - 1;
3165  processed = true;
3166  }
3167  } else if (opcode == Translation::JS_FRAME) {
3168  if (jsframes_to_skip == 0) {
3169  // Skip over operands to advance to the next opcode.
3170  it.Skip(Translation::NumberOfOperandsFor(opcode));
3171 
3172  // Skip the translation command for the receiver.
3173  it.Skip(Translation::NumberOfOperandsFor(
3174  static_cast<Translation::Opcode>(it.Next())));
3175 
3176  // We reached the frame corresponding to the inlined function
3177  // in question. Process the translation commands for the
3178  // arguments. Number of arguments is equal to the number of
3179  // format parameter count.
3180  first_slot_index_ = slot_refs_.length();
3181  args_length_ = formal_parameter_count;
3182  number_of_slots = formal_parameter_count;
3183  processed = true;
3184  }
3185  jsframes_to_skip--;
3186  } else if (opcode != Translation::BEGIN &&
3187  opcode != Translation::CONSTRUCT_STUB_FRAME &&
3188  opcode != Translation::GETTER_STUB_FRAME &&
3189  opcode != Translation::SETTER_STUB_FRAME &&
3190  opcode != Translation::COMPILED_STUB_FRAME) {
3191  slot_refs_.Add(ComputeSlotForNextArgument(opcode, &it, data, frame));
3192 
3193  if (first_slot_index_ >= 0) {
3194  // We have found the beginning of our frame -> make sure we count
3195  // the nested slots of captured objects
3196  number_of_slots--;
3197  SlotRef& slot = slot_refs_.last();
3198  ASSERT(slot.Representation() != SlotRef::ARGUMENTS_OBJECT);
3199  number_of_slots += slot.GetChildrenCount();
3200  if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
3201  slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
3202  should_deopt = true;
3203  }
3204  }
3205 
3206  processed = true;
3207  }
3208  if (!processed) {
3209  // Skip over operands to advance to the next opcode.
3210  it.Skip(Translation::NumberOfOperandsFor(opcode));
3211  }
3212  }
3213  if (should_deopt) {
3214  List<JSFunction*> functions(2);
3215  frame->GetFunctions(&functions);
3216  Deoptimizer::DeoptimizeFunction(functions[0]);
3217  }
3218 }
3219 
3220 
3221 Handle<Object> SlotRef::GetValue(Isolate* isolate) {
3222  switch (representation_) {
3223  case TAGGED:
3224  return Handle<Object>(Memory::Object_at(addr_), isolate);
3225 
3226  case INT32: {
3227  int value = Memory::int32_at(addr_);
3228  if (Smi::IsValid(value)) {
3229  return Handle<Object>(Smi::FromInt(value), isolate);
3230  } else {
3231  return isolate->factory()->NewNumberFromInt(value);
3232  }
3233  }
3234 
3235  case UINT32: {
3236  uint32_t value = Memory::uint32_at(addr_);
3237  if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
3238  return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
3239  } else {
3240  return isolate->factory()->NewNumber(static_cast<double>(value));
3241  }
3242  }
3243 
3244  case DOUBLE: {
3245  double value = read_double_value(addr_);
3246  return isolate->factory()->NewNumber(value);
3247  }
3248 
3249  case LITERAL:
3250  return literal_;
3251 
3252  default:
3253  FATAL("We should never get here - unexpected deopt info.");
3254  return Handle<Object>::null();
3255  }
3256 }
3257 
3258 
3259 void SlotRefValueBuilder::Prepare(Isolate* isolate) {
3260  MaterializedObjectStore* materialized_store =
3261  isolate->materialized_object_store();
3262  previously_materialized_objects_ = materialized_store->Get(stack_frame_id_);
3263  prev_materialized_count_ = previously_materialized_objects_.is_null()
3264  ? 0 : previously_materialized_objects_->length();
3265 
3266  // Skip any materialized objects of the inlined "parent" frames.
3267  // (Note that we still need to materialize them because they might be
3268  // referred to as duplicated objects.)
3269  while (current_slot_ < first_slot_index_) {
3270  GetNext(isolate, 0);
3271  }
3272  ASSERT(current_slot_ == first_slot_index_);
3273 }
3274 
3275 
3276 Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
3277  Isolate* isolate, int length) {
3278  int object_index = materialized_objects_.length();
3279  Handle<Object> return_value = Handle<Object>(
3280  previously_materialized_objects_->get(object_index), isolate);
3281  materialized_objects_.Add(return_value);
3282 
3283  // Now need to skip all the nested objects (and possibly read them from
3284  // the materialization store, too).
3285  for (int i = 0; i < length; i++) {
3286  SlotRef& slot = slot_refs_[current_slot_];
3287  current_slot_++;
3288 
3289  // We need to read all the nested objects - add them to the
3290  // number of objects we need to process.
3291  length += slot.GetChildrenCount();
3292 
3293  // Put the nested deferred/duplicate objects into our materialization
3294  // array.
3295  if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
3296  slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
3297  int nested_object_index = materialized_objects_.length();
3298  Handle<Object> nested_object = Handle<Object>(
3299  previously_materialized_objects_->get(nested_object_index),
3300  isolate);
3301  materialized_objects_.Add(nested_object);
3302  }
3303  }
3304 
3305  return return_value;
3306 }
3307 
3308 
3309 Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
3310  SlotRef& slot = slot_refs_[current_slot_];
3311  current_slot_++;
3312  switch (slot.Representation()) {
3313  case SlotRef::TAGGED:
3314  case SlotRef::INT32:
3315  case SlotRef::UINT32:
3316  case SlotRef::DOUBLE:
3317  case SlotRef::LITERAL: {
3318  return slot.GetValue(isolate);
3319  }
3320  case SlotRef::ARGUMENTS_OBJECT: {
3321  // We should never need to materialize an arguments object,
3322  // but we still need to put something into the array
3323  // so that the indexing is consistent.
3324  materialized_objects_.Add(isolate->factory()->undefined_value());
3325  int length = slot.GetChildrenCount();
3326  for (int i = 0; i < length; ++i) {
3327  // We don't need the argument, just ignore it
3328  GetNext(isolate, lvl + 1);
3329  }
3330  return isolate->factory()->undefined_value();
3331  }
3332  case SlotRef::DEFERRED_OBJECT: {
3333  int length = slot.GetChildrenCount();
3334  ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
3335  slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
3336 
3337  int object_index = materialized_objects_.length();
3338  if (object_index < prev_materialized_count_) {
3339  return GetPreviouslyMaterialized(isolate, length);
3340  }
3341 
3342  Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate);
3343  Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
3344  Handle<Map>::cast(map_object), Representation::Tagged());
3345  current_slot_++;
3346  // TODO(jarin) this should be unified with the code in
3347  // Deoptimizer::MaterializeNextHeapObject()
3348  switch (map->instance_type()) {
3349  case HEAP_NUMBER_TYPE: {
3350  // Reuse the HeapNumber value directly as it is already properly
3351  // tagged and skip materializing the HeapNumber explicitly.
3352  Handle<Object> object = GetNext(isolate, lvl + 1);
3353  materialized_objects_.Add(object);
3354  // On 32-bit architectures, there is an extra slot there because
3355  // the escape analysis calculates the number of slots as
3356  // object-size/pointer-size. To account for this, we read out
3357  // any extra slots.
3358  for (int i = 0; i < length - 2; i++) {
3359  GetNext(isolate, lvl + 1);
3360  }
3361  return object;
3362  }
3363  case JS_OBJECT_TYPE: {
3364  Handle<JSObject> object =
3365  isolate->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
3366  materialized_objects_.Add(object);
3367  Handle<Object> properties = GetNext(isolate, lvl + 1);
3368  Handle<Object> elements = GetNext(isolate, lvl + 1);
3369  object->set_properties(FixedArray::cast(*properties));
3370  object->set_elements(FixedArrayBase::cast(*elements));
3371  for (int i = 0; i < length - 3; ++i) {
3372  Handle<Object> value = GetNext(isolate, lvl + 1);
3373  object->FastPropertyAtPut(i, *value);
3374  }
3375  return object;
3376  }
3377  case JS_ARRAY_TYPE: {
3378  Handle<JSArray> object =
3379  isolate->factory()->NewJSArray(0, map->elements_kind());
3380  materialized_objects_.Add(object);
3381  Handle<Object> properties = GetNext(isolate, lvl + 1);
3382  Handle<Object> elements = GetNext(isolate, lvl + 1);
3383  Handle<Object> length = GetNext(isolate, lvl + 1);
3384  object->set_properties(FixedArray::cast(*properties));
3385  object->set_elements(FixedArrayBase::cast(*elements));
3386  object->set_length(*length);
3387  return object;
3388  }
3389  default:
3390  PrintF(stderr,
3391  "[couldn't handle instance type %d]\n", map->instance_type());
3392  UNREACHABLE();
3393  break;
3394  }
3395  UNREACHABLE();
3396  }
3397 
3398  case SlotRef::DUPLICATE_OBJECT: {
3399  int object_index = slot.DuplicateObjectId();
3400  Handle<Object> object = materialized_objects_[object_index];
3401  materialized_objects_.Add(object);
3402  return object;
3403  }
3404  default:
3405  UNREACHABLE();
3406  break;
3407  }
3408 
3409  FATAL("We should never get here - unexpected deopt slot kind.");
3410  return Handle<Object>::null();
3411 }
3412 
3413 
3414 void SlotRefValueBuilder::Finish(Isolate* isolate) {
3415  // We should have processed all the slots
3416  ASSERT(slot_refs_.length() == current_slot_);
3417 
3418  if (materialized_objects_.length() > prev_materialized_count_) {
3419  // We have materialized some new objects, so we have to store them
3420  // to prevent duplicate materialization
3421  Handle<FixedArray> array = isolate->factory()->NewFixedArray(
3422  materialized_objects_.length());
3423  for (int i = 0; i < materialized_objects_.length(); i++) {
3424  array->set(i, *(materialized_objects_.at(i)));
3425  }
3426  isolate->materialized_object_store()->Set(stack_frame_id_, array);
3427  }
3428 }
3429 
3430 
3432  int index = StackIdToIndex(fp);
3433  if (index == -1) {
3434  return Handle<FixedArray>::null();
3435  }
3436  Handle<FixedArray> array = GetStackEntries();
3437  ASSERT(array->length() > index);
3438  return Handle<FixedArray>::cast(Handle<Object>(array->get(index),
3439  isolate()));
3440 }
3441 
3442 
3444  Handle<FixedArray> materialized_objects) {
3445  int index = StackIdToIndex(fp);
3446  if (index == -1) {
3447  index = frame_fps_.length();
3448  frame_fps_.Add(fp);
3449  }
3450 
3451  Handle<FixedArray> array = EnsureStackEntries(index + 1);
3452  array->set(index, *materialized_objects);
3453 }
3454 
3455 
3457  int index = StackIdToIndex(fp);
3458  ASSERT(index >= 0);
3459 
3460  frame_fps_.Remove(index);
3461  Handle<FixedArray> array = GetStackEntries();
3462  ASSERT(array->length() > index);
3463  for (int i = index; i < frame_fps_.length(); i++) {
3464  array->set(i, array->get(i + 1));
3465  }
3466  array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
3467 }
3468 
3469 
3470 int MaterializedObjectStore::StackIdToIndex(Address fp) {
3471  for (int i = 0; i < frame_fps_.length(); i++) {
3472  if (frame_fps_[i] == fp) {
3473  return i;
3474  }
3475  }
3476  return -1;
3477 }
3478 
3479 
3480 Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
3481  return Handle<FixedArray>(isolate()->heap()->materialized_objects());
3482 }
3483 
3484 
3485 Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
3486  Handle<FixedArray> array = GetStackEntries();
3487  if (array->length() >= length) {
3488  return array;
3489  }
3490 
3491  int new_length = length > 10 ? length : 10;
3492  if (new_length < 2 * array->length()) {
3493  new_length = 2 * array->length();
3494  }
3495 
3496  Handle<FixedArray> new_array =
3497  isolate()->factory()->NewFixedArray(new_length, TENURED);
3498  for (int i = 0; i < array->length(); i++) {
3499  new_array->set(i, array->get(i));
3500  }
3501  for (int i = array->length(); i < length; i++) {
3502  new_array->set(i, isolate()->heap()->undefined_value());
3503  }
3504  isolate()->heap()->public_set_materialized_objects(*new_array);
3505  return new_array;
3506 }
3507 
3508 #ifdef ENABLE_DEBUGGER_SUPPORT
3509 
3510 DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
3511  int frame_index,
3512  bool has_arguments_adaptor,
3513  bool has_construct_stub) {
3514  FrameDescription* output_frame = deoptimizer->output_[frame_index];
3515  function_ = output_frame->GetFunction();
3516  has_construct_stub_ = has_construct_stub;
3517  expression_count_ = output_frame->GetExpressionCount();
3518  expression_stack_ = new Object*[expression_count_];
3519  // Get the source position using the unoptimized code.
3520  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
3521  Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
3522  source_position_ = code->SourcePosition(pc);
3523 
3524  for (int i = 0; i < expression_count_; i++) {
3525  SetExpression(i, output_frame->GetExpression(i));
3526  }
3527 
3528  if (has_arguments_adaptor) {
3529  output_frame = deoptimizer->output_[frame_index - 1];
3530  ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
3531  }
3532 
3533  parameters_count_ = output_frame->ComputeParametersCount();
3534  parameters_ = new Object*[parameters_count_];
3535  for (int i = 0; i < parameters_count_; i++) {
3536  SetParameter(i, output_frame->GetParameter(i));
3537  }
3538 }
3539 
3540 
3541 DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
3542  delete[] expression_stack_;
3543  delete[] parameters_;
3544 }
3545 
3546 
3547 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
3548  v->VisitPointer(BitCast<Object**>(&function_));
3549  v->VisitPointers(parameters_, parameters_ + parameters_count_);
3550  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
3551 }
3552 
3553 #endif // ENABLE_DEBUGGER_SUPPORT
3554 
3555 } } // namespace v8::internal
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
const int kMinInt
Definition: globals.h:249
Code * builtin(Name name)
Definition: builtins.h:322
static DeoptimizationOutputData * cast(Object *obj)
static const int kBailoutTypesWithCodeEntry
Definition: deoptimizer.h:135
static Object *& Object_at(Address addr)
Definition: v8memory.h:83
static void EnsureCodeForDeoptimizationEntry(Isolate *isolate, BailoutType type, int max_entry_id)
#define V8PRIxPTR
Definition: globals.h:228
uint16_t current_
Object * OptimizedCodeListHead()
Definition: contexts.cc:349
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
unsigned stack_slots()
Definition: objects-inl.h:4468
#define FATAL(msg)
Definition: checks.h:48
CodeTracer * GetCodeTracer()
Definition: isolate.cc:2229
void SetDeoptimizedCodeListHead(Object *head)
Definition: contexts.cc:355
static const int kFixedFrameSize
Definition: frames.h:181
REGISTER(no_reg,-1)
Isolate * isolate()
Definition: heap-inl.h:624
void SourceCodePrint(StringStream *accumulator, int max_length)
Definition: objects.cc:9957
bool CommitArea(size_t requested)
Definition: spaces.cc:513
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
#define LOG(isolate, Call)
Definition: log.h:86
const int KB
Definition: globals.h:245
unsigned GetOffsetFromSlotIndex(int slot_index)
void SetFrameSlot(unsigned offset, intptr_t value)
Definition: deoptimizer.h:503
static void ComputeOutputFrames(Deoptimizer *deoptimizer)
Definition: deoptimizer.cc:526
static Handle< T > cast(Handle< S > that)
Definition: handles.h:75
T Max(T a, T b)
Definition: utils.h:227
static Register fp_register()
kSerializedDataOffset Object
Definition: objects-inl.h:5016
T & at(int i) const
Definition: list.h:90
TypeImpl< ZoneTypeConfig > Type
Builtins * builtins()
Definition: isolate.h:948
int int32_t
Definition: unicode.cc:47
DeoptimizerData(MemoryAllocator *allocator)
Definition: deoptimizer.cc:57
static const char * MessageFor(BailoutType type)
Definition: deoptimizer.cc:547
#define REGISTER(N, C)
CodeStubInterfaceDescriptor * code_stub_interface_descriptor(int index)
Definition: isolate.cc:2274
#define ASSERT(condition)
Definition: checks.h:329
static void DeoptimizeFunction(JSFunction *function)
Definition: deoptimizer.cc:514
Handle< JSFunction > function() const
Definition: deoptimizer.h:157
int SourcePosition(Address pc)
Definition: objects.cc:10383
static Context * cast(Object *context)
Definition: contexts.h:244
double GetDoubleRegister(unsigned n) const
Definition: deoptimizer.h:526
intptr_t GetContext() const
Definition: deoptimizer.h:550
void MaterializeHeapObjects(JavaScriptFrameIterator *it)
Handle< Object > NewNumber(double value, PretenureFlag pretenure=NOT_TENURED)
Definition: factory.cc:998
void SetFrameType(StackFrame::Type type)
Definition: deoptimizer.h:564
#define CHECK(condition)
Definition: checks.h:75
static bool TraceEnabledFor(BailoutType deopt_type, StackFrame::Type frame_type)
Definition: deoptimizer.cc:531
Handle< JSObject > NewArgumentsObject(Handle< Object > callee, int length)
Definition: factory.cc:1740
Factory * factory()
Definition: isolate.h:995
static Code * cast(Object *obj)
void set_emit_debug_code(bool value)
Definition: assembler.h:66
void PrintDeoptLocation(FILE *out, int bailout_id)
Definition: objects.cc:10767
Object * FindCodeObject(Address a)
Definition: isolate.cc:2286
static Smi * cast(Object *object)
static const char * AllocationIndexToString(int index)
Object * DeoptimizedCodeListHead()
Definition: contexts.cc:361
static Register constant_pool_pointer_register()
static const int kNumRegisters
void Free(MemoryChunk *chunk)
Definition: spaces.cc:751
static void VisitAllOptimizedFunctions(Isolate *isolate, OptimizedFunctionVisitor *visitor)
Definition: deoptimizer.cc:312
SmartArrayPointer< char > ToCString(AllowNullsFlag allow_nulls, RobustnessFlag robustness_flag, int offset, int length, int *length_output=0)
Definition: objects.cc:8272
JSFunction * GetFunction() const
Definition: deoptimizer.h:490
Handle< Code > compiled_code() const
Definition: deoptimizer.h:158
static void DeoptimizeGlobalObject(JSObject *object)
Definition: deoptimizer.cc:483
#define UNREACHABLE()
Definition: checks.h:52
Handle< JSObject > NewJSObjectFromMap(Handle< Map > map, PretenureFlag pretenure=NOT_TENURED, bool allocate_properties=true, Handle< AllocationSite > allocation_site=Handle< AllocationSite >::null())
Definition: factory.cc:1421
static int output_offset()
Definition: deoptimizer.h:250
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
Isolate * isolate() const
Definition: deoptimizer.h:303
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
virtual const char * NameOfCPURegister(int reg) const
static Register constant_pool_pointer_register()
const int kDoubleSize
Definition: globals.h:266
static void MemCopy(void *dest, const void *src, size_t size)
Definition: platform.h:399
Context * native_context()
Definition: contexts.cc:67
static bool IsValid(intptr_t value)
Definition: objects-inl.h:1278
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
Definition: deoptimizer.cc:683
void GetCode(CodeDesc *desc)
const int kPointerSize
Definition: globals.h:268
Object * GetPrototype(Isolate *isolate)
Definition: objects.cc:1032
T Remove(int i)
Definition: list-inl.h:125
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
Definition: deoptimizer.cc:701
static int32_t & int32_at(Address addr)
Definition: v8memory.h:51
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void SetOptimizedCodeListHead(Object *head)
Definition: contexts.cc:343
const int kFPOnStackSize
Definition: globals.h:271
void SetRegister(unsigned n, intptr_t value)
Definition: deoptimizer.h:531
static const int kCallerSPOffset
Definition: frames.h:190
static const char * Kind2String(Kind kind)
Definition: objects.cc:10803
const Register pc
static void DeoptimizeAll(Isolate *isolate)
Definition: deoptimizer.cc:450
Handle< FixedArray > NewFixedArray(int size, PretenureFlag pretenure=NOT_TENURED)
Definition: factory.cc:53
static int GetDeoptimizedCodeCount(Isolate *isolate)
Definition: deoptimizer.cc:743
SafepointEntry GetSafepointEntry(Address pc)
Definition: objects.cc:10435
static const char * State2String(State state)
Definition: full-codegen.h:112
static int GetOutputInfo(DeoptimizationOutputData *data, BailoutId node_id, SharedFunctionInfo *shared)
Definition: deoptimizer.cc:718
DeoptimizerData * deoptimizer_data()
Definition: isolate.h:878
uint32_t GetFrameSize() const
Definition: deoptimizer.h:485
#define ASSERT_LT(v1, v2)
Definition: checks.h:333
#define V8PRIuPTR
Definition: globals.h:230
#define V8PRIdPTR
Definition: globals.h:229
static Handle< Object > FunctionGetArguments(Handle< JSFunction > object)
Definition: accessors.cc:687
void PrintName(FILE *out=stdout)
Definition: objects.cc:9839
static const int kFixedFrameSizeFromFp
Definition: frames.h:180
intptr_t GetFrameSlot(unsigned offset)
Definition: deoptimizer.h:494
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function info
Definition: flags.cc:317
friend class DeoptimizedFrameInfo
Definition: deoptimizer.h:462
Object * native_contexts_list()
Definition: heap.h:1354
static int ToAllocationIndex(DwVfpRegister reg)
static Deoptimizer * Grab(Isolate *isolate)
Definition: deoptimizer.cc:138
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:258
void CopyBytes(uint8_t *target, uint8_t *source)
Definition: runtime.cc:1309
static Handle< T > null()
Definition: handles.h:80
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
friend class FrameDescription
Definition: deoptimizer.h:461
MaterializedObjectStore * materialized_object_store()
Definition: isolate.h:880
StackFrame::Type GetFrameType() const
Definition: deoptimizer.h:563
void USE(T)
Definition: globals.h:341
kInstanceClassNameOffset kNeedsAccessCheckBit kRemovePrototypeBit kIsExpressionBit kAllowLazyCompilation kUsesArguments formal_parameter_count
Definition: objects-inl.h:5190
Counters * counters()
Definition: isolate.h:859
static Register context_register()
#define ASSERT_NE(v1, v2)
Definition: checks.h:331
PerThreadAssertScopeDebugOnly< HEAP_ALLOCATION_ASSERT, false > DisallowHeapAllocation
Definition: assert-scope.h:214
static FixedArray * cast(Object *obj)
double GetDoubleFrameSlot(unsigned offset)
Definition: deoptimizer.h:498
FrameDescription(uint32_t frame_size, JSFunction *function)
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:39
Object * get(int index)
Definition: objects-inl.h:2127
static uint32_t & uint32_at(Address addr)
Definition: v8memory.h:47
static size_t GetMaxDeoptTableSize()
Definition: deoptimizer.cc:128
void set_marked_for_deoptimization(bool flag)
Definition: objects-inl.h:4560
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric literals(0o77, 0b11)") DEFINE_bool(harmony_strings
int ConvertJSFrameIndexToFrameIndex(int jsframe_index)
Definition: deoptimizer.cc:147
const Register fp
static Representation Tagged()
intptr_t GetRegister(unsigned n) const
Definition: deoptimizer.h:513
const int kPCOnStackSize
Definition: globals.h:270
static void DeoptimizeMarkedCode(Isolate *isolate)
Definition: deoptimizer.cc:467
static GlobalObject * cast(Object *obj)
SmartArrayPointer< const char > ToCString() const
static Handle< Map > GeneralizeAllFieldRepresentations(Handle< Map > map, Representation new_representation)
Definition: objects.cc:2770
static FixedArrayBase * cast(Object *object)
Definition: objects-inl.h:2121
int jsframe_count() const
Definition: deoptimizer.h:162
Handle< FixedArray > Get(Address fp)
static const int kMaxValue
Definition: objects.h:1681
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505
static intptr_t CommitPageSize()
#define TRANSLATION_OPCODE_LIST(V)
Definition: deoptimizer.h:703
bool has_adapted_arguments() const
Definition: frames-inl.h:279
JSFunction * function() const
Definition: frames-inl.h:284
Handle< JSArray > NewJSArray(ElementsKind elements_kind, int length, int capacity, ArrayStorageAllocationMode mode=INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, PretenureFlag pretenure=NOT_TENURED)
Definition: factory.cc:1437
void Set(Address fp, Handle< FixedArray > materialized_objects)
static Deoptimizer * New(JSFunction *function, BailoutType type, unsigned bailout_id, Address from, int fp_to_sp_delta, Isolate *isolate)
Definition: deoptimizer.cc:105
static DeoptimizationInputData * cast(Object *obj)
intptr_t GetConstantPool() const
Definition: deoptimizer.h:553
Object * GetExpression(int index)
Object * GetParameter(int index)
static JSFunction * cast(Object *obj)