v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
deoptimizer.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "accessors.h"
31 #include "codegen.h"
32 #include "deoptimizer.h"
33 #include "disasm.h"
34 #include "full-codegen.h"
35 #include "global-handles.h"
36 #include "macro-assembler.h"
37 #include "prettyprinter.h"
38 
39 
40 namespace v8 {
41 namespace internal {
42 
44  eager_deoptimization_entry_code_ = NULL;
45  lazy_deoptimization_entry_code_ = NULL;
46  current_ = NULL;
47  deoptimizing_code_list_ = NULL;
48 #ifdef ENABLE_DEBUGGER_SUPPORT
49  deoptimized_frame_info_ = NULL;
50 #endif
51 }
52 
53 
55  if (eager_deoptimization_entry_code_ != NULL) {
56  Isolate::Current()->memory_allocator()->Free(
57  eager_deoptimization_entry_code_);
58  eager_deoptimization_entry_code_ = NULL;
59  }
60  if (lazy_deoptimization_entry_code_ != NULL) {
61  Isolate::Current()->memory_allocator()->Free(
62  lazy_deoptimization_entry_code_);
63  lazy_deoptimization_entry_code_ = NULL;
64  }
65 }
66 
67 
68 #ifdef ENABLE_DEBUGGER_SUPPORT
69 void DeoptimizerData::Iterate(ObjectVisitor* v) {
70  if (deoptimized_frame_info_ != NULL) {
71  deoptimized_frame_info_->Iterate(v);
72  }
73 }
74 #endif
75 
76 
77 // We rely on this function not causing a GC. It is called from generated code
78 // without having a real stack frame in place.
80  BailoutType type,
81  unsigned bailout_id,
82  Address from,
83  int fp_to_sp_delta,
84  Isolate* isolate) {
85  ASSERT(isolate == Isolate::Current());
86  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
87  function,
88  type,
89  bailout_id,
90  from,
91  fp_to_sp_delta,
92  NULL);
93  ASSERT(isolate->deoptimizer_data()->current_ == NULL);
94  isolate->deoptimizer_data()->current_ = deoptimizer;
95  return deoptimizer;
96 }
97 
98 
100  ASSERT(isolate == Isolate::Current());
101  Deoptimizer* result = isolate->deoptimizer_data()->current_;
102  ASSERT(result != NULL);
103  result->DeleteFrameDescriptions();
104  isolate->deoptimizer_data()->current_ = NULL;
105  return result;
106 }
107 
108 
110  if (jsframe_index == 0) return 0;
111 
112  int frame_index = 0;
113  while (jsframe_index >= 0) {
114  FrameDescription* frame = output_[frame_index];
115  if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
116  jsframe_index--;
117  }
118  frame_index++;
119  }
120 
121  return frame_index - 1;
122 }
123 
124 
125 #ifdef ENABLE_DEBUGGER_SUPPORT
126 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
127  JavaScriptFrame* frame,
128  int jsframe_index,
129  Isolate* isolate) {
130  ASSERT(isolate == Isolate::Current());
131  ASSERT(frame->is_optimized());
132  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
133 
134  // Get the function and code from the frame.
135  JSFunction* function = JSFunction::cast(frame->function());
136  Code* code = frame->LookupCode();
137 
138  // Locate the deoptimization point in the code. As we are at a call the
139  // return address must be at a place in the code with deoptimization support.
140  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
141  int deoptimization_index = safepoint_entry.deoptimization_index();
142  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
143 
144  // Always use the actual stack slots when calculating the fp to sp
145  // delta adding two for the function and context.
146  unsigned stack_slots = code->stack_slots();
147  unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
148 
149  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
150  function,
152  deoptimization_index,
153  frame->pc(),
154  fp_to_sp_delta,
155  code);
156  Address tos = frame->fp() - fp_to_sp_delta;
157  deoptimizer->FillInputFrame(tos, frame);
158 
159  // Calculate the output frames.
161 
162  // Create the GC safe output frame information and register it for GC
163  // handling.
164  ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
165 
166  // Convert JS frame index into frame index.
167  int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
168 
169  bool has_arguments_adaptor =
170  frame_index > 0 &&
171  deoptimizer->output_[frame_index - 1]->GetFrameType() ==
173 
174  int construct_offset = has_arguments_adaptor ? 2 : 1;
175  bool has_construct_stub =
176  frame_index >= construct_offset &&
177  deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
178  StackFrame::CONSTRUCT;
179 
180  DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
181  frame_index,
182  has_arguments_adaptor,
183  has_construct_stub);
184  isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
185 
186  // Get the "simulated" top and size for the requested frame.
187  FrameDescription* parameters_frame =
188  deoptimizer->output_[
189  has_arguments_adaptor ? (frame_index - 1) : frame_index];
190 
191  uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
192  Address parameters_top = reinterpret_cast<Address>(
193  parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
194  parameters_size));
195 
196  uint32_t expressions_size = info->expression_count() * kPointerSize;
197  Address expressions_top = reinterpret_cast<Address>(
198  deoptimizer->output_[frame_index]->GetTop());
199 
200  // Done with the GC-unsafe frame descriptions. This re-enables allocation.
201  deoptimizer->DeleteFrameDescriptions();
202 
203  // Allocate a heap number for the doubles belonging to this frame.
204  deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
205  parameters_top, parameters_size, expressions_top, expressions_size, info);
206 
207  // Finished using the deoptimizer instance.
208  delete deoptimizer;
209 
210  return info;
211 }
212 
213 
214 void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
215  Isolate* isolate) {
216  ASSERT(isolate == Isolate::Current());
217  ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
218  delete info;
219  isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
220 }
221 #endif
222 
223 void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
224  int count,
225  BailoutType type) {
226  TableEntryGenerator generator(masm, type, count);
227  generator.Generate();
228 }
229 
230 
231 class DeoptimizingVisitor : public OptimizedFunctionVisitor {
232  public:
233  virtual void EnterContext(Context* context) {
234  if (FLAG_trace_deopt) {
235  PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
236  reinterpret_cast<intptr_t>(context));
237  }
238  }
239 
240  virtual void VisitFunction(JSFunction* function) {
242  }
243 
244  virtual void LeaveContext(Context* context) {
245  context->ClearOptimizedFunctions();
246  }
247 };
248 
249 
251  AssertNoAllocation no_allocation;
252 
253  if (FLAG_trace_deopt) {
254  PrintF("[deoptimize all contexts]\n");
255  }
256 
257  DeoptimizingVisitor visitor;
258  VisitAllOptimizedFunctions(&visitor);
259 }
260 
261 
263  AssertNoAllocation no_allocation;
264 
265  DeoptimizingVisitor visitor;
267 }
268 
269 
271  Context* context, OptimizedFunctionVisitor* visitor) {
272  Isolate* isolate = context->GetIsolate();
273  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
274  AssertNoAllocation no_allocation;
275 
276  ASSERT(context->IsNativeContext());
277 
278  visitor->EnterContext(context);
279 
280  // Create a snapshot of the optimized functions list. This is needed because
281  // visitors might remove more than one link from the list at once.
283  Object* element = context->OptimizedFunctionsListHead();
284  while (!element->IsUndefined()) {
285  JSFunction* element_function = JSFunction::cast(element);
286  snapshot.Add(element_function, isolate->runtime_zone());
287  element = element_function->next_function_link();
288  }
289 
290  // Run through the snapshot of optimized functions and visit them.
291  for (int i = 0; i < snapshot.length(); ++i) {
292  visitor->VisitFunction(snapshot.at(i));
293  }
294 
295  visitor->LeaveContext(context);
296 }
297 
298 
300  JSObject* object, OptimizedFunctionVisitor* visitor) {
301  AssertNoAllocation no_allocation;
302 
303  if (object->IsJSGlobalProxy()) {
304  Object* proto = object->GetPrototype();
305  ASSERT(proto->IsJSGlobalObject());
307  GlobalObject::cast(proto)->native_context(), visitor);
308  } else if (object->IsGlobalObject()) {
310  GlobalObject::cast(object)->native_context(), visitor);
311  }
312 }
313 
314 
316  OptimizedFunctionVisitor* visitor) {
317  AssertNoAllocation no_allocation;
318 
319  // Run through the list of all native contexts and deoptimize.
320  Object* context = Isolate::Current()->heap()->native_contexts_list();
321  while (!context->IsUndefined()) {
322  // GC can happen when the context is not fully initialized,
323  // so the global field of the context can be undefined.
325  if (!global->IsUndefined()) {
327  visitor);
328  }
329  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
330  }
331 }
332 
333 
334 void Deoptimizer::HandleWeakDeoptimizedCode(
335  v8::Persistent<v8::Value> obj, void* data) {
337  reinterpret_cast<DeoptimizingCodeListNode*>(data);
338  RemoveDeoptimizingCode(*node->code());
339 #ifdef DEBUG
340  node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
341  while (node != NULL) {
342  ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
343  node = node->next();
344  }
345 #endif
346 }
347 
348 
350  deoptimizer->DoComputeOutputFrames();
351 }
352 
353 
// Constructs a deoptimizer for one bailout event. Records the bailout
// parameters, emits tracing if enabled, bumps the function's deopt counter,
// locates the optimized Code object being abandoned (how depends on the
// bailout type), and allocates the GC-unsafe input frame description.
// Allocation must already be disallowed by the caller (asserted below).
Deoptimizer::Deoptimizer(Isolate* isolate,
                         JSFunction* function,
                         BailoutType type,
                         unsigned bailout_id,
                         Address from,
                         int fp_to_sp_delta,
                         Code* optimized_code)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      has_alignment_padding_(0),
      input_(NULL),
      output_count_(0),
      jsframe_count_(0),
      output_(NULL),
      deferred_arguments_objects_values_(0),
      deferred_arguments_objects_(0),
      deferred_heap_numbers_(0) {
  // Trace deopts (non-OSR) or OSR transitions, under their own flags.
  if (FLAG_trace_deopt && type != OSR) {
    if (type == DEBUGGER) {
      PrintF("**** DEOPT FOR DEBUGGER: ");
    } else {
      PrintF("**** DEOPT: ");
    }
    function->PrintName();
    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  } else if (FLAG_trace_osr && type == OSR) {
    PrintF("**** OSR: ");
    function->PrintName();
    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  }
  function->shared()->increment_deopt_count();
  // Find the optimized code.
  if (type == EAGER) {
    // Eager deopts come straight from the function's current code; there is
    // no meaningful return address.
    ASSERT(from == NULL);
    optimized_code_ = function_->code();
    if (FLAG_trace_deopt && FLAG_code_comments) {
      // Print instruction associated with this bailout.
      const char* last_comment = NULL;
      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
        RelocInfo* info = it.rinfo();
        if (info->rmode() == RelocInfo::COMMENT) {
          last_comment = reinterpret_cast<const char*>(info->data());
        }
        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
          unsigned id = Deoptimizer::GetDeoptimizationId(
              info->target_address(), Deoptimizer::EAGER);
          if (id == bailout_id && last_comment != NULL) {
            PrintF(" %s\n", last_comment);
            break;
          }
        }
      }
    }
  } else if (type == LAZY) {
    // Lazily deoptimized code is found via the return address.
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
    ASSERT(optimized_code_ != NULL);
  } else if (type == OSR) {
    // The function has already been optimized and we're transitioning
    // from the unoptimized shared version to the optimized one in the
    // function. The return address (from) points to unoptimized code.
    optimized_code_ = function_->code();
    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
    ASSERT(!optimized_code_->contains(from));
  } else if (type == DEBUGGER) {
    // The debugger supplies the code object explicitly.
    optimized_code_ = optimized_code;
    ASSERT(optimized_code_->contains(from));
  }
  // Allocation must remain disabled while the raw input frame exists.
  ASSERT(HEAP->allow_allocation(false));
  unsigned size = ComputeInputFrameSize();
  input_ = new(size) FrameDescription(size, function);
  input_->SetFrameType(StackFrame::JAVA_SCRIPT);
}
438 
439 
441  ASSERT(input_ == NULL && output_ == NULL);
442 }
443 
444 
445 void Deoptimizer::DeleteFrameDescriptions() {
446  delete input_;
447  for (int i = 0; i < output_count_; ++i) {
448  if (output_[i] != input_) delete output_[i];
449  }
450  delete[] output_;
451  input_ = NULL;
452  output_ = NULL;
453  ASSERT(!HEAP->allow_allocation(true));
454 }
455 
456 
458  ASSERT(id >= 0);
459  if (id >= kNumberOfEntries) return NULL;
460  MemoryChunk* base = NULL;
461  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
462  if (type == EAGER) {
463  if (data->eager_deoptimization_entry_code_ == NULL) {
464  data->eager_deoptimization_entry_code_ = CreateCode(type);
465  }
466  base = data->eager_deoptimization_entry_code_;
467  } else {
468  if (data->lazy_deoptimization_entry_code_ == NULL) {
469  data->lazy_deoptimization_entry_code_ = CreateCode(type);
470  }
471  base = data->lazy_deoptimization_entry_code_;
472  }
473  return
474  static_cast<Address>(base->area_start()) + (id * table_entry_size_);
475 }
476 
477 
479  MemoryChunk* base = NULL;
480  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
481  if (type == EAGER) {
482  base = data->eager_deoptimization_entry_code_;
483  } else {
484  base = data->lazy_deoptimization_entry_code_;
485  }
486  if (base == NULL ||
487  addr < base->area_start() ||
488  addr >= base->area_start() +
489  (kNumberOfEntries * table_entry_size_)) {
491  }
492  ASSERT_EQ(0,
493  static_cast<int>(addr - base->area_start()) % table_entry_size_);
494  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
495 }
496 
497 
499  BailoutId id,
500  SharedFunctionInfo* shared) {
501  // TODO(kasperl): For now, we do a simple linear search for the PC
502  // offset associated with the given node id. This should probably be
503  // changed to a binary search.
504  int length = data->DeoptPoints();
505  for (int i = 0; i < length; i++) {
506  if (data->AstId(i) == id) {
507  return data->PcAndState(i)->value();
508  }
509  }
510  PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
511  PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
512  // Print the source code if available.
513  HeapStringAllocator string_allocator;
514  StringStream stream(&string_allocator);
515  shared->SourceCodePrint(&stream, -1);
516  PrintF("[source:\n%s\n]", *stream.ToCString());
517 
518  UNREACHABLE();
519  return -1;
520 }
521 
522 
524  int length = 0;
526  isolate->deoptimizer_data()->deoptimizing_code_list_;
527  while (node != NULL) {
528  length++;
529  node = node->next();
530  }
531  return length;
532 }
533 
534 
535 // We rely on this function not causing a GC. It is called from generated code
536 // without having a real stack frame in place.
537 void Deoptimizer::DoComputeOutputFrames() {
538  if (bailout_type_ == OSR) {
539  DoComputeOsrOutputFrame();
540  return;
541  }
542 
543  // Print some helpful diagnostic information.
544  int64_t start = OS::Ticks();
545  if (FLAG_trace_deopt) {
546  PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
547  (bailout_type_ == LAZY ? " (lazy)" : ""),
548  reinterpret_cast<intptr_t>(function_));
549  function_->PrintName();
550  PrintF(" @%d]\n", bailout_id_);
551  }
552 
553  // Determine basic deoptimization information. The optimized frame is
554  // described by the input data.
555  DeoptimizationInputData* input_data =
556  DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
557  BailoutId node_id = input_data->AstId(bailout_id_);
558  ByteArray* translations = input_data->TranslationByteArray();
559  unsigned translation_index =
560  input_data->TranslationIndex(bailout_id_)->value();
561 
562  // Do the input frame to output frame(s) translation.
563  TranslationIterator iterator(translations, translation_index);
564  Translation::Opcode opcode =
565  static_cast<Translation::Opcode>(iterator.Next());
566  ASSERT(Translation::BEGIN == opcode);
567  USE(opcode);
568  // Read the number of output frames and allocate an array for their
569  // descriptions.
570  int count = iterator.Next();
571  iterator.Next(); // Drop JS frames count.
572  ASSERT(output_ == NULL);
573  output_ = new FrameDescription*[count];
574  for (int i = 0; i < count; ++i) {
575  output_[i] = NULL;
576  }
577  output_count_ = count;
578 
579  // Translate each output frame.
580  for (int i = 0; i < count; ++i) {
581  // Read the ast node id, function, and frame height for this output frame.
582  Translation::Opcode opcode =
583  static_cast<Translation::Opcode>(iterator.Next());
584  switch (opcode) {
585  case Translation::JS_FRAME:
586  DoComputeJSFrame(&iterator, i);
587  jsframe_count_++;
588  break;
589  case Translation::ARGUMENTS_ADAPTOR_FRAME:
590  DoComputeArgumentsAdaptorFrame(&iterator, i);
591  break;
592  case Translation::CONSTRUCT_STUB_FRAME:
593  DoComputeConstructStubFrame(&iterator, i);
594  break;
595  case Translation::GETTER_STUB_FRAME:
596  DoComputeAccessorStubFrame(&iterator, i, false);
597  break;
598  case Translation::SETTER_STUB_FRAME:
599  DoComputeAccessorStubFrame(&iterator, i, true);
600  break;
601  case Translation::BEGIN:
603  case Translation::INT32_REGISTER:
604  case Translation::UINT32_REGISTER:
605  case Translation::DOUBLE_REGISTER:
606  case Translation::STACK_SLOT:
607  case Translation::INT32_STACK_SLOT:
608  case Translation::UINT32_STACK_SLOT:
609  case Translation::DOUBLE_STACK_SLOT:
610  case Translation::LITERAL:
611  case Translation::ARGUMENTS_OBJECT:
612  case Translation::DUPLICATE:
613  UNREACHABLE();
614  break;
615  }
616  }
617 
618  // Print some helpful diagnostic information.
619  if (FLAG_trace_deopt) {
620  double ms = static_cast<double>(OS::Ticks() - start) / 1000;
621  int index = output_count_ - 1; // Index of the topmost frame.
622  JSFunction* function = output_[index]->GetFunction();
623  PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
624  reinterpret_cast<intptr_t>(function));
625  function->PrintName();
626  PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
627  " took %0.3f ms]\n",
628  node_id.ToInt(),
629  output_[index]->GetPc(),
631  static_cast<FullCodeGenerator::State>(
632  output_[index]->GetState()->value())),
633  has_alignment_padding_ ? "with padding" : "no padding",
634  ms);
635  }
636 }
637 
638 
640  ASSERT_NE(DEBUGGER, bailout_type_);
641 
642  // Handlify all argument object values before triggering any allocation.
643  List<Handle<Object> > values(deferred_arguments_objects_values_.length());
644  for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
645  values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
646  }
647 
648  // Play it safe and clear all unhandlified values before we continue.
649  deferred_arguments_objects_values_.Clear();
650 
651  // Materialize all heap numbers before looking at arguments because when the
652  // output frames are used to materialize arguments objects later on they need
653  // to already contain valid heap numbers.
654  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
655  HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
656  Handle<Object> num = isolate_->factory()->NewNumber(d.value());
657  if (FLAG_trace_deopt) {
658  PrintF("Materializing a new heap number %p [%e] in slot %p\n",
659  reinterpret_cast<void*>(*num),
660  d.value(),
661  d.slot_address());
662  }
663  Memory::Object_at(d.slot_address()) = *num;
664  }
665 
666  // Materialize arguments objects one frame at a time.
667  for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
668  if (frame_index != 0) it->Advance();
669  JavaScriptFrame* frame = it->frame();
670  Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
671  Handle<JSObject> arguments;
672  for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
673  if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
674  ArgumentsObjectMaterializationDescriptor descriptor =
675  deferred_arguments_objects_.RemoveLast();
676  const int length = descriptor.arguments_length();
677  if (arguments.is_null()) {
678  if (frame->has_adapted_arguments()) {
679  // Use the arguments adapter frame we just built to materialize the
680  // arguments object. FunctionGetArguments can't throw an exception,
681  // so cast away the doubt with an assert.
682  arguments = Handle<JSObject>(JSObject::cast(
684  NULL)->ToObjectUnchecked()));
685  values.RewindBy(length);
686  } else {
687  // Construct an arguments object and copy the parameters to a newly
688  // allocated arguments object backing store.
689  arguments =
690  isolate_->factory()->NewArgumentsObject(function, length);
691  Handle<FixedArray> array =
692  isolate_->factory()->NewFixedArray(length);
693  ASSERT(array->length() == length);
694  for (int i = length - 1; i >= 0 ; --i) {
695  array->set(i, *values.RemoveLast());
696  }
697  arguments->set_elements(*array);
698  }
699  }
700  frame->SetExpression(i, *arguments);
701  ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
702  if (FLAG_trace_deopt) {
703  PrintF("Materializing %sarguments object for %p: ",
704  frame->has_adapted_arguments() ? "(adapted) " : "",
705  reinterpret_cast<void*>(descriptor.slot_address()));
706  arguments->ShortPrint();
707  PrintF("\n");
708  }
709  }
710  }
711  }
712 }
713 
714 
715 #ifdef ENABLE_DEBUGGER_SUPPORT
// Materializes the deferred heap numbers that belong to the single frame
// being extracted for the debugger, writing each into the matching
// parameter or expression slot of `info`. Numbers whose slots fall outside
// both ranges belong to other frames and are skipped.
// The [top, bottom) ranges describe the simulated stack area of the frame:
// parameters_top/expressions_top are the lowest addresses; sizes are bytes.
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
    Address parameters_top,
    uint32_t parameters_size,
    Address expressions_top,
    uint32_t expressions_size,
    DeoptimizedFrameInfo* info) {
  ASSERT_EQ(DEBUGGER, bailout_type_);
  Address parameters_bottom = parameters_top + parameters_size;
  Address expressions_bottom = expressions_top + expressions_size;
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];

    // Check whether the heap number to materialize actually belongs to the
    // frame being extracted.
    Address slot = d.slot_address();
    if (parameters_top <= slot && slot < parameters_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());

      // Slots grow downwards: the lowest address is the last parameter.
      int index = (info->parameters_count() - 1) -
          static_cast<int>(slot - parameters_top) / kPointerSize;

      if (FLAG_trace_deopt) {
        PrintF("Materializing a new heap number %p [%e] in slot %p"
               "for parameter slot #%d\n",
               reinterpret_cast<void*>(*num),
               d.value(),
               d.slot_address(),
               index);
      }

      info->SetParameter(index, *num);
    } else if (expressions_top <= slot && slot < expressions_bottom) {
      Handle<Object> num = isolate_->factory()->NewNumber(d.value());

      // Same downward-growth indexing for the expression stack.
      int index = info->expression_count() - 1 -
          static_cast<int>(slot - expressions_top) / kPointerSize;

      if (FLAG_trace_deopt) {
        PrintF("Materializing a new heap number %p [%e] in slot %p"
               "for expression slot #%d\n",
               reinterpret_cast<void*>(*num),
               d.value(),
               d.slot_address(),
               index);
      }

      info->SetExpression(index, *num);
    }
  }
}
766 #endif
767 
768 
769 void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
770  int frame_index,
771  unsigned output_offset) {
772  disasm::NameConverter converter;
773  // A GC-safe temporary placeholder that we can put in the output frame.
774  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
775 
776  // Ignore commands marked as duplicate and act on the first non-duplicate.
777  Translation::Opcode opcode =
778  static_cast<Translation::Opcode>(iterator->Next());
779  while (opcode == Translation::DUPLICATE) {
780  opcode = static_cast<Translation::Opcode>(iterator->Next());
781  iterator->Skip(Translation::NumberOfOperandsFor(opcode));
782  opcode = static_cast<Translation::Opcode>(iterator->Next());
783  }
784 
785  switch (opcode) {
786  case Translation::BEGIN:
787  case Translation::JS_FRAME:
788  case Translation::ARGUMENTS_ADAPTOR_FRAME:
789  case Translation::CONSTRUCT_STUB_FRAME:
790  case Translation::GETTER_STUB_FRAME:
791  case Translation::SETTER_STUB_FRAME:
792  case Translation::DUPLICATE:
793  UNREACHABLE();
794  return;
795 
796  case Translation::REGISTER: {
797  int input_reg = iterator->Next();
798  intptr_t input_value = input_->GetRegister(input_reg);
799  if (FLAG_trace_deopt) {
800  PrintF(
801  " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
802  output_[frame_index]->GetTop() + output_offset,
803  output_offset,
804  input_value,
805  converter.NameOfCPURegister(input_reg));
806  reinterpret_cast<Object*>(input_value)->ShortPrint();
807  PrintF("\n");
808  }
809  output_[frame_index]->SetFrameSlot(output_offset, input_value);
810  return;
811  }
812 
813  case Translation::INT32_REGISTER: {
814  int input_reg = iterator->Next();
815  intptr_t value = input_->GetRegister(input_reg);
816  bool is_smi = Smi::IsValid(value);
817  if (FLAG_trace_deopt) {
818  PrintF(
819  " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
820  output_[frame_index]->GetTop() + output_offset,
821  output_offset,
822  value,
823  converter.NameOfCPURegister(input_reg),
824  is_smi ? "smi" : "heap number");
825  }
826  if (is_smi) {
827  intptr_t tagged_value =
828  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
829  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
830  } else {
831  // We save the untagged value on the side and store a GC-safe
832  // temporary placeholder in the frame.
833  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
834  static_cast<double>(static_cast<int32_t>(value)));
835  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
836  }
837  return;
838  }
839 
840  case Translation::UINT32_REGISTER: {
841  int input_reg = iterator->Next();
842  uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
843  bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
844  if (FLAG_trace_deopt) {
845  PrintF(
846  " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
847  " ; uint %s (%s)\n",
848  output_[frame_index]->GetTop() + output_offset,
849  output_offset,
850  value,
851  converter.NameOfCPURegister(input_reg),
852  is_smi ? "smi" : "heap number");
853  }
854  if (is_smi) {
855  intptr_t tagged_value =
856  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
857  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
858  } else {
859  // We save the untagged value on the side and store a GC-safe
860  // temporary placeholder in the frame.
861  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
862  static_cast<double>(static_cast<uint32_t>(value)));
863  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
864  }
865  return;
866  }
867 
868  case Translation::DOUBLE_REGISTER: {
869  int input_reg = iterator->Next();
870  double value = input_->GetDoubleRegister(input_reg);
871  if (FLAG_trace_deopt) {
872  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
873  output_[frame_index]->GetTop() + output_offset,
874  output_offset,
875  value,
877  }
878  // We save the untagged value on the side and store a GC-safe
879  // temporary placeholder in the frame.
880  AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
881  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
882  return;
883  }
884 
885  case Translation::STACK_SLOT: {
886  int input_slot_index = iterator->Next();
887  unsigned input_offset =
888  input_->GetOffsetFromSlotIndex(input_slot_index);
889  intptr_t input_value = input_->GetFrameSlot(input_offset);
890  if (FLAG_trace_deopt) {
891  PrintF(" 0x%08" V8PRIxPTR ": ",
892  output_[frame_index]->GetTop() + output_offset);
893  PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
894  output_offset,
895  input_value,
896  input_offset);
897  reinterpret_cast<Object*>(input_value)->ShortPrint();
898  PrintF("\n");
899  }
900  output_[frame_index]->SetFrameSlot(output_offset, input_value);
901  return;
902  }
903 
904  case Translation::INT32_STACK_SLOT: {
905  int input_slot_index = iterator->Next();
906  unsigned input_offset =
907  input_->GetOffsetFromSlotIndex(input_slot_index);
908  intptr_t value = input_->GetFrameSlot(input_offset);
909  bool is_smi = Smi::IsValid(value);
910  if (FLAG_trace_deopt) {
911  PrintF(" 0x%08" V8PRIxPTR ": ",
912  output_[frame_index]->GetTop() + output_offset);
913  PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
914  output_offset,
915  value,
916  input_offset,
917  is_smi ? "smi" : "heap number");
918  }
919  if (is_smi) {
920  intptr_t tagged_value =
921  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
922  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
923  } else {
924  // We save the untagged value on the side and store a GC-safe
925  // temporary placeholder in the frame.
926  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
927  static_cast<double>(static_cast<int32_t>(value)));
928  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
929  }
930  return;
931  }
932 
933  case Translation::UINT32_STACK_SLOT: {
934  int input_slot_index = iterator->Next();
935  unsigned input_offset =
936  input_->GetOffsetFromSlotIndex(input_slot_index);
937  uintptr_t value =
938  static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
939  bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
940  if (FLAG_trace_deopt) {
941  PrintF(" 0x%08" V8PRIxPTR ": ",
942  output_[frame_index]->GetTop() + output_offset);
943  PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
944  output_offset,
945  value,
946  input_offset,
947  is_smi ? "smi" : "heap number");
948  }
949  if (is_smi) {
950  intptr_t tagged_value =
951  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
952  output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
953  } else {
954  // We save the untagged value on the side and store a GC-safe
955  // temporary placeholder in the frame.
956  AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
957  static_cast<double>(static_cast<uint32_t>(value)));
958  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
959  }
960  return;
961  }
962 
963  case Translation::DOUBLE_STACK_SLOT: {
964  int input_slot_index = iterator->Next();
965  unsigned input_offset =
966  input_->GetOffsetFromSlotIndex(input_slot_index);
967  double value = input_->GetDoubleFrameSlot(input_offset);
968  if (FLAG_trace_deopt) {
969  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
970  output_[frame_index]->GetTop() + output_offset,
971  output_offset,
972  value,
973  input_offset);
974  }
975  // We save the untagged value on the side and store a GC-safe
976  // temporary placeholder in the frame.
977  AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
978  output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
979  return;
980  }
981 
982  case Translation::LITERAL: {
983  Object* literal = ComputeLiteral(iterator->Next());
984  if (FLAG_trace_deopt) {
985  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
986  output_[frame_index]->GetTop() + output_offset,
987  output_offset);
988  literal->ShortPrint();
989  PrintF(" ; literal\n");
990  }
991  intptr_t value = reinterpret_cast<intptr_t>(literal);
992  output_[frame_index]->SetFrameSlot(output_offset, value);
993  return;
994  }
995 
996  case Translation::ARGUMENTS_OBJECT: {
997  int args_index = iterator->Next() + 1; // Skip receiver.
998  int args_length = iterator->Next() - 1; // Skip receiver.
999  if (FLAG_trace_deopt) {
1000  PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
1001  output_[frame_index]->GetTop() + output_offset,
1002  output_offset);
1003  isolate_->heap()->arguments_marker()->ShortPrint();
1004  PrintF(" ; arguments object\n");
1005  }
1006  // Use the arguments marker value as a sentinel and fill in the arguments
1007  // object after the deoptimized frame is built.
1008  intptr_t value = reinterpret_cast<intptr_t>(
1009  isolate_->heap()->arguments_marker());
1010  AddArgumentsObject(
1011  output_[frame_index]->GetTop() + output_offset, args_length);
1012  output_[frame_index]->SetFrameSlot(output_offset, value);
1013  // We save the tagged argument values on the side and materialize the
1014  // actual arguments object after the deoptimized frame is built.
1015  for (int i = 0; i < args_length; i++) {
1016  unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
1017  intptr_t input_value = input_->GetFrameSlot(input_offset);
1018  AddArgumentsObjectValue(input_value);
1019  }
1020  return;
1021  }
1022  }
1023 }
1024 
1025 
1026 static bool ObjectToInt32(Object* obj, int32_t* value) {
1027  if (obj->IsSmi()) {
1028  *value = Smi::cast(obj)->value();
1029  return true;
1030  }
1031 
1032  if (obj->IsHeapNumber()) {
1033  double num = HeapNumber::cast(obj)->value();
1034  if (FastI2D(FastD2I(num)) != num) {
1035  if (FLAG_trace_osr) {
1036  PrintF("**** %g could not be converted to int32 ****\n",
1037  HeapNumber::cast(obj)->value());
1038  }
1039  return false;
1040  }
1041 
1042  *value = FastD2I(num);
1043  return true;
1044  }
1045 
1046  return false;
1047 }
1048 
1049 
1050 static bool ObjectToUint32(Object* obj, uint32_t* value) {
1051  if (obj->IsSmi()) {
1052  if (Smi::cast(obj)->value() < 0) return false;
1053 
1054  *value = static_cast<uint32_t>(Smi::cast(obj)->value());
1055  return true;
1056  }
1057 
1058  if (obj->IsHeapNumber()) {
1059  double num = HeapNumber::cast(obj)->value();
1060  if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
1061  if (FLAG_trace_osr) {
1062  PrintF("**** %g could not be converted to uint32 ****\n",
1063  HeapNumber::cast(obj)->value());
1064  }
1065  return false;
1066  }
1067 
1068  *value = FastD2UI(num);
1069  return true;
1070  }
1071 
1072  return false;
1073 }
1074 
1075 
1076 bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
1077  int* input_offset) {
1078  disasm::NameConverter converter;
1079  FrameDescription* output = output_[0];
1080 
1081  // The input values are all part of the unoptimized frame so they
1082  // are all tagged pointers.
1083  uintptr_t input_value = input_->GetFrameSlot(*input_offset);
1084  Object* input_object = reinterpret_cast<Object*>(input_value);
1085 
1086  Translation::Opcode opcode =
1087  static_cast<Translation::Opcode>(iterator->Next());
1088  bool duplicate = (opcode == Translation::DUPLICATE);
1089  if (duplicate) {
1090  opcode = static_cast<Translation::Opcode>(iterator->Next());
1091  }
1092 
1093  switch (opcode) {
1094  case Translation::BEGIN:
1095  case Translation::JS_FRAME:
1096  case Translation::ARGUMENTS_ADAPTOR_FRAME:
1097  case Translation::CONSTRUCT_STUB_FRAME:
1098  case Translation::GETTER_STUB_FRAME:
1099  case Translation::SETTER_STUB_FRAME:
1100  case Translation::DUPLICATE:
1101  UNREACHABLE(); // Malformed input.
1102  return false;
1103 
1104  case Translation::REGISTER: {
1105  int output_reg = iterator->Next();
1106  if (FLAG_trace_osr) {
1107  PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
1108  converter.NameOfCPURegister(output_reg),
1109  input_value,
1110  *input_offset);
1111  }
1112  output->SetRegister(output_reg, input_value);
1113  break;
1114  }
1115 
1116  case Translation::INT32_REGISTER: {
1117  int32_t int32_value = 0;
1118  if (!ObjectToInt32(input_object, &int32_value)) return false;
1119 
1120  int output_reg = iterator->Next();
1121  if (FLAG_trace_osr) {
1122  PrintF(" %s <- %d (int32) ; [sp + %d]\n",
1123  converter.NameOfCPURegister(output_reg),
1124  int32_value,
1125  *input_offset);
1126  }
1127  output->SetRegister(output_reg, int32_value);
1128  break;
1129  }
1130 
1131  case Translation::UINT32_REGISTER: {
1132  uint32_t uint32_value = 0;
1133  if (!ObjectToUint32(input_object, &uint32_value)) return false;
1134 
1135  int output_reg = iterator->Next();
1136  if (FLAG_trace_osr) {
1137  PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
1138  converter.NameOfCPURegister(output_reg),
1139  uint32_value,
1140  *input_offset);
1141  }
1142  output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
1143  }
1144 
1145 
1146  case Translation::DOUBLE_REGISTER: {
1147  // Abort OSR if we don't have a number.
1148  if (!input_object->IsNumber()) return false;
1149 
1150  int output_reg = iterator->Next();
1151  double double_value = input_object->Number();
1152  if (FLAG_trace_osr) {
1153  PrintF(" %s <- %g (double) ; [sp + %d]\n",
1155  double_value,
1156  *input_offset);
1157  }
1158  output->SetDoubleRegister(output_reg, double_value);
1159  break;
1160  }
1161 
1162  case Translation::STACK_SLOT: {
1163  int output_index = iterator->Next();
1164  unsigned output_offset =
1165  output->GetOffsetFromSlotIndex(output_index);
1166  if (FLAG_trace_osr) {
1167  PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
1168  output_offset,
1169  input_value,
1170  *input_offset);
1171  reinterpret_cast<Object*>(input_value)->ShortPrint();
1172  PrintF("\n");
1173  }
1174  output->SetFrameSlot(output_offset, input_value);
1175  break;
1176  }
1177 
1178  case Translation::INT32_STACK_SLOT: {
1179  int32_t int32_value = 0;
1180  if (!ObjectToInt32(input_object, &int32_value)) return false;
1181 
1182  int output_index = iterator->Next();
1183  unsigned output_offset =
1184  output->GetOffsetFromSlotIndex(output_index);
1185  if (FLAG_trace_osr) {
1186  PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
1187  output_offset,
1188  int32_value,
1189  *input_offset);
1190  }
1191  output->SetFrameSlot(output_offset, int32_value);
1192  break;
1193  }
1194 
1195  case Translation::UINT32_STACK_SLOT: {
1196  uint32_t uint32_value = 0;
1197  if (!ObjectToUint32(input_object, &uint32_value)) return false;
1198 
1199  int output_index = iterator->Next();
1200  unsigned output_offset =
1201  output->GetOffsetFromSlotIndex(output_index);
1202  if (FLAG_trace_osr) {
1203  PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
1204  output_offset,
1205  uint32_value,
1206  *input_offset);
1207  }
1208  output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
1209  break;
1210  }
1211 
1212  case Translation::DOUBLE_STACK_SLOT: {
1213  static const int kLowerOffset = 0 * kPointerSize;
1214  static const int kUpperOffset = 1 * kPointerSize;
1215 
1216  // Abort OSR if we don't have a number.
1217  if (!input_object->IsNumber()) return false;
1218 
1219  int output_index = iterator->Next();
1220  unsigned output_offset =
1221  output->GetOffsetFromSlotIndex(output_index);
1222  double double_value = input_object->Number();
1223  uint64_t int_value = BitCast<uint64_t, double>(double_value);
1224  int32_t lower = static_cast<int32_t>(int_value);
1225  int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
1226  if (FLAG_trace_osr) {
1227  PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
1228  output_offset + kUpperOffset,
1229  upper,
1230  double_value,
1231  *input_offset);
1232  PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
1233  output_offset + kLowerOffset,
1234  lower,
1235  double_value,
1236  *input_offset);
1237  }
1238  output->SetFrameSlot(output_offset + kLowerOffset, lower);
1239  output->SetFrameSlot(output_offset + kUpperOffset, upper);
1240  break;
1241  }
1242 
1243  case Translation::LITERAL: {
1244  // Just ignore non-materialized literals.
1245  iterator->Next();
1246  break;
1247  }
1248 
1249  case Translation::ARGUMENTS_OBJECT: {
1250  // Optimized code assumes that the argument object has not been
1251  // materialized and so bypasses it when doing arguments access.
1252  // We should have bailed out before starting the frame
1253  // translation.
1254  UNREACHABLE();
1255  return false;
1256  }
1257  }
1258 
1259  if (!duplicate) *input_offset -= kPointerSize;
1260  return true;
1261 }
1262 
1263 
1265  Code* check_code,
1266  Code* replacement_code) {
1267  // Iterate over the stack check table and patch every stack check
1268  // call to an unconditional call to the replacement code.
1269  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
1270  Address stack_check_cursor = unoptimized_code->instruction_start() +
1271  unoptimized_code->stack_check_table_offset();
1272  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
1273  stack_check_cursor += kIntSize;
1274  for (uint32_t i = 0; i < table_length; ++i) {
1275  uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
1276  Address pc_after = unoptimized_code->instruction_start() + pc_offset;
1277  PatchStackCheckCodeAt(unoptimized_code,
1278  pc_after,
1279  check_code,
1280  replacement_code);
1281  stack_check_cursor += 2 * kIntSize;
1282  }
1283 }
1284 
1285 
1287  Code* check_code,
1288  Code* replacement_code) {
1289  // Iterate over the stack check table and revert the patched
1290  // stack check calls.
1291  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
1292  Address stack_check_cursor = unoptimized_code->instruction_start() +
1293  unoptimized_code->stack_check_table_offset();
1294  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
1295  stack_check_cursor += kIntSize;
1296  for (uint32_t i = 0; i < table_length; ++i) {
1297  uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
1298  Address pc_after = unoptimized_code->instruction_start() + pc_offset;
1299  RevertStackCheckCodeAt(unoptimized_code,
1300  pc_after,
1301  check_code,
1302  replacement_code);
1303  stack_check_cursor += 2 * kIntSize;
1304  }
1305 }
1306 
1307 
// Computes the size in bytes of the optimized (input) frame from the
// fp-to-sp delta recorded at the deoptimization site.
unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function
  // into account so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // TODO(kasperl): It would be nice if we could verify that the
    // size matches with the stack height we can compute based on the
    // environment at the OSR entry. The code for that is built into
    // the DoComputeOsrOutputFrame function for now.
  } else {
    // Cross-check against the size derived from the optimized code's
    // spill slot count plus outgoing arguments.
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}
1327 
1328 
1329 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
1330  // The fixed part of the frame consists of the return address, frame
1331  // pointer, function, context, and all the incoming arguments.
1332  return ComputeIncomingArgumentSize(function) +
1334 }
1335 
1336 
1337 unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
1338  // The incoming arguments is the values for formal parameters and
1339  // the receiver. Every slot contains a pointer.
1340  unsigned arguments = function->shared()->formal_parameter_count() + 1;
1341  return arguments * kPointerSize;
1342 }
1343 
1344 
1345 unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
1346  DeoptimizationInputData* data = DeoptimizationInputData::cast(
1347  optimized_code_->deoptimization_data());
1348  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
1349  return height * kPointerSize;
1350 }
1351 
1352 
1353 Object* Deoptimizer::ComputeLiteral(int index) const {
1354  DeoptimizationInputData* data = DeoptimizationInputData::cast(
1355  optimized_code_->deoptimization_data());
1356  FixedArray* literals = data->LiteralArray();
1357  return literals->get(index);
1358 }
1359 
1360 
1361 void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
1362  ArgumentsObjectMaterializationDescriptor object_desc(
1363  reinterpret_cast<Address>(slot_address), argc);
1364  deferred_arguments_objects_.Add(object_desc);
1365 }
1366 
1367 
1368 void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
1369  deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
1370 }
1371 
1372 
1373 void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
1374  HeapNumberMaterializationDescriptor value_desc(
1375  reinterpret_cast<Address>(slot_address), value);
1376  deferred_heap_numbers_.Add(value_desc);
1377 }
1378 
1379 
1380 MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
1381  // We cannot run this if the serializer is enabled because this will
1382  // cause us to emit relocation information for the external
1383  // references. This is fine because the deoptimizer's code section
1384  // isn't meant to be serialized at all.
1386 
1387  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
1388  masm.set_emit_debug_code(false);
1389  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
1390  CodeDesc desc;
1391  masm.GetCode(&desc);
1392  ASSERT(desc.reloc_size == 0);
1393 
1394  MemoryChunk* chunk =
1395  Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
1396  EXECUTABLE,
1397  NULL);
1398  ASSERT(chunk->area_size() >= desc.instr_size);
1399  if (chunk == NULL) {
1400  V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
1401  }
1402  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
1403  CPU::FlushICache(chunk->area_start(), desc.instr_size);
1404  return chunk;
1405 }
1406 
1407 
1408 Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
1409  DeoptimizingCodeListNode* node =
1410  Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
1411  while (node != NULL) {
1412  if (node->code()->contains(addr)) return *node->code();
1413  node = node->next();
1414  }
1415  return NULL;
1416 }
1417 
1418 
1419 void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
1420  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
1421  ASSERT(data->deoptimizing_code_list_ != NULL);
1422  // Run through the code objects to find this one and remove it.
1424  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
1425  while (current != NULL) {
1426  if (*current->code() == code) {
1427  // Unlink from list. If prev is NULL we are looking at the first element.
1428  if (prev == NULL) {
1429  data->deoptimizing_code_list_ = current->next();
1430  } else {
1431  prev->set_next(current->next());
1432  }
1433  delete current;
1434  return;
1435  }
1436  // Move to next in list.
1437  prev = current;
1438  current = current->next();
1439  }
1440  // Deoptimizing code is removed through weak callback. Each object is expected
1441  // to be removed once and only once.
1442  UNREACHABLE();
1443 }
1444 
1445 
// Removes from the context's OPTIMIZED_FUNCTIONS_LIST every JSFunction whose
// code is |code|, and returns those functions re-linked into a new list
// (terminated by |undefined|). Functions not using |code| stay in the
// context's list in their original order.
static Object* CutOutRelatedFunctionsList(Context* context,
                                          Code* code,
                                          Object* undefined) {
  Object* result_list_head = undefined;
  Object* head;
  Object* current;
  current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
  JSFunction* prev = NULL;  // Last function kept in the context's list.
  while (current != undefined) {
    JSFunction* func = JSFunction::cast(current);
    // Advance before relinking: func's link field is about to be rewritten.
    current = func->next_function_link();
    if (func->code() == code) {
      // Move func to the front of the result list.
      func->set_next_function_link(result_list_head);
      result_list_head = func;
      // Splice func out of the context's list.
      if (prev) {
        prev->set_next_function_link(current);
      } else {
        head = current;
      }
    } else {
      prev = func;
    }
  }
  // Only write the list head back if it actually changed.
  if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
    context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
  }
  return result_list_head;
}
1474 
1475 
1477  Code* code) {
1478  Context* context = function->context()->native_context();
1479 
1480  SharedFunctionInfo* shared = function->shared();
1481 
1482  Object* undefined = Isolate::Current()->heap()->undefined_value();
1483  Object* current = CutOutRelatedFunctionsList(context, code, undefined);
1484 
1485  while (current != undefined) {
1486  JSFunction* func = JSFunction::cast(current);
1487  current = func->next_function_link();
1488  func->set_code(shared->code());
1489  func->set_next_function_link(undefined);
1490  }
1491 }
1492 
1493 
1495  JSFunction* function)
1496  : frame_size_(frame_size),
1497  function_(function),
1498  top_(kZapUint32),
1499  pc_(kZapUint32),
1500  fp_(kZapUint32),
1501  context_(kZapUint32) {
1502  // Zap all the registers.
1503  for (int r = 0; r < Register::kNumRegisters; r++) {
1504  SetRegister(r, kZapUint32);
1505  }
1506 
1507  // Zap all the slots.
1508  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
1509  SetFrameSlot(o, kZapUint32);
1510  }
1511 }
1512 
1513 
1514 int FrameDescription::ComputeFixedSize() {
1517 }
1518 
1519 
1521  if (slot_index >= 0) {
1522  // Local or spill slots. Skip the fixed part of the frame
1523  // including all arguments.
1524  unsigned base = GetFrameSize() - ComputeFixedSize();
1525  return base - ((slot_index + 1) * kPointerSize);
1526  } else {
1527  // Incoming parameter.
1528  int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
1529  unsigned base = GetFrameSize() - arg_size;
1530  return base - ((slot_index + 1) * kPointerSize);
1531  }
1532 }
1533 
1534 
1536  switch (type_) {
1537  case StackFrame::JAVA_SCRIPT:
1538  return function_->shared()->formal_parameter_count();
1540  // Last slot contains number of incomming arguments as a smi.
1541  // Can't use GetExpression(0) because it would cause infinite recursion.
1542  return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
1543  }
1544  default:
1545  UNREACHABLE();
1546  return 0;
1547  }
1548 }
1549 
1550 
1552  ASSERT(index >= 0);
1553  ASSERT(index < ComputeParametersCount());
1554  // The slot indexes for incoming arguments are negative.
1555  unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
1556  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
1557 }
1558 
1559 
1561  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
1562  unsigned size = GetFrameSize() - ComputeFixedSize();
1563  return size / kPointerSize;
1564 }
1565 
1566 
1568  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
1569  unsigned offset = GetOffsetFromSlotIndex(index);
1570  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
1571 }
1572 
1573 
1574 void TranslationBuffer::Add(int32_t value, Zone* zone) {
1575  // Encode the sign bit in the least significant bit.
1576  bool is_negative = (value < 0);
1577  uint32_t bits = ((is_negative ? -value : value) << 1) |
1578  static_cast<int32_t>(is_negative);
1579  // Encode the individual bytes using the least significant bit of
1580  // each byte to indicate whether or not more bytes follow.
1581  do {
1582  uint32_t next = bits >> 7;
1583  contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
1584  bits = next;
1585  } while (bits != 0);
1586 }
1587 
1588 
1589 int32_t TranslationIterator::Next() {
1590  // Run through the bytes until we reach one with a least significant
1591  // bit of zero (marks the end).
1592  uint32_t bits = 0;
1593  for (int i = 0; true; i += 7) {
1594  ASSERT(HasNext());
1595  uint8_t next = buffer_->get(index_++);
1596  bits |= (next >> 1) << i;
1597  if ((next & 1) == 0) break;
1598  }
1599  // The bits encode the sign in the least significant bit.
1600  bool is_negative = (bits & 1) == 1;
1601  int32_t result = bits >> 1;
1602  return is_negative ? -result : result;
1603 }
1604 
1605 
1606 Handle<ByteArray> TranslationBuffer::CreateByteArray() {
1607  int length = contents_.length();
1608  Handle<ByteArray> result =
1609  Isolate::Current()->factory()->NewByteArray(length, TENURED);
1610  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
1611  return result;
1612 }
1613 
1614 
// Emits a construct-stub frame marker followed by the constructed
// function's literal id and the frame height.
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
  buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}
1620 
1621 
// Emits a getter-stub frame marker followed by the getter's literal id.
void Translation::BeginGetterStubFrame(int literal_id) {
  buffer_->Add(GETTER_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
}
1626 
1627 
// Emits a setter-stub frame marker followed by the setter's literal id.
void Translation::BeginSetterStubFrame(int literal_id) {
  buffer_->Add(SETTER_STUB_FRAME, zone());
  buffer_->Add(literal_id, zone());
}
1632 
1633 
// Emits an arguments-adaptor frame marker followed by the adapted
// function's literal id and the frame height.
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}
1639 
1640 
// Emits a JavaScript frame marker followed by the AST id of the bailout
// point, the function's literal id, and the frame height.
void Translation::BeginJSFrame(BailoutId node_id,
                               int literal_id,
                               unsigned height) {
  buffer_->Add(JS_FRAME, zone());
  buffer_->Add(node_id.ToInt(), zone());
  buffer_->Add(literal_id, zone());
  buffer_->Add(height, zone());
}
1649 
1650 
// Emits a command: the next frame value is a tagged pointer in |reg|.
void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}
1655 
1656 
// Emits a command: the next frame value is an untagged int32 in |reg|.
void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}
1661 
1662 
// Emits a command: the next frame value is an untagged uint32 in |reg|.
void Translation::StoreUint32Register(Register reg) {
  buffer_->Add(UINT32_REGISTER, zone());
  buffer_->Add(reg.code(), zone());
}
1667 
1668 
// Emits a command: the next frame value is a double in |reg|, identified
// by its allocation index.
void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER, zone());
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
}
1673 
1674 
// Emits a command: the next frame value is a tagged pointer in stack slot
// |index| of the optimized frame.
void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT, zone());
  buffer_->Add(index, zone());
}
1679 
1680 
// Emits a command: the next frame value is an untagged int32 in stack slot
// |index| of the optimized frame.
void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}
1685 
1686 
// Emits a command: the next frame value is an untagged uint32 in stack
// slot |index| of the optimized frame.
void Translation::StoreUint32StackSlot(int index) {
  buffer_->Add(UINT32_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}
1691 
1692 
// Emits a command: the next frame value is an untagged double in stack
// slot |index| of the optimized frame.
void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT, zone());
  buffer_->Add(index, zone());
}
1697 
1698 
// Emits a command: the next frame value is the literal with the given id
// in the deoptimization data's literal array.
void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL, zone());
  buffer_->Add(literal_id, zone());
}
1703 
1704 
// Emits a command: the next frame value is an arguments object whose
// elements start at slot |args_index| and span |args_length| slots.
void Translation::StoreArgumentsObject(int args_index, int args_length) {
  buffer_->Add(ARGUMENTS_OBJECT, zone());
  buffer_->Add(args_index, zone());
  buffer_->Add(args_length, zone());
}
1710 
1711 
// Emits a DUPLICATE prefix: the following command reuses the same input
// value as a previous command (the input cursor is not advanced).
void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE, zone());
}
1715 
1716 
// Returns the number of encoded operands that follow |opcode| in the
// translation stream, so consumers can skip commands they do not need.
int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case DUPLICATE:
      return 0;
    case GETTER_STUB_FRAME:
    case SETTER_STUB_FRAME:
    case REGISTER:
    case INT32_REGISTER:
    case UINT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case UINT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;
    case BEGIN:
    case ARGUMENTS_ADAPTOR_FRAME:
    case CONSTRUCT_STUB_FRAME:
    case ARGUMENTS_OBJECT:
      return 2;
    case JS_FRAME:
      return 3;
  }
  UNREACHABLE();
  return -1;
}
1744 
1745 
1746 #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
1747 
// Returns a printable name for |opcode|; used only by the disassembler /
// object-print debugging builds.
const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case JS_FRAME:
      return "JS_FRAME";
    case ARGUMENTS_ADAPTOR_FRAME:
      return "ARGUMENTS_ADAPTOR_FRAME";
    case CONSTRUCT_STUB_FRAME:
      return "CONSTRUCT_STUB_FRAME";
    case GETTER_STUB_FRAME:
      return "GETTER_STUB_FRAME";
    case SETTER_STUB_FRAME:
      return "SETTER_STUB_FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case UINT32_REGISTER:
      return "UINT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case UINT32_STACK_SLOT:
      return "UINT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}
1788 
1789 #endif
1790 
1791 
1793  GlobalHandles* global_handles = Isolate::Current()->global_handles();
1794  // Globalize the code object and make it weak.
1795  code_ = Handle<Code>::cast(global_handles->Create(code));
1796  global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
1797  this,
1798  Deoptimizer::HandleWeakDeoptimizedCode);
1799 }
1800 
1801 
1803  GlobalHandles* global_handles = Isolate::Current()->global_handles();
1804  global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
1805 }
1806 
1807 
1808 // We can't intermix stack decoding and allocations because
1809 // deoptimization infrastracture is not GC safe.
1810 // Thus we build a temporary structure in malloced space.
1811 SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
1813  JavaScriptFrame* frame) {
1814  Translation::Opcode opcode =
1815  static_cast<Translation::Opcode>(iterator->Next());
1816 
1817  switch (opcode) {
1818  case Translation::BEGIN:
1819  case Translation::JS_FRAME:
1820  case Translation::ARGUMENTS_ADAPTOR_FRAME:
1821  case Translation::CONSTRUCT_STUB_FRAME:
1822  case Translation::GETTER_STUB_FRAME:
1823  case Translation::SETTER_STUB_FRAME:
1824  // Peeled off before getting here.
1825  break;
1826 
1827  case Translation::ARGUMENTS_OBJECT:
1828  // This can be only emitted for local slots not for argument slots.
1829  break;
1830 
1831  case Translation::REGISTER:
1832  case Translation::INT32_REGISTER:
1833  case Translation::UINT32_REGISTER:
1834  case Translation::DOUBLE_REGISTER:
1835  case Translation::DUPLICATE:
1836  // We are at safepoint which corresponds to call. All registers are
1837  // saved by caller so there would be no live registers at this
1838  // point. Thus these translation commands should not be used.
1839  break;
1840 
1841  case Translation::STACK_SLOT: {
1842  int slot_index = iterator->Next();
1843  Address slot_addr = SlotAddress(frame, slot_index);
1844  return SlotRef(slot_addr, SlotRef::TAGGED);
1845  }
1846 
1847  case Translation::INT32_STACK_SLOT: {
1848  int slot_index = iterator->Next();
1849  Address slot_addr = SlotAddress(frame, slot_index);
1850  return SlotRef(slot_addr, SlotRef::INT32);
1851  }
1852 
1853  case Translation::UINT32_STACK_SLOT: {
1854  int slot_index = iterator->Next();
1855  Address slot_addr = SlotAddress(frame, slot_index);
1856  return SlotRef(slot_addr, SlotRef::UINT32);
1857  }
1858 
1859  case Translation::DOUBLE_STACK_SLOT: {
1860  int slot_index = iterator->Next();
1861  Address slot_addr = SlotAddress(frame, slot_index);
1862  return SlotRef(slot_addr, SlotRef::DOUBLE);
1863  }
1864 
1865  case Translation::LITERAL: {
1866  int literal_index = iterator->Next();
1867  return SlotRef(data->LiteralArray()->get(literal_index));
1868  }
1869  }
1870 
1871  UNREACHABLE();
1872  return SlotRef();
1873 }
1874 
1875 
1876 void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
1877  TranslationIterator* it,
1878  DeoptimizationInputData* data,
1879  JavaScriptFrame* frame) {
1880  // Process the translation commands for the arguments.
1881 
1882  // Skip the translation command for the receiver.
1883  it->Skip(Translation::NumberOfOperandsFor(
1884  static_cast<Translation::Opcode>(it->Next())));
1885 
1886  // Compute slots for arguments.
1887  for (int i = 0; i < args_slots->length(); ++i) {
1888  (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
1889  }
1890 }
1891 
1892 
// Builds, for the inlined frame at |inlined_jsframe_index| inside the
// optimized |frame|, a vector of SlotRefs describing where each of its
// arguments lives. Walks the frame's translation stream, skipping frames
// until the requested one is found. The caller owns the returned vector.
Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
    JavaScriptFrame* frame,
    int inlined_jsframe_index,
    int formal_parameter_count) {
  // SlotRefs hold raw addresses into the stack; no GC may happen while
  // they are being computed or used.
  AssertNoAllocation no_gc;
  int deopt_index = Safepoint::kNoDeoptimizationIndex;
  DeoptimizationInputData* data =
      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
  ASSERT(opcode == Translation::BEGIN);
  it.Next();  // Drop frame count.
  int jsframe_count = it.Next();
  USE(jsframe_count);
  ASSERT(jsframe_count > inlined_jsframe_index);
  int jsframes_to_skip = inlined_jsframe_index;
  while (true) {
    opcode = static_cast<Translation::Opcode>(it.Next());
    if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
      // An adaptor frame directly precedes the frame it adapts; if it is
      // the one we want, its height gives the actual argument count.
      if (jsframes_to_skip == 0) {
        ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);

        it.Skip(1);  // literal id
        int height = it.Next();

        // We reached the arguments adaptor frame corresponding to the
        // inlined function in question. Number of arguments is height - 1.
        Vector<SlotRef> args_slots =
            Vector<SlotRef>::New(height - 1);  // Minus receiver.
        ComputeSlotsForArguments(&args_slots, &it, data, frame);
        return args_slots;
      }
      // Note: an adaptor frame does not consume a jsframes_to_skip slot;
      // only JS_FRAME commands do (below).
    } else if (opcode == Translation::JS_FRAME) {
      if (jsframes_to_skip == 0) {
        // Skip over operands to advance to the next opcode.
        it.Skip(Translation::NumberOfOperandsFor(opcode));

        // We reached the frame corresponding to the inlined function
        // in question. Process the translation commands for the
        // arguments. Number of arguments is equal to the number of
        // formal parameters.
        Vector<SlotRef> args_slots =
            Vector<SlotRef>::New(formal_parameter_count);
        ComputeSlotsForArguments(&args_slots, &it, data, frame);
        return args_slots;
      }
      jsframes_to_skip--;
    }

    // Skip over operands to advance to the next opcode.
    it.Skip(Translation::NumberOfOperandsFor(opcode));
  }

  UNREACHABLE();
  return Vector<SlotRef>();
}
1950 
1951 #ifdef ENABLE_DEBUGGER_SUPPORT
1952 
// Snapshots the deoptimized frame at |frame_index| for the debugger:
// copies the expression stack and parameters into malloced arrays so they
// survive after the deoptimizer's output frames are discarded.
DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
                                           int frame_index,
                                           bool has_arguments_adaptor,
                                           bool has_construct_stub) {
  FrameDescription* output_frame = deoptimizer->output_[frame_index];
  function_ = output_frame->GetFunction();
  has_construct_stub_ = has_construct_stub;
  expression_count_ = output_frame->GetExpressionCount();
  expression_stack_ = new Object*[expression_count_];
  // Get the source position using the unoptimized code.
  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
  Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
  source_position_ = code->SourcePosition(pc);

  for (int i = 0; i < expression_count_; i++) {
    SetExpression(i, output_frame->GetExpression(i));
  }

  // When an arguments adaptor sits below this frame, the actual parameter
  // values live in the adaptor frame, not in the JS frame itself.
  if (has_arguments_adaptor) {
    output_frame = deoptimizer->output_[frame_index - 1];
    ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
  }

  parameters_count_ = output_frame->ComputeParametersCount();
  parameters_ = new Object*[parameters_count_];
  for (int i = 0; i < parameters_count_; i++) {
    SetParameter(i, output_frame->GetParameter(i));
  }
}
1982 
1983 
1984 DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
1985  delete[] expression_stack_;
1986  delete[] parameters_;
1987 }
1988 
1989 
// GC support: reports every heap pointer held by this snapshot (the
// function, the copied parameters, and the copied expression stack) so the
// garbage collector can update them when objects move.
void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
  v->VisitPointer(BitCast<Object**>(&function_));
  v->VisitPointers(parameters_, parameters_ + parameters_count_);
  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
1995 
1996 #endif // ENABLE_DEBUGGER_SUPPORT
1997 
1998 } } // namespace v8::internal
Object * GetPrototype()
Definition: objects.cc:725
byte * Address
Definition: globals.h:157
DeoptimizingCodeListNode * next() const
Definition: deoptimizer.h:661
void Destroy(Object **location)
Object * function() const
Definition: frames-inl.h:231
static Object *& Object_at(Address addr)
Definition: v8memory.h:75
#define V8PRIxPTR
Definition: globals.h:189
void PrintF(const char *format,...)
Definition: v8utils.cc:40
unsigned stack_slots()
Definition: objects-inl.h:3318
static int64_t Ticks()
REGISTER(no_reg,-1)
void SourceCodePrint(StringStream *accumulator, int max_length)
Definition: objects.cc:7963
static Smi * FromInt(int value)
Definition: objects-inl.h:981
unsigned stack_check_table_offset()
Definition: objects-inl.h:3351
const int KB
Definition: globals.h:207
unsigned GetOffsetFromSlotIndex(int slot_index)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in the snapshot(mksnapshot only)") DEFINE_bool(help
static void RevertStackCheckCode(Code *unoptimized_code, Code *check_code, Code *replacement_code)
void SetFrameSlot(unsigned offset, intptr_t value)
Definition: deoptimizer.h:424
static void ComputeOutputFrames(Deoptimizer *deoptimizer)
Definition: deoptimizer.cc:349
static Handle< T > cast(Handle< S > that)
Definition: handles.h:81
int int32_t
Definition: unicode.cc:47
virtual void EnterContext(Context *context)
Definition: deoptimizer.cc:233
#define REGISTER(N, C)
static bool enabled()
Definition: serialize.h:481
Object * GetExpression(int index) const
Definition: frames-inl.h:153
#define ASSERT(condition)
Definition: checks.h:270
static void DeoptimizeFunction(JSFunction *function)
int ComputeExpressionsCount() const
Definition: frames.cc:586
static Context * cast(Object *context)
Definition: contexts.h:212
static void DeoptimizeAll()
Definition: deoptimizer.cc:250
double GetDoubleRegister(unsigned n) const
Definition: deoptimizer.h:433
void MaterializeHeapObjects(JavaScriptFrameIterator *it)
Definition: deoptimizer.cc:639
Handle< Object > NewNumber(double value, PretenureFlag pretenure=NOT_TENURED)
Definition: factory.cc:597
void SetFrameType(StackFrame::Type type)
Definition: deoptimizer.h:466
Handle< JSObject > NewArgumentsObject(Handle< Object > callee, int length)
Definition: factory.cc:1198
StringInputBuffer *const buffer_
Factory * factory()
Definition: isolate.h:992
virtual void VisitFunction(JSFunction *function)
Definition: deoptimizer.cc:240
static Code * cast(Object *obj)
const int kIntSize
Definition: globals.h:217
static Smi * cast(Object *object)
static const char * AllocationIndexToString(int index)
static const int kNumRegisters
Definition: assembler-arm.h:73
virtual void LeaveContext(Context *context)
Definition: deoptimizer.cc:244
bool contains(byte *pc)
Definition: objects-inl.h:4690
void ClearOptimizedFunctions()
Definition: contexts.cc:303
SmartArrayPointer< char > ToCString(AllowNullsFlag allow_nulls, RobustnessFlag robustness_flag, int offset, int length, int *length_output=0)
Definition: objects.cc:6233
JSFunction * GetFunction() const
Definition: deoptimizer.h:399
static void DeoptimizeGlobalObject(JSObject *object)
Definition: deoptimizer.cc:262
#define UNREACHABLE()
Definition: checks.h:50
DwVfpRegister DoubleRegister
virtual const char * NameOfCPURegister(int reg) const
Context * native_context()
Definition: contexts.cc:58
Handle< Object > Create(Object *value)
static bool IsValid(intptr_t value)
Definition: objects-inl.h:1059
byte * instruction_start()
Definition: objects-inl.h:4649
const int kPointerSize
Definition: globals.h:220
void SetExpression(int index, Object *value)
Definition: frames-inl.h:158
unsigned int FastD2UI(double x)
Object * OptimizedFunctionsListHead()
Definition: contexts.cc:297
void SetRegister(unsigned n, intptr_t value)
Definition: deoptimizer.h:438
const Register pc
friend class DeoptimizingCodeListNode
Definition: deoptimizer.h:370
Handle< FixedArray > NewFixedArray(int size, PretenureFlag pretenure=NOT_TENURED)
Definition: factory.cc:44
static int GetDeoptimizedCodeCount(Isolate *isolate)
Definition: deoptimizer.cc:523
static const char * State2String(State state)
Definition: full-codegen.h:109
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random generator(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer
static int GetOutputInfo(DeoptimizationOutputData *data, BailoutId node_id, SharedFunctionInfo *shared)
Definition: deoptimizer.cc:498
double FastUI2D(unsigned x)
Definition: conversions.h:84
static void ReplaceCodeForRelatedFunctions(JSFunction *function, Code *code)
static void PatchStackCheckCode(Code *unoptimized_code, Code *check_code, Code *replacement_code)
static Vector< T > New(int length)
Definition: utils.h:370
static MaybeObject * FunctionGetArguments(Object *object, void *)
Definition: accessors.cc:575
DeoptimizerData * deoptimizer_data()
Definition: isolate.h:838
const int kBitsPerInt
Definition: globals.h:240
uint32_t GetFrameSize() const
Definition: deoptimizer.h:394
kPropertyAccessorsOffset kNamedPropertyHandlerOffset kInstanceTemplateOffset kAccessCheckInfoOffset kEvalFrominstructionsOffsetOffset kInstanceClassNameOffset kHiddenPrototypeBit kReadOnlyPrototypeBit kIsTopLevelBit kAllowLazyCompilation kUsesArguments formal_parameter_count
Definition: objects-inl.h:4003
static void VisitAllOptimizedFunctionsForContext(Context *context, OptimizedFunctionVisitor *visitor)
Definition: deoptimizer.cc:270
#define ASSERT_LT(v1, v2)
Definition: checks.h:274
#define V8PRIuPTR
Definition: globals.h:191
#define V8PRIdPTR
Definition: globals.h:190
static void RevertStackCheckCodeAt(Code *unoptimized_code, Address pc_after, Code *check_code, Code *replacement_code)
intptr_t GetFrameSlot(unsigned offset)
Definition: deoptimizer.h:403
static HeapNumber * cast(Object *obj)
bool is_null() const
Definition: handles.h:87
JavaScriptFrameIteratorTemp< StackFrameIterator > JavaScriptFrameIterator
Definition: frames.h:775
friend class DeoptimizedFrameInfo
Definition: deoptimizer.h:371
static Address GetDeoptimizationEntry(int id, BailoutType type)
Definition: deoptimizer.cc:457
static int ToAllocationIndex(DwVfpRegister reg)
static Deoptimizer * Grab(Isolate *isolate)
Definition: deoptimizer.cc:99
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:248
Zone * runtime_zone()
Definition: isolate.h:868
#define HEAP
Definition: isolate.h:1433
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
friend class FrameDescription
Definition: deoptimizer.h:369
StackFrame::Type GetFrameType() const
Definition: deoptimizer.h:465
void USE(T)
Definition: globals.h:289
#define ASSERT_NE(v1, v2)
Definition: checks.h:272
double GetDoubleFrameSlot(unsigned offset)
Definition: deoptimizer.h:407
FrameDescription(uint32_t frame_size, JSFunction *function)
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:38
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor *visitor)
Definition: deoptimizer.cc:315
double FastI2D(int x)
Definition: conversions.h:76
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
Object * get(int index)
Definition: objects-inl.h:1737
static uint32_t & uint32_at(Address addr)
Definition: v8memory.h:47
#define RUNTIME_ENTRY(name, nargs, ressize)
static void VisitAllOptimizedFunctionsForGlobalObject(JSObject *object, OptimizedFunctionVisitor *visitor)
Definition: deoptimizer.cc:299
int ConvertJSFrameIndexToFrameIndex(int jsframe_index)
Definition: deoptimizer.cc:109
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static void FlushICache(void *start, size_t size)
void MakeWeak(Object **location, void *parameter, WeakReferenceCallback callback)
intptr_t GetRegister(unsigned n) const
Definition: deoptimizer.h:428
static GlobalObject * cast(Object *obj)
SmartArrayPointer< const char > ToCString() const
void set_code(Code *code)
Definition: objects-inl.h:4351
int jsframe_count() const
Definition: deoptimizer.h:137
static const int kMaxValue
Definition: objects.h:1050
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
static int GetDeoptimizationId(Address addr, BailoutType type)
Definition: deoptimizer.cc:478
bool has_adapted_arguments() const
Definition: frames-inl.h:226
static JSObject * cast(Object *obj)
static void PatchStackCheckCodeAt(Code *unoptimized_code, Address pc_after, Code *check_code, Code *replacement_code)
int FastD2I(double x)
Definition: conversions.h:69
static Deoptimizer * New(JSFunction *function, BailoutType type, unsigned bailout_id, Address from, int fp_to_sp_delta, Isolate *isolate)
Definition: deoptimizer.cc:79
static DeoptimizationInputData * cast(Object *obj)
Object * GetExpression(int index)
Object * GetParameter(int index)
static JSFunction * cast(Object *obj)