deoptimizer-ia32.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

const int Deoptimizer::table_entry_size_ = 10;
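// Note: each entry emitted by TableEntryGenerator::GeneratePrologue below
// is a push_imm32 (5 bytes on ia32: opcode 0x68 plus a 32-bit immediate)
// followed by a near jmp with a rel32 offset (5 bytes), which is where the
// 10 bytes per entry asserted there come from.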


int Deoptimizer::patch_size() {
  return Assembler::kCallInstructionLength;
}


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  Isolate* isolate = code->GetIsolate();
  HandleScope scope(isolate);

  // Compute the size of relocation information needed for the code
  // patching in Deoptimizer::DeoptimizeFunction.
  int min_reloc_size = 0;
  int prev_pc_offset = 0;
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    int pc_offset = deopt_data->Pc(i)->value();
    if (pc_offset == -1) continue;
    ASSERT_GE(pc_offset, prev_pc_offset);
    int pc_delta = pc_offset - prev_pc_offset;
    // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
    // if encodable with small pc delta encoding and up to 6 bytes
    // otherwise.
    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
      min_reloc_size += 2;
    } else {
      min_reloc_size += 6;
    }
    prev_pc_offset = pc_offset;
  }

  // If the relocation information is not big enough we create a new
  // relocation info object that is padded with comments to make it
  // big enough for lazy deoptimization.
  int reloc_length = code->relocation_info()->length();
  if (min_reloc_size > reloc_length) {
    int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
    // Padding needed.
    int min_padding = min_reloc_size - reloc_length;
    // Number of comments needed to take up at least that much space.
    int additional_comments =
        (min_padding + comment_reloc_size - 1) / comment_reloc_size;
    // Actual padding size.
    int padding = additional_comments * comment_reloc_size;
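    // This is a ceiling division; with illustrative values, a shortfall of
    // 10 bytes and a 4-byte comment entry would give (10 + 3) / 4 = 3
    // comments and 12 bytes of padding, the smallest multiple of the
    // comment size that covers the shortfall.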
    // Allocate new relocation info and copy old relocation to the end
    // of the new relocation info array because relocation info is
    // written and read backwards.
    Factory* factory = isolate->factory();
    Handle<ByteArray> new_reloc =
        factory->NewByteArray(reloc_length + padding, TENURED);
    memcpy(new_reloc->GetDataStartAddress() + padding,
           code->relocation_info()->GetDataStartAddress(),
           reloc_length);
    // Create a relocation writer to write the comments in the padding
    // space. Use position 0 for everything to ensure short encoding.
    RelocInfoWriter reloc_info_writer(
        new_reloc->GetDataStartAddress() + padding, 0);
    intptr_t comment_string
        = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
    for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
      byte* pos_before = reloc_info_writer.pos();
#endif
      reloc_info_writer.Write(&rinfo);
      ASSERT(RelocInfo::kMinRelocCommentSize ==
             pos_before - reloc_info_writer.pos());
    }
    // Replace relocation information on the code object.
    code->set_relocation_info(*new_reloc);
  }
}


void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  if (!function->IsOptimized()) return;

  // The optimized code is going to be patched, so we cannot use it
  // any more. Play safe and reset the whole cache.
  function->shared()->ClearOptimizedCodeMap();

  Isolate* isolate = function->GetIsolate();
  HandleScope scope(isolate);
  AssertNoAllocation no_allocation;

  // Get the optimized code.
  Code* code = function->code();
  Address code_start_address = code->instruction_start();

  // We will overwrite the code's relocation info in-place. Relocation info
  // is written backward. The relocation info is the payload of a byte
  // array. Later on we will slide this to the start of the byte array and
  // create a filler object in the remaining space.
  ByteArray* reloc_info = code->relocation_info();
  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.

  // Since the call is a relative encoding, write new
  // reloc info. We do not need any of the existing reloc info because the
  // existing code will not be used again (we zap it in debug builds).
  //
  // Emit call to lazy deoptimization at all lazy deopt points.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    // Patch lazy deoptimization entry.
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    CodePatcher patcher(call_address, patch_size());
    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
    patcher.masm()->call(deopt_entry, RelocInfo::NONE);
    // We use RUNTIME_ENTRY for deoptimization bailouts.
    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                    RelocInfo::RUNTIME_ENTRY,
                    reinterpret_cast<intptr_t>(deopt_entry),
                    NULL);
    reloc_info_writer.Write(&rinfo);
    ASSERT_GE(reloc_info_writer.pos(),
              reloc_info->address() + ByteArray::kHeaderSize);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Move the relocation info to the beginning of the byte array.
  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
  memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

  // The relocation info is in place, update the size.
  reloc_info->set_length(new_reloc_size);

  // Handle the junk part after the new relocation info. We will create
  // a non-live object in the extra space at the end of the former reloc info.
  Address junk_address = reloc_info->address() + reloc_info->Size();
  ASSERT(junk_address <= reloc_end_address);
  isolate->heap()->CreateFillerObjectAt(junk_address,
                                        reloc_end_address - junk_address);

  // Add the deoptimizing code to the list.
  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
  DeoptimizerData* data = isolate->deoptimizer_data();
  node->set_next(data->deoptimizing_code_list_);
  data->deoptimizing_code_list_ = node;

  // We might be in the middle of incremental marking with compaction.
  // Tell collector to treat this code object in a special way and
  // ignore all slots that might have been recorded on it.
  isolate->heap()->mark_compact_collector()->InvalidateCode(code);

  ReplaceCodeForRelatedFunctions(function, code);

  if (FLAG_trace_deopt) {
    PrintF("[forced deoptimization: ");
    function->PrintName();
    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
  }
}


static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
static const byte kJaeInstruction = 0x73;
static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
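// Encoding notes (ia32): 0x79 is the jns rel8 opcode and 0x73 is jae rel8,
// each followed by a one-byte offset; 0xe8 is call rel32; and 0x66 0x90 is
// the standard two-byte nop used below to overwrite the patched-out branch.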


void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* check_code,
                                        Code* replacement_code) {
  Address call_target_address = pc_after - kIntSize;
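  // The rel32 operand occupies the last four bytes of the call instruction
  // preceding pc_after, so pc_after - kIntSize addresses the displacement
  // that Assembler::target_address_at reads and set_target_address_at
  // rewrites.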
  ASSERT_EQ(check_code->entry(),
            Assembler::target_address_at(call_target_address));
  // The stack check code matches the pattern:
  //
  //     cmp esp, <limit>
  //     jae ok
  //     call <stack guard>
  //     test eax, <loop nesting depth>
  // ok: ...
  //
  // We will patch away the branch so the code is:
  //
  //     cmp esp, <limit>  ;; Not changed
  //     nop
  //     nop
  //     call <on-stack replacement>
  //     test eax, <loop nesting depth>
  // ok:

  if (FLAG_count_based_interrupts) {
    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
  } else {
    ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
    ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
  }
  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
  *(call_target_address - 3) = kNopByteOne;
  *(call_target_address - 2) = kNopByteTwo;
  Assembler::set_target_address_at(call_target_address,
                                   replacement_code->entry());

  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, call_target_address, replacement_code);
}


void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
  Address call_target_address = pc_after - kIntSize;
  ASSERT_EQ(replacement_code->entry(),
            Assembler::target_address_at(call_target_address));

  // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
  // restore the conditional branch.
  ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
  if (FLAG_count_based_interrupts) {
    *(call_target_address - 3) = kJnsInstruction;
    *(call_target_address - 2) = kJnsOffset;
  } else {
    *(call_target_address - 3) = kJaeInstruction;
    *(call_target_address - 2) = kJaeOffset;
  }
  Assembler::set_target_address_at(call_target_address,
                                   check_code->entry());

  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, call_target_address, check_code);
}


static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
  ByteArray* translations = data->TranslationByteArray();
  int length = data->DeoptCount();
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == ast_id) {
      TranslationIterator it(translations, data->TranslationIndex(i)->value());
      int value = it.Next();
      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
      // Read the number of frames.
      value = it.Next();
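      // An OSR translation describes exactly one (non-inlined) JS frame
      // (DoComputeOsrOutputFrame asserts this below), so only an entry
      // whose frame count is 1 can match.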
      if (value == 1) return i;
    }
  }
  UNREACHABLE();
  return -1;
}


void Deoptimizer::DoComputeOsrOutputFrame() {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned ast_id = data->OsrAstId()->value();
  // TODO(kasperl): This should not be the bailout_id_. It should be
  // the ast id. Confusing.
  ASSERT(bailout_id_ == ast_id);

  int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
  ByteArray* translations = data->TranslationByteArray();

  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
  int count = iterator.Next();
  iterator.Next();  // Drop JS frames count.
  ASSERT(count == 1);
  USE(count);

  opcode = static_cast<Translation::Opcode>(iterator.Next());
  USE(opcode);
  ASSERT(Translation::JS_FRAME == opcode);
  unsigned node_id = iterator.Next();
  USE(node_id);
  ASSERT(node_id == ast_id);
  int closure_id = iterator.Next();
  USE(closure_id);
  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
  unsigned height = iterator.Next();
  unsigned height_in_bytes = height * kPointerSize;
  USE(height_in_bytes);

  unsigned fixed_size = ComputeFixedSize(function_);
  unsigned input_frame_size = input_->GetFrameSize();
  ASSERT(fixed_size + height_in_bytes == input_frame_size);

  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
  unsigned outgoing_size = outgoing_height * kPointerSize;
  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
           ast_id,
           input_frame_size,
           output_frame_size,
           input_->GetRegister(ebp.code()),
           input_->GetRegister(esp.code()));
  }

  // There's only one output frame in the OSR case.
  output_count_ = 1;
  output_ = new FrameDescription*[1];
  output_[0] = new(output_frame_size) FrameDescription(
      output_frame_size, function_);
  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);

  // Clear the incoming parameters in the optimized frame to avoid
  // confusing the garbage collector.
  unsigned output_offset = output_frame_size - kPointerSize;
  int parameter_count = function_->shared()->formal_parameter_count() + 1;
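  // The + 1 accounts for the implicit receiver, which occupies a stack
  // slot just like the formal parameters.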
  for (int i = 0; i < parameter_count; ++i) {
    output_[0]->SetFrameSlot(output_offset, 0);
    output_offset -= kPointerSize;
  }

  // Translate the incoming parameters. This may overwrite some of the
  // incoming argument slots we've just cleared.
  int input_offset = input_frame_size - kPointerSize;
  bool ok = true;
  int limit = input_offset - (parameter_count * kPointerSize);
  while (ok && input_offset > limit) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Set them up explicitly.
  for (int i = StandardFrameConstants::kCallerPCOffset;
       ok && i >= StandardFrameConstants::kMarkerOffset;
       i -= kPointerSize) {
    uint32_t input_value = input_->GetFrameSlot(input_offset);
    if (FLAG_trace_osr) {
      const char* name = "UNKNOWN";
      switch (i) {
        case StandardFrameConstants::kCallerPCOffset:
          name = "caller's pc";
          break;
        case StandardFrameConstants::kCallerFPOffset:
          name = "fp";
          break;
        case StandardFrameConstants::kContextOffset:
          name = "context";
          break;
        case StandardFrameConstants::kMarkerOffset:
          name = "function";
          break;
      }
      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
             output_offset,
             input_value,
             input_offset,
             name);
    }
    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
    input_offset -= kPointerSize;
    output_offset -= kPointerSize;
  }

  // All OSR stack frames are dynamically aligned to an 8-byte boundary.
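  // (Illustrative example: with kPointerSize == 4, a frame pointer of
  // 0x0badc0d4 has the kPointerSize bit set, so the padding word is
  // dropped and the adjusted frame pointer 0x0badc0d0 is 8-byte aligned.)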
  int frame_pointer = input_->GetRegister(ebp.code());
  if ((frame_pointer & kPointerSize) != 0) {
    frame_pointer -= kPointerSize;
    has_alignment_padding_ = 1;
  }

  int32_t alignment_state = (has_alignment_padding_ == 1) ?
      kAlignmentPaddingPushed :
      kNoAlignmentPadding;
  if (FLAG_trace_osr) {
    PrintF("    [sp + %d] <- 0x%08x ; (alignment state)\n",
           output_offset,
           alignment_state);
  }
  output_[0]->SetFrameSlot(output_offset, alignment_state);
  output_offset -= kPointerSize;

  // Translate the rest of the frame.
  while (ok && input_offset >= 0) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // If translation of any command failed, continue using the input frame.
  if (!ok) {
    delete output_[0];
    output_[0] = input_;
    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
  } else {
    // Set up the frame pointer and the context pointer.
    output_[0]->SetRegister(ebp.code(), frame_pointer);
    output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));

    unsigned pc_offset = data->OsrPcOffset()->value();
    uint32_t pc = reinterpret_cast<uint32_t>(
        optimized_code_->entry() + pc_offset);
    output_[0]->SetPc(pc);
  }
  Code* continuation =
      function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
  output_[0]->SetContinuation(
      reinterpret_cast<uint32_t>(continuation->entry()));

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
           ok ? "finished" : "aborted",
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
  }
}


void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
                                                 int frame_index) {
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);

  // Arguments adaptor can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // A marker value is used in place of the context.
  output_offset -= kPointerSize;
  intptr_t context = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  output_frame->SetFrameSlot(output_offset, context);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
           top_address + output_offset, output_offset, context);
  }

  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
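  // Note that height counts the receiver as well, so the argc recorded
  // here is height - 1.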
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  ASSERT(0 == output_offset);

  Builtins* builtins = isolate_->builtins();
  Code* adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  uint32_t pc = reinterpret_cast<uint32_t>(
      adaptor_trampoline->instruction_start() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}


void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                              int frame_index) {
  Builtins* builtins = isolate_->builtins();
  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = 7 * kPointerSize;
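  // The seven fixed slots, written out below in this order: caller's pc,
  // caller's fp, context, function marker (construct sentinel), the
  // construct stub code object, argc, and the allocated receiver.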
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::CONSTRUCT);

  // Construct stub can not be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
           top_address + output_offset, output_offset, value);
  }

  // The output frame reflects a JSConstructStubGeneric frame.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(construct_stub);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  // The newly allocated object was passed as receiver in the artificial
  // constructor stub environment created by HEnvironment::CopyForInlining().
  output_offset -= kPointerSize;
  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
           top_address + output_offset, output_offset, value);
  }

  ASSERT(0 == output_offset);

  uint32_t pc = reinterpret_cast<uint32_t>(
      construct_stub->instruction_start() +
      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}


void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
                                             int frame_index,
                                             bool is_setter_stub_frame) {
  JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
  // The receiver (and the implicit return value, if any) are expected in
  // registers by the LoadIC/StoreIC, so they don't belong to the output stack
  // frame. This means that we have to use a height of 0.
  unsigned height = 0;
  unsigned height_in_bytes = height * kPointerSize;
  const char* kind = is_setter_stub_frame ? "setter" : "getter";
  if (FLAG_trace_deopt) {
    PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
  }

  // We need 1 stack entry for the return address + 4 stack entries from
  // StackFrame::INTERNAL (FP, context, frame type, code object, see
  // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
  // entry for the implicit return value, see
  // StoreStubCompiler::CompileStoreViaSetter.
  unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
  unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, accessor);
  output_frame->SetFrameType(StackFrame::INTERNAL);

  // A frame for an accessor stub can not be the topmost or bottommost one.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  unsigned output_offset = output_frame_size;

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; function (%s sentinel)\n",
           top_address + output_offset, output_offset, value, kind);
  }

  // Get Code object from accessor stub.
  output_offset -= kPointerSize;
  Builtins::Name name = is_setter_stub_frame ?
      Builtins::kStoreIC_Setter_ForDeopt :
      Builtins::kLoadIC_Getter_ForDeopt;
  Code* accessor_stub = isolate_->builtins()->builtin(name);
  value = reinterpret_cast<intptr_t>(accessor_stub);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
           " ; code object\n",
           top_address + output_offset, output_offset, value);
  }

  // Skip receiver.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  iterator->Skip(Translation::NumberOfOperandsFor(opcode));

  if (is_setter_stub_frame) {
    // The implicit return value was part of the artificial setter stub
    // environment.
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  ASSERT(0 == output_offset);

  Smi* offset = is_setter_stub_frame ?
      isolate_->heap()->setter_stub_deopt_pc_offset() :
      isolate_->heap()->getter_stub_deopt_pc_offset();
  intptr_t pc = reinterpret_cast<intptr_t>(
      accessor_stub->instruction_start() + offset->value());
  output_frame->SetPc(pc);
}


void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
                                   int frame_index) {
  BailoutId node_id = BailoutId(iterator->Next());
  JSFunction* function;
  if (frame_index != 0) {
    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  } else {
    int closure_id = iterator->Next();
    USE(closure_id);
    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
    function = function_;
  }
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating ");
    function->PrintName();
    PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeFixedSize(function);
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);

  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  ASSERT(frame_index >= 0 && frame_index < output_count_);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // Compute the incoming parameter translation.
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned output_offset = output_frame_size;
  unsigned input_offset = input_frame_size;

  unsigned alignment_state_offset =
      input_offset - parameter_count * kPointerSize -
      StandardFrameConstants::kFixedFrameSize -
      kPointerSize;
  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
         JavaScriptFrameConstants::kLocal0Offset);

  // The top address for the bottommost output frame can be computed from
  // the input frame pointer and the output frame's height. For all
  // subsequent output frames, it can be computed from the previous one's
  // top address and the current frame's size.
  uint32_t top_address;
  if (is_bottommost) {
    int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
    has_alignment_padding_ =
        (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
    // 2 = context and function in the frame.
    // If the optimized frame had alignment padding, adjust the frame pointer
    // to point to the new position of the old frame pointer after padding
    // is removed. Subtract 2 * kPointerSize for the context and function slots.
    top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
        height_in_bytes + has_alignment_padding_ * kPointerSize;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  input_offset -= (parameter_count * kPointerSize);

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  intptr_t value;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, value);
  }

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  ASSERT(!is_bottommost ||
         (input_->GetRegister(ebp.code()) +
          has_alignment_padding_ * kPointerSize) == fp_value);
  output_frame->SetFp(fp_value);
  if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }
  ASSERT(!is_bottommost || !has_alignment_padding_ ||
         (fp_value & kPointerSize) != 0);

  // For the bottommost output frame the context can be gotten from the input
  // frame. For all subsequent output frames it can be gotten from the function
  // so long as we don't inline functions that need local contexts.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = reinterpret_cast<uint32_t>(function->context());
  }
  output_frame->SetFrameSlot(output_offset, value);
  output_frame->SetContext(value);
  if (is_topmost) output_frame->SetRegister(esi.code(), value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(function);
  // The function for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  ASSERT(0 == output_offset);

  // Compute this frame's PC, state, and continuation.
  Code* non_optimized_code = function->shared()->code();
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
  output_frame->SetPc(pc_value);

  FullCodeGenerator::State state =
      FullCodeGenerator::StateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(state));

  // Set the continuation for the topmost frame.
  if (is_topmost && bailout_type_ != DEBUGGER) {
    Builtins* builtins = isolate_->builtins();
    Code* continuation = (bailout_type_ == EAGER)
        ? builtins->builtin(Builtins::kNotifyDeoptimized)
        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<uint32_t>(continuation->entry()));
  }
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers ebp and esp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


#define __ masm()->

void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();
  CpuFeatures::Scope scope(SSE2);

  Isolate* isolate = masm()->isolate();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize = kDoubleSize *
                              XMMRegister::kNumAllocatableRegisters;
  __ sub(esp, Immediate(kDoubleRegsSize));
  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
    int offset = i * kDoubleSize;
    __ movdbl(Operand(esp, offset), xmm_reg);
  }

  __ pushad();

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
                                      kDoubleRegsSize;
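  // From esp upwards the stack now holds the eight pushad'ed general
  // purpose registers, then the saved XMM registers, and then the bailout
  // id pushed by the entry table, hence the
  // Operand(esp, kSavedRegistersAreaSize) load below.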

  // Get the bailout id from the stack.
  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object if possible
  // and compute the fp-to-sp delta in register edx.
  if (type() == EAGER) {
    __ Set(ecx, Immediate(0));
    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
  } else {
    __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
  }
  __ sub(edx, ebp);
  __ neg(edx);
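  // edx now holds ebp minus the stack pointer the optimized frame had at
  // the deopt point (the lea above skipped the register save area and the
  // bailout id / return address words), i.e. a positive fp-to-sp delta.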

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, eax);
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
  __ mov(Operand(esp, 5 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
  }

  // Preserve deoptimizer object in register eax and get the input
  // frame descriptor pointer.
  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));

  // Fill in the input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(ebx, offset));
  }

  // Fill in the double input registers.
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize;
    __ movdbl(xmm0, Operand(esp, src_offset));
    __ movdbl(Operand(ebx, dst_offset), xmm0);
  }

  // Remove the bailout id and the double registers from the stack.
  if (type() == EAGER) {
    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
  } else {
    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
  }

  // Compute a pointer to the unwinding limit in register ecx; that is
  // the first stack slot not part of the input frame.
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ add(ecx, esp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(edx, 0));
  __ add(edx, Immediate(sizeof(uint32_t)));
  __ cmp(ecx, esp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(eax);
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0 * kPointerSize), eax);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate), 1);
  }
  __ pop(eax);

  if (type() != OSR) {
    // If frame was dynamically aligned, pop padding.
    Label no_padding;
    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
           Immediate(0));
    __ j(equal, &no_padding);
    __ pop(ecx);
    if (FLAG_debug_code) {
      __ cmp(ecx, Immediate(kAlignmentZapValue));
      __ Assert(equal, "alignment marker expected");
    }
    __ bind(&no_padding);
  } else {
    // If frame needs dynamic alignment push padding.
    Label no_padding;
    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
           Immediate(0));
    __ j(equal, &no_padding);
    __ push(Immediate(kAlignmentZapValue));
    __ bind(&no_padding);
  }

  // Replace the current frame with the output frames.
  Label outer_push_loop, inner_push_loop;
  // Outer loop state: eax = current FrameDescription**, edx = one past the
  // last FrameDescription**.
  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
  __ lea(edx, Operand(eax, edx, times_4, 0));
  __ bind(&outer_push_loop);
  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
  __ mov(ebx, Operand(eax, 0));
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ bind(&inner_push_loop);
  __ sub(ecx, Immediate(sizeof(uint32_t)));
  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
  __ test(ecx, ecx);
  __ j(not_zero, &inner_push_loop);
  __ add(eax, Immediate(kPointerSize));
  __ cmp(eax, edx);
  __ j(below, &outer_push_loop);

  // In case of OSR, we have to restore the XMM registers.
  if (type() == OSR) {
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int src_offset = i * kDoubleSize + double_regs_offset;
      __ movdbl(xmm_reg, Operand(ebx, src_offset));
    }
  }

  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ push(Operand(ebx, FrameDescription::state_offset()));
  }
  __ push(Operand(ebx, FrameDescription::pc_offset()));
  __ push(Operand(ebx, FrameDescription::continuation_offset()));


  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(ebx, offset));
  }

  // Restore the registers from the stack.
  __ popad();

  // Return to the continuation point.
  __ ret(0);
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ push_imm32(i);
    __ jmp(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}

#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32