V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
disassembler.cc
Go to the documentation of this file.
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "code-stubs.h"
31 #include "codegen.h"
32 #include "debug.h"
33 #include "deoptimizer.h"
34 #include "disasm.h"
35 #include "disassembler.h"
36 #include "macro-assembler.h"
37 #include "serialize.h"
38 #include "string-stream.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 #ifdef ENABLE_DISASSEMBLER
44 
45 void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
46  for (byte* pc = begin; pc < end; pc++) {
47  if (f == NULL) {
48  PrintF("%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
49  reinterpret_cast<intptr_t>(pc),
50  pc - begin,
51  *pc);
52  } else {
53  PrintF(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
54  reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
55  }
56  }
57 }
58 
59 
// Name converter that resolves addresses relative to a particular Code
// object: builtin entry points get their builtin name, and addresses inside
// the code's instruction range are printed as offsets.
class V8NameConverter: public disasm::NameConverter {
 public:
  // |code| may be NULL (used when disassembling an arbitrary byte range
  // with no associated Code object).
  explicit V8NameConverter(Code* code) : code_(code) {}
  virtual const char* NameOfAddress(byte* pc) const;
  virtual const char* NameInCode(byte* addr) const;
  Code* code() const { return code_; }
 private:
  Code* code_;

  // Scratch buffer for formatted names; pointers returned by the lookups
  // alias this buffer, so they are only valid until the next call.
  EmbeddedVector<char, 128> v8_buffer_;
};
71 
72 
73 const char* V8NameConverter::NameOfAddress(byte* pc) const {
74  const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
75  if (name != NULL) {
76  OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
77  return v8_buffer_.start();
78  }
79 
80  if (code_ != NULL) {
81  int offs = static_cast<int>(pc - code_->instruction_start());
82  // print as code offset, if it seems reasonable
83  if (0 <= offs && offs < code_->instruction_size()) {
84  OS::SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
85  return v8_buffer_.start();
86  }
87  }
88 
90 }
91 
92 
93 const char* V8NameConverter::NameInCode(byte* addr) const {
94  // The V8NameConverter is used for well known code, so we can "safely"
95  // dereference pointers in generated code.
96  return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
97 }
98 
99 
100 static void DumpBuffer(FILE* f, StringBuilder* out) {
101  if (f == NULL) {
102  PrintF("%s\n", out->Finalize());
103  } else {
104  PrintF(f, "%s\n", out->Finalize());
105  }
106  out->Reset();
107 }
108 
109 
// Size of the per-line output buffer: room for the disassembled instruction
// plus annotations (a short object print may be appended, hence the extra
// String::kMaxShortPrintLength).
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
// Column at which reloc-info annotations are aligned after an instruction.
static const int kRelocInfoPosition = 57;
112 
113 static int DecodeIt(Isolate* isolate,
114  FILE* f,
115  const V8NameConverter& converter,
116  byte* begin,
117  byte* end) {
118  SealHandleScope shs(isolate);
119  DisallowHeapAllocation no_alloc;
120  ExternalReferenceEncoder ref_encoder(isolate);
121  Heap* heap = isolate->heap();
122 
125  StringBuilder out(out_buffer.start(), out_buffer.length());
126  byte* pc = begin;
127  disasm::Disassembler d(converter);
128  RelocIterator* it = NULL;
129  if (converter.code() != NULL) {
130  it = new RelocIterator(converter.code());
131  } else {
132  // No relocation information when printing code stubs.
133  }
134  int constants = -1; // no constants being decoded at the start
135 
136  while (pc < end) {
137  // First decode instruction so that we know its length.
138  byte* prev_pc = pc;
139  if (constants > 0) {
140  OS::SNPrintF(decode_buffer,
141  "%08x constant",
142  *reinterpret_cast<int32_t*>(pc));
143  constants--;
144  pc += 4;
145  } else {
146  int num_const = d.ConstantPoolSizeAt(pc);
147  if (num_const >= 0) {
148  OS::SNPrintF(decode_buffer,
149  "%08x constant pool begin",
150  *reinterpret_cast<int32_t*>(pc));
151  constants = num_const;
152  pc += 4;
153  } else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
154  it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
155  // raw pointer embedded in code stream, e.g., jump table
156  byte* ptr = *reinterpret_cast<byte**>(pc);
157  OS::SNPrintF(decode_buffer,
158  "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
159  ptr,
160  ptr - begin);
161  pc += 4;
162  } else {
163  decode_buffer[0] = '\0';
164  pc += d.InstructionDecode(decode_buffer, pc);
165  }
166  }
167 
168  // Collect RelocInfo for this instruction (prev_pc .. pc-1)
169  List<const char*> comments(4);
170  List<byte*> pcs(1);
171  List<RelocInfo::Mode> rmodes(1);
172  List<intptr_t> datas(1);
173  if (it != NULL) {
174  while (!it->done() && it->rinfo()->pc() < pc) {
175  if (RelocInfo::IsComment(it->rinfo()->rmode())) {
176  // For comments just collect the text.
177  comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
178  } else {
179  // For other reloc info collect all data.
180  pcs.Add(it->rinfo()->pc());
181  rmodes.Add(it->rinfo()->rmode());
182  datas.Add(it->rinfo()->data());
183  }
184  it->next();
185  }
186  }
187 
188  // Comments.
189  for (int i = 0; i < comments.length(); i++) {
190  out.AddFormatted(" %s", comments[i]);
191  DumpBuffer(f, &out);
192  }
193 
194  // Instruction address and instruction offset.
195  out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
196 
197  // Instruction.
198  out.AddFormatted("%s", decode_buffer.start());
199 
200  // Print all the reloc info for this instruction which are not comments.
201  for (int i = 0; i < pcs.length(); i++) {
202  // Put together the reloc info
203  RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
204 
205  // Indent the printing of the reloc info.
206  if (i == 0) {
207  // The first reloc info is printed after the disassembled instruction.
208  out.AddPadding(' ', kRelocInfoPosition - out.position());
209  } else {
210  // Additional reloc infos are printed on separate lines.
211  DumpBuffer(f, &out);
212  out.AddPadding(' ', kRelocInfoPosition);
213  }
214 
215  RelocInfo::Mode rmode = relocinfo.rmode();
216  if (RelocInfo::IsPosition(rmode)) {
217  if (RelocInfo::IsStatementPosition(rmode)) {
218  out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
219  } else {
220  out.AddFormatted(" ;; debug: position %d", relocinfo.data());
221  }
222  } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
223  HeapStringAllocator allocator;
224  StringStream accumulator(&allocator);
225  relocinfo.target_object()->ShortPrint(&accumulator);
226  SmartArrayPointer<const char> obj_name = accumulator.ToCString();
227  out.AddFormatted(" ;; object: %s", obj_name.get());
228  } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
229  const char* reference_name =
230  ref_encoder.NameOfAddress(relocinfo.target_reference());
231  out.AddFormatted(" ;; external reference (%s)", reference_name);
232  } else if (RelocInfo::IsCodeTarget(rmode)) {
233  out.AddFormatted(" ;; code:");
234  if (rmode == RelocInfo::CONSTRUCT_CALL) {
235  out.AddFormatted(" constructor,");
236  }
237  Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
238  Code::Kind kind = code->kind();
239  if (code->is_inline_cache_stub()) {
240  if (kind == Code::LOAD_IC &&
241  LoadIC::GetContextualMode(code->extra_ic_state()) == CONTEXTUAL) {
242  out.AddFormatted(" contextual,");
243  }
244  InlineCacheState ic_state = code->ic_state();
245  out.AddFormatted(" %s, %s", Code::Kind2String(kind),
246  Code::ICState2String(ic_state));
247  if (ic_state == MONOMORPHIC) {
248  Code::StubType type = code->type();
249  out.AddFormatted(", %s", Code::StubType2String(type));
250  }
251  } else if (kind == Code::STUB || kind == Code::HANDLER) {
252  // Reverse lookup required as the minor key cannot be retrieved
253  // from the code object.
254  Object* obj = heap->code_stubs()->SlowReverseLookup(code);
255  if (obj != heap->undefined_value()) {
256  ASSERT(obj->IsSmi());
257  // Get the STUB key and extract major and minor key.
258  uint32_t key = Smi::cast(obj)->value();
259  uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
260  CodeStub::Major major_key = CodeStub::GetMajorKey(code);
261  ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
262  out.AddFormatted(" %s, %s, ",
263  Code::Kind2String(kind),
264  CodeStub::MajorName(major_key, false));
265  switch (major_key) {
266  case CodeStub::CallFunction: {
267  int argc =
269  out.AddFormatted("argc = %d", argc);
270  break;
271  }
272  default:
273  out.AddFormatted("minor: %d", minor_key);
274  }
275  }
276  } else {
277  out.AddFormatted(" %s", Code::Kind2String(kind));
278  }
279  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
280  out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
281  }
282  } else if (RelocInfo::IsRuntimeEntry(rmode) &&
283  isolate->deoptimizer_data() != NULL) {
284  // A runtime entry reloinfo might be a deoptimization bailout.
285  Address addr = relocinfo.target_address();
286  int id = Deoptimizer::GetDeoptimizationId(isolate,
287  addr,
291  addr,
295  addr,
298  out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
299  } else {
300  out.AddFormatted(" ;; soft deoptimization bailout %d", id);
301  }
302  } else {
303  out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
304  }
305  } else {
306  out.AddFormatted(" ;; deoptimization bailout %d", id);
307  }
308  } else {
309  out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
310  }
311  }
312  DumpBuffer(f, &out);
313  }
314 
315  // Emit comments following the last instruction (if any).
316  if (it != NULL) {
317  for ( ; !it->done(); it->next()) {
318  if (RelocInfo::IsComment(it->rinfo()->rmode())) {
319  out.AddFormatted(" %s",
320  reinterpret_cast<const char*>(it->rinfo()->data()));
321  DumpBuffer(f, &out);
322  }
323  }
324  }
325 
326  delete it;
327  return static_cast<int>(pc - begin);
328 }
329 
330 
331 int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
332  V8NameConverter defaultConverter(NULL);
333  return DecodeIt(isolate, f, defaultConverter, begin, end);
334 }
335 
336 
337 // Called by Code::CodePrint.
338 void Disassembler::Decode(FILE* f, Code* code) {
339  Isolate* isolate = code->GetIsolate();
340  int decode_size = code->is_crankshafted()
341  ? static_cast<int>(code->safepoint_table_offset())
342  : code->instruction_size();
343  // If there might be a back edge table, stop before reaching it.
344  if (code->kind() == Code::FUNCTION) {
345  decode_size =
346  Min(decode_size, static_cast<int>(code->back_edge_table_offset()));
347  }
348 
349  byte* begin = code->instruction_start();
350  byte* end = begin + decode_size;
351  V8NameConverter v8NameConverter(code);
352  DecodeIt(isolate, f, v8NameConverter, begin, end);
353 }
354 
355 #else // ENABLE_DISASSEMBLER
356 
// No-op stub implementations used when ENABLE_DISASSEMBLER is not defined.
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
  return 0;
}


void Disassembler::Decode(FILE* f, Code* code) {}
364 
365 #endif // ENABLE_DISASSEMBLER
366 
367 } } // namespace v8::internal
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
#define V8PRIxPTR
Definition: globals.h:228
void PrintF(const char *format,...)
Definition: v8utils.cc:40
static int Decode(Isolate *isolate, FILE *f, byte *begin, byte *end)
static int ExtractArgcFromMinorKey(int minor_key)
Definition: code-stubs.h:1637
static ContextualMode GetContextualMode(ExtraICState state)
Definition: ic.h:314
kSerializedDataOffset Object
Definition: objects-inl.h:5016
#define ASSERT(condition)
Definition: checks.h:329
static Smi * cast(Object *object)
uint8_t byte
Definition: globals.h:185
T * start() const
Definition: utils.h:426
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
Definition: deoptimizer.cc:701
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static const char * Kind2String(Kind kind)
Definition: objects.cc:10803
const Register pc
int length() const
Definition: utils.h:420
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:4662
virtual const char * NameOfAddress(byte *addr) const
#define V8PRIdPTR
Definition: globals.h:229
static const int kMaxShortPrintLength
Definition: objects.h:8929
static int SNPrintF(Vector< char > str, const char *format,...)
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:258
PerThreadAssertScopeDebugOnly< HEAP_ALLOCATION_ASSERT, false > DisallowHeapAllocation
Definition: assert-scope.h:214
HeapObject * obj
T Min(T a, T b)
Definition: utils.h:234
static void Dump(FILE *f, byte *begin, byte *end)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505