v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
safepoint-table.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "safepoint-table.h"

#include "deoptimizer.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "zone-inl.h"

namespace v8 {
namespace internal {

bool SafepointEntry::HasRegisters() const {
  ASSERT(is_valid());
  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
  const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
  for (int i = 0; i < num_reg_bytes; i++) {
    if (bits_[i] != SafepointTable::kNoRegisters) return true;
  }
  return false;
}


bool SafepointEntry::HasRegisterAt(int reg_index) const {
  ASSERT(is_valid());
  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
  int byte_index = reg_index >> kBitsPerByteLog2;
  int bit_index = reg_index & (kBitsPerByte - 1);
  return (bits_[byte_index] & (1 << bit_index)) != 0;
}
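HasRegisterAt maps a register index to a byte and a bit within the register
portion of the entry bitmap. A minimal standalone sketch of the same
arithmetic, assuming a hypothetical 16-register layout (kNumSafepointRegisters
is target-specific in V8):

#include <cstdint>
#include <cstdio>

int main() {
  const int kBitsPerByte = 8, kBitsPerByteLog2 = 3;
  uint8_t bits[2] = { 0x00, 0x04 };  // only register 10 marked live
  int reg_index = 10;
  int byte_index = reg_index >> kBitsPerByteLog2;  // 10 / 8 == 1
  int bit_index = reg_index & (kBitsPerByte - 1);  // 10 % 8 == 2
  printf("register 10 live: %d\n",
         (bits[byte_index] & (1 << bit_index)) != 0);  // prints 1
  return 0;
}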


SafepointTable::SafepointTable(Code* code) {
  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
  code_ = code;
  Address header = code->instruction_start() + code->safepoint_table_offset();
  length_ = Memory::uint32_at(header + kLengthOffset);
  entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
  pc_and_deoptimization_indexes_ = header + kHeaderSize;
  entries_ = pc_and_deoptimization_indexes_ +
             (length_ * kPcAndDeoptimizationIndexSize);
  ASSERT(entry_size_ > 0);
  STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
                Safepoint::kNoDeoptimizationIndex);
}
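The constructor walks a table that Emit() (below) appends after the function's
instructions. A hedged sketch of that layout, assuming the 32-bit header words
and 8-byte pc/encoding pairs implied by kLengthOffset, kEntrySizeOffset, and
kPcAndDeoptimizationIndexSize (constants defined in safepoint-table.h); the
struct name is hypothetical, for illustration only:

#include <cstdint>

struct SafepointTableLayout {  // hypothetical mirror of the layout
  uint32_t length;             // number of safepoint entries
  uint32_t entry_size;         // bytes of bitmap per entry
  // Followed in memory by:
  //   length x { uint32_t pc_offset; uint32_t encoding; }
  //   length x entry_size bytes of stack-slot/register bitmaps
};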


SafepointEntry SafepointTable::FindEntry(Address pc) const {
  unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
  for (unsigned i = 0; i < length(); i++) {
    // TODO(kasperl): Replace the linear search with binary search.
    if (GetPcOffset(i) == pc_offset) return GetEntry(i);
  }
  return SafepointEntry();
}
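Since Emit() writes the pc offsets in ascending order, the TODO could be
resolved with a lower-bound binary search. A sketch of what that replacement
might look like; FindEntryBinary is a hypothetical name, not a member of the
shipped class:

SafepointEntry SafepointTable::FindEntryBinary(Address pc) const {
  unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
  unsigned lo = 0, hi = length();
  while (lo < hi) {  // invariant: answer, if present, is in [lo, hi)
    unsigned mid = lo + (hi - lo) / 2;
    if (GetPcOffset(mid) < pc_offset) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  if (lo < length() && GetPcOffset(lo) == pc_offset) return GetEntry(lo);
  return SafepointEntry();
}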


void SafepointTable::PrintEntry(unsigned index) const {
  disasm::NameConverter converter;
  SafepointEntry entry = GetEntry(index);
  uint8_t* bits = entry.bits();

  // Print the stack slot bits.
  if (entry_size_ > 0) {
    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
    const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
    int last = entry_size_ - 1;
    for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
    int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
    PrintBits(bits[last], last_bits);

    // Print the registers (if any).
    if (!entry.HasRegisters()) return;
    for (int j = 0; j < kNumSafepointRegisters; j++) {
      if (entry.HasRegisterAt(j)) {
        PrintF(" | %s", converter.NameOfCPURegister(j));
      }
    }
  }
}


void SafepointTable::PrintBits(uint8_t byte, int digits) {
  ASSERT(digits >= 0 && digits <= kBitsPerByte);
  for (int i = 0; i < digits; i++) {
    PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
  }
}
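Note that PrintBits emits the least significant bit first. A standalone check
of that ordering (illustrative only):

#include <cstdio>

int main() {
  unsigned char byte = 0x0d;  // 0b00001101: bits 0, 2 and 3 set
  for (int i = 0; i < 8; i++) printf("%c", (byte & (1 << i)) ? '1' : '0');
  printf("\n");  // prints 10110000: bit 0 first
  return 0;
}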


void Safepoint::DefinePointerRegister(Register reg, Zone* zone) {
  registers_->Add(reg.code(), zone);
}


Safepoint SafepointTableBuilder::DefineSafepoint(
    Assembler* assembler,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(arguments >= 0);
  DeoptimizationInfo info;
  info.pc = assembler->pc_offset();
  info.arguments = arguments;
  info.has_doubles = (kind & Safepoint::kWithDoubles);
  deoptimization_info_.Add(info, zone_);
  deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
  if (deopt_mode == Safepoint::kNoLazyDeopt) {
    last_lazy_safepoint_ = deopt_index_list_.length();
  }
  indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
  registers_.Add((kind & Safepoint::kWithRegisters)
                     ? new(zone_) ZoneList<int>(4, zone_)
                     : NULL,
                 zone_);
  return Safepoint(indexes_.last(), registers_.last());
}
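A hedged usage sketch of the builder API from a code generator's point of
view. DefinePointerSlot is assumed to be declared alongside
DefinePointerRegister in safepoint-table.h, and the surrounding variable
names (builder, spill_slot_index, reg, zone) are illustrative, not taken from
V8's actual call sites:

// After emitting a call, record which stack slots and registers hold
// live pointers at that pc (sketch only):
Safepoint safepoint = builder->DefineSafepoint(
    assembler, Safepoint::kWithRegisters, 0, Safepoint::kLazyDeopt);
safepoint.DefinePointerSlot(spill_slot_index, zone);  // live stack slot
safepoint.DefinePointerRegister(reg, zone);           // live pointer register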


void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
  while (last_lazy_safepoint_ < deopt_index_list_.length()) {
    deopt_index_list_[last_lazy_safepoint_++] = index;
  }
}

unsigned SafepointTableBuilder::GetCodeOffset() const {
  ASSERT(emitted_);
  return offset_;
}


void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
  // For lazy deoptimization we need space to patch a call after every call.
  // Ensure there is always space for such patching, even if the code ends
  // in a call.
  int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
  while (assembler->pc_offset() < target_offset) {
    assembler->nop();
  }

  // Make sure the safepoint table is properly aligned. Pad with nops.
  assembler->Align(kIntSize);
  assembler->RecordComment(";;; Safepoint table.");
  offset_ = assembler->pc_offset();

  // Take the register bits into account.
  bits_per_entry += kNumSafepointRegisters;

  // Compute the number of bytes per safepoint entry.
  int bytes_per_entry =
      RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;

  // Emit the table header.
  int length = deoptimization_info_.length();
  assembler->dd(length);
  assembler->dd(bytes_per_entry);

  // Emit sorted table of pc offsets together with deoptimization indexes.
  for (int i = 0; i < length; i++) {
    assembler->dd(deoptimization_info_[i].pc);
    assembler->dd(EncodeExceptPC(deoptimization_info_[i],
                                 deopt_index_list_[i]));
  }

  // Emit table of bitmaps.
  ZoneList<uint8_t> bits(bytes_per_entry, zone_);
  for (int i = 0; i < length; i++) {
    ZoneList<int>* indexes = indexes_[i];
    ZoneList<int>* registers = registers_[i];
    bits.Clear();
    bits.AddBlock(0, bytes_per_entry, zone_);

    // Run through the registers (if any).
    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
    if (registers == NULL) {
      const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
      for (int j = 0; j < num_reg_bytes; j++) {
        bits[j] = SafepointTable::kNoRegisters;
      }
    } else {
      for (int j = 0; j < registers->length(); j++) {
        int index = registers->at(j);
        ASSERT(index >= 0 && index < kNumSafepointRegisters);
        int byte_index = index >> kBitsPerByteLog2;
        int bit_index = index & (kBitsPerByte - 1);
        bits[byte_index] |= (1 << bit_index);
      }
    }

    // Run through the indexes and build a bitmap.
    for (int j = 0; j < indexes->length(); j++) {
      int index = bits_per_entry - 1 - indexes->at(j);
      int byte_index = index >> kBitsPerByteLog2;
      int bit_index = index & (kBitsPerByte - 1);
      bits[byte_index] |= (1U << bit_index);
    }

    // Emit the bitmap for the current entry.
    for (int k = 0; k < bytes_per_entry; k++) {
      assembler->db(bits[k]);
    }
  }
  emitted_ = true;
}
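A worked example of the entry-sizing arithmetic above, as standalone C++. The
register count and stack-slot bit count are assumptions for illustration,
not values from any particular V8 target:

#include <cstdio>

int main() {
  const int kBitsPerByte = 8;
  const int kNumSafepointRegisters = 16;  // assumed; target-specific in V8
  int bits_per_entry = 30;                // assumed stack-slot bits passed in
  bits_per_entry += kNumSafepointRegisters;  // 46 bits in total
  // RoundUp(bits, kBitsPerByte) >> kBitsPerByteLog2 is a ceiling division:
  int bytes_per_entry = (bits_per_entry + kBitsPerByte - 1) / kBitsPerByte;
  printf("bytes per safepoint entry: %d\n", bytes_per_entry);  // prints 6
  return 0;
}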


uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
                                               unsigned index) {
  uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
  encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
  encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
  return encoding;
}
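EncodeExceptPC packs three BitFields into a single 32-bit word. A minimal
standalone sketch of that pack/unpack pattern; the field widths below are
assumptions for illustration, not V8's actual layout (which is defined by the
BitField typedefs in safepoint-table.h):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kIndexBits = 26;  // assumed width of the deopt index field
  const uint32_t kArgsBits = 5;    // assumed width of the arguments field
  uint32_t index = 1234, arguments = 3, has_doubles = 1;
  uint32_t encoding = index
                    | (arguments << kIndexBits)
                    | (has_doubles << (kIndexBits + kArgsBits));
  printf("index=%u args=%u doubles=%u\n",
         encoding & ((1u << kIndexBits) - 1),
         (encoding >> kIndexBits) & ((1u << kArgsBits) - 1),
         (encoding >> (kIndexBits + kArgsBits)) & 1u);  // 1234 3 1
  return 0;
}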


} }  // namespace v8::internal