v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
lithium-gap-resolver-x64.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

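// The resolver keeps a zone-allocated worklist of moves; an initial capacity
// of 32 entries is reserved, and the list is rewound and reused for each
// parallel move.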
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

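  // All moves have been emitted; reset the list so the resolver can be
  // reused for the next parallel move.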
  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())
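// V8 convention: "__" abbreviates calls on the code generator's macro
// assembler, so "__ movq(...)" emits an instruction into the current code.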


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
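      // x64 has no memory-to-memory move, so bounce the value through
      // kScratchRegister.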
      __ movq(kScratchRegister, src);
      __ movq(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
        // value.
        __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movq(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
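      // Again, no memory-to-memory form exists; xmm0 serves as the temporary
      // for the 64-bit double value.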
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
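    // Note: xchg with a memory operand implies a locked bus cycle, so the
    // swap is done through kScratchRegister instead.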
    __ movq(kScratchRegister, mem);
    __ movq(mem, reg);
    __ movq(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
      (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
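    // Both kinds of slots are 64 bits wide, so xmm0 and kScratchRegister
    // together hold both values while they are written back crossed.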
    __ movsd(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movq(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
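    // movaps copies the full 128-bit XMM register; xmm0 acts as the temporary
    // for the three-move swap.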
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movsd(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have their source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64