v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-gap-resolver-x64.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

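// The resolver collects the moves of one gap into a zone-allocated
// worklist; 32 is only the list's initial capacity.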
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}


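// Resolve() lowers a single parallel move: all non-constant moves are
// performed first (breaking any cycles with swaps), and constant loads are
// left for a second pass since they never block other moves.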
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants so they are performed last. They don't block other
    // moves, and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


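// Example: the parallel move {rax -> rbx, rbx -> rax} forms a cycle.
// PerformMove(0) marks move 0 as pending and recurses into move 1, which is
// blocked by the pending move 0 and is therefore resolved with a single
// EmitSwap (xchgq). The swap also rewrites move 0's source, so on return
// move 0 has become a no-op and is eliminated.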
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on at most one pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


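// The '__' shorthand below emits code through the owning LCodeGen's macro
// assembler.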
#define __ ACCESS_MASM(cgen_->masm())


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
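      // x64 has no memory-to-memory move, so bounce through the scratch
      // register.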
      __ movp(kScratchRegister, src);
      __ movp(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        int32_t constant = cgen_->ToInteger32(constant_source);
        // Do sign extension only for a constant used as a de-hoisted array
        // key. Other constants only need zero extension, which saves 2 bytes.
        if (cgen_->IsDehoistedKeyConstant(constant_source)) {
          __ Set(dst, constant);
        } else {
          __ Set(dst, static_cast<uint32_t>(constant));
        }
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
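        // The bit pattern of +0.0 is all zeros, so xorps is enough and is
        // shorter than loading the pattern through the scratch register.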
        __ xorps(dst, dst);
      } else {
        __ Set(kScratchRegister, int_val);
        __ movq(dst, kScratchRegister);
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        // Do sign extension to 64 bits when storing into a stack slot.
        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movp(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movp(kScratchRegister, mem);
    __ movp(mem, reg);
    __ movp(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
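    // Both kinds of slots are 64 bits wide on x64, so one value can sit in
    // xmm0 and the other in the scratch register while the slots are
    // rewritten; no allocatable register is disturbed.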
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movp(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movp(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64