v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
lithium-gap-resolver-ia32.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

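// source_uses_ and destination_uses_ count, for each allocatable register,
// how many not-yet-performed moves read from or write to that register.
// spilled_register_ is the allocation index of the register currently pushed
// on the stack as an on-demand scratch, or -1 if none.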
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


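// A register is "free" here when no remaining move still reads it
// (source_uses_ == 0) but some remaining move will overwrite it
// (destination_uses_ > 0): its current value is dead, so it can be
// clobbered as a temporary without spilling.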
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


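// V8 convention: the '__' macro routes assembler calls through the code
// generator's MacroAssembler, so '__ mov(dst, src)' emits machine code.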
#define __ ACCESS_MASM(cgen_->masm())

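// Pop any register that is still spilled and clear the move list so the
// resolver is ready for the next parallel move.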
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


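// If the operand lives in the register that was pushed as the on-demand
// scratch, pop its original value back before the operand is used.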
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


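// Returns a general-purpose register usable as a temporary, spilling one to
// the stack if necessary; a spilled register is restored lazily by
// EnsureRestored() or by Finish() at the end of the parallel move.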
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32(constant_source)) {
        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsInteger32(constant_source)) {
        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movdbl(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movdbl(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movdbl(xmm0, src);
      __ movdbl(dst, xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
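      // No free register to use as a temporary: exchange the register and
      // the memory word in place with the classic three-XOR swap
      // (A ^= B; B ^= A; A ^= B leaves the two values exchanged).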
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
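      // Rotate through tmp0 with XORs: after the three XORs below, src holds
      // the original destination word and tmp0 holds the original source,
      // which is then written to dst.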
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movdbl(xmm0, other);
    __ movdbl(other, reg);
    __ movdbl(reg, Operand(xmm0));

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory. Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movdbl(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movdbl(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have their source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32