v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-gap-resolver-mips.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-codegen-mips.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
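
The ordering above can be traced end to end on a tiny example. Below is a hedged, stand-alone sketch (plain C++ on ordinary ints, not V8 code; all variable names are invented) of how Resolve handles a two-move cycle plus one constant move: the cycle is broken through a scratch value, and the constant store runs last because a constant source can never block another move.

#include <cstdio>

int main() {
  // Illustrative sketch; not part of the V8 source. Models the parallel
  // move {r0 -> r1, r1 -> r0, 7 -> r2} on ordinary integers.
  int r0 = 10, r1 = 20, r2 = 0;

  // Cycle {r0 -> r1, r1 -> r0}: save one source to a scratch location
  // (BreakCycle), perform the now-unblocked move, then write the saved
  // value to its destination (RestoreValue).
  int scratch = r0;
  r0 = r1;
  r1 = scratch;

  // The constant move is performed last: constants block nothing, and
  // deferring them keeps their destination registers free until here.
  r2 = 7;

  std::printf("r0=%d r1=%d r2=%d\n", r0, r1, r2);  // r0=20 r1=10 r2=7
  return 0;
}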


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again. So by spilling the source of
  // the starting move, we break the cycle. All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination. All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill this spilled value to
  // the stack, to free up the register.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack allocated local. Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
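
To make the pending-marker recursion concrete, here is a hedged stand-alone sketch: ToyMove, ToyResolver, and every other name below are invented, integer "registers" stand in for operands, and constant sources are omitted. Clearing a move's destination marks it pending, blockers are resolved depth-first, and running into the pending root identifies a cycle, which is broken through a single scratch value just as BreakCycle and RestoreValue do.

#include <cstdio>
#include <vector>

// Illustrative sketch; not part of the V8 source.
// dst >= 0: unperformed; dst == -1: pending (destination cleared, as
// set_destination(NULL) does above); dst == -2: eliminated.
struct ToyMove { int src; int dst; };

struct ToyResolver {
  std::vector<ToyMove> moves;
  int* regs = nullptr;
  int root = 0;
  bool in_cycle = false;
  int scratch = 0;
  int saved_dst = -1;

  void Perform(int index) {
    int destination = moves[index].dst;
    moves[index].dst = -1;  // mark pending

    // Depth-first: resolve every unperformed, unpending move whose source
    // is our destination before that value gets overwritten.
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (moves[i].dst >= 0 && moves[i].src == destination) Perform(i);
    }

    moves[index].dst = destination;  // restore before emitting

    // Still blocked by the pending root: a cycle. Spill our source to the
    // scratch (BreakCycle); Resolve writes it back afterwards.
    if (moves[root].dst == -1 && moves[root].src == destination) {
      in_cycle = true;
      scratch = regs[moves[index].src];
      saved_dst = destination;
      moves[index].dst = -2;  // eliminated; completed by the restore
      return;
    }

    regs[destination] = regs[moves[index].src];  // EmitMove
    moves[index].dst = -2;
  }

  void Resolve() {
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (moves[i].dst < 0) continue;  // already performed
      root = i;
      Perform(i);
      if (in_cycle) {  // RestoreValue
        regs[saved_dst] = scratch;
        in_cycle = false;
      }
    }
  }
};

int main() {
  int regs[3] = {10, 20, 30};
  ToyResolver resolver;
  resolver.moves = {{0, 1}, {1, 2}, {2, 0}};  // a three-move cycle
  resolver.regs = regs;
  resolver.Resolve();
  std::printf("%d %d %d\n", regs[0], regs[1], regs[2]);  // prints: 30 10 20
  return 0;
}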


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_]. After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
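
Taken together, BreakCycle and RestoreValue form a mirrored pair: the spill picks its scratch register and load instruction from the source's operand kind, and the restore replays the same dispatch keyed on the saved destination. A small stand-alone sketch of that symmetry (the Kind enum and both helpers are hypothetical; the mnemonic strings are simply the instructions used above):

#include <cassert>
#include <cstdio>

// Illustrative sketch; not part of the V8 source. Hypothetical operand
// kinds mirroring the four cases BreakCycle and RestoreValue handle.
enum class Kind { kRegister, kStackSlot, kDoubleRegister, kDoubleStackSlot };

// BreakCycle chooses the scratch and the load from the *source* kind...
const char* SpillFor(Kind source_kind) {
  switch (source_kind) {
    case Kind::kRegister:        return "mov   kLithiumScratchReg, src";
    case Kind::kStackSlot:       return "lw    kLithiumScratchReg, [src]";
    case Kind::kDoubleRegister:  return "mov_d kLithiumScratchDouble, src";
    case Kind::kDoubleStackSlot: return "ldc1  kLithiumScratchDouble, [src]";
  }
  assert(false);  // UNREACHABLE()
  return nullptr;
}

// ...and RestoreValue is the exact mirror, keyed on the saved destination.
const char* RestoreFor(Kind destination_kind) {
  switch (destination_kind) {
    case Kind::kRegister:        return "mov   dst, kLithiumScratchReg";
    case Kind::kStackSlot:       return "sw    kLithiumScratchReg, [dst]";
    case Kind::kDoubleRegister:  return "mov_d dst, kLithiumScratchDouble";
    case Kind::kDoubleStackSlot: return "sdc1  kLithiumScratchDouble, [dst]";
  }
  assert(false);  // UNREACHABLE()
  return nullptr;
}

int main() {
  // A cycle through a double stack slot: spill with ldc1, restore with sdc1.
  std::printf("%s\n%s\n", SpillFor(Kind::kDoubleStackSlot),
              RestoreFor(Kind::kDoubleStackSlot));
  return 0;
}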


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is overwritten while saving the value to the destination.
          // Therefore we can't use 'at'. It is OK if the read from the source
          // destroys 'at', since that happens before the value is read.
          // This uses only a single reg of the double reg-pair.
          __ lwc1(kLithiumScratchDouble, source_operand);
          __ swc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg,
              Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
      }
      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle,
        // but kLithiumScratchReg is free.
        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal
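
A closing note on the double-stack-slot case in EmitMove: when in_cycle_ is set, kLithiumScratchDouble already holds the spilled cycle value, so the 64-bit slot is copied as two 32-bit word moves through the single integer scratch register instead. The same splitting can be sketched in portable C++ (the slot variables are hypothetical; nothing here is V8 API):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Illustrative sketch; not part of the V8 source. The doubles stand in
  // for ToMemOperand(source) and ToMemOperand(destination).
  double source_slot = 3.5;
  double destination_slot = 0.0;
  uint32_t word;  // the single 32-bit scratch, reused for each half

  // Copy the low word, then the high word, mirroring the lw/sw pairs on
  // the operands and their ToHighMemOperand halves above.
  std::memcpy(&word, reinterpret_cast<const char*>(&source_slot), 4);
  std::memcpy(reinterpret_cast<char*>(&destination_slot), &word, 4);
  std::memcpy(&word, reinterpret_cast<const char*>(&source_slot) + 4, 4);
  std::memcpy(reinterpret_cast<char*>(&destination_slot) + 4, &word, 4);

  std::printf("%f\n", destination_slot);  // prints: 3.500000
  return 0;
}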