v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-gap-resolver-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"

namespace v8 {
namespace internal {

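// Note: register code 9 is r9, which is assumed here not to be handed out
// by the register allocator, so it can carry a spilled value across a whole
// cycle without clobbering any allocated operand.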
static const Register kSavedValueRegister = { 9 };

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL) { }


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
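
// A worked example, with illustrative registers: resolving the parallel move
// {r0 -> r1, r1 -> r0}. PerformMove(0) marks move 0 pending and recurses
// into move 1, whose source r1 is move 0's destination. Move 1 is blocked by
// the pending root move, so BreakCycle saves its source in
// kSavedValueRegister and eliminates it; move 0 then emits its copy, and
// RestoreValue() completes the swap:
//
//   mov r9, r1   ; BreakCycle: save the value that must end up in r0
//   mov r1, r0   ; EmitMove(0)
//   mov r0, r9   ; RestoreValue: write the saved value to r0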


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
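
// For example, a move r3 -> r3 (illustrative) is redundant: it is dropped
// here and never enters the worklist.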


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again. So by spilling the source of
  // the starting move, we break the cycle. All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination. All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill this spilled value to
  // the stack, to free up the register.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack allocated local. Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
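
// Example, with illustrative registers: for the acyclic chain
// {r0 -> r1, r1 -> r2}, PerformMove(0) first recurses into the move that
// reads r1, so the copies come out in dependency order:
//
//   mov r2, r1
//   mov r1, r0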


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}
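
// For instance, the parallel move {r0 -> r2, r1 -> r2} (illustrative) would
// trip the SLOW_ASSERT above: two writes to one destination have no valid
// serialization.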

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_]. After performing all moves in the tree rooted
  // in that move, we restore the value to that source.
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
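
// Example, with illustrative registers: a cycle between two double
// registers, {d1 -> d2, d2 -> d1}, is broken via the double scratch:
//
//   vmov kScratchDoubleReg, d2   ; BreakCycle
//   vmov d2, d1                  ; EmitMove
//   vmov d1, kScratchDoubleReg   ; RestoreValue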


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsUint12Encodable()) {
          // ip is overwritten while saving the value to the destination.
          // Therefore we can't use ip. It is OK if the read from the source
          // destroys ip, since that happens before the value is written.
          __ vldr(kScratchDoubleReg.low(), source_operand);
          __ vstr(kScratchDoubleReg.low(), destination_operand);
        } else {
          __ ldr(ip, source_operand);
          __ str(ip, destination_operand);
        }
      } else {
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Vmov(result, v, ip);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(kSavedValueRegister,
                cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle,
        // but kSavedValueRegister is free.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
        __ ldr(kSavedValueRegister, source_high_operand);
        __ str(kSavedValueRegister, destination_high_operand);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
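
// Why the VFP detour in the in-cycle stack-to-stack case above: for an
// offset that does not fit the 12-bit ldr/str immediate (e.g. [fp, #-4100],
// illustrative), the macro assembler needs ip as a scratch to materialize
// the address for both the load and the store, and kSavedValueRegister
// already holds the spilled cycle value, so the word travels through
// kScratchDoubleReg.low() instead.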


#undef __

} }  // namespace v8::internal