v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium-gap-resolver-arm64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm64/lithium-gap-resolver-arm64.h"
#include "arm64/lithium-codegen-arm64.h"

namespace v8 {
namespace internal {

// We use the root register to spill a value while breaking a cycle in
// parallel moves. We don't need access to roots while resolving the move
// list, and using the root register has two advantages:
//  - It is not in the Crankshaft allocatable register list, so it can't
//    interfere with any of the moves we are resolving.
//  - We don't need to push it on the stack, as we can reload it with its
//    value once we have resolved a cycle.
#define kSavedValue root

// We use the MacroAssembler floating-point scratch register to break a cycle
// involving double values, as the MacroAssembler will not need it for the
// operations performed by the gap resolver.
#define kSavedDoubleValue fp_scratch

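// For illustration (this example is not part of the original source):
// resolving the cyclic parallel move {x0 -> x1, x1 -> x0} with the scheme
// above emits roughly:
//   Mov(root, x1)             // BreakCycle(): spill one end of the cycle.
//   Mov(x1, x0)               // EmitMove(): the now-unblocked move x0 -> x1.
//   Mov(x0, root)             // RestoreValue(): complete the broken move.
//   InitializeRootRegister()  // Resolve(): reload root once all moves done.
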
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL), need_to_restore_root_(false) { }

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    // Skip constants to perform them last. They don't block other moves and
    // skipping such moves with register destinations keeps those registers
    // free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    if (!move.IsEliminated()) {
      ASSERT(move.source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  if (need_to_restore_root_) {
    ASSERT(kSavedValue.Is(root));
    __ InitializeRootRegister();
    need_to_restore_root_ = false;
  }

  moves_.Rewind(0);
}

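// How Resolve() is typically driven (illustrative sketch following the
// generic Lithium codegen pattern; the exact call site lives in
// lithium-codegen-arm64.cc): LCodeGen owns a resolver and hands it each
// parallel move recorded in a gap:
//
//   void LCodeGen::DoParallelMove(LParallelMove* move) {
//     resolver_.Resolve(move);
//   }
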
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark
  // a move as "pending" on entry to PerformMove in order to detect cycles in
  // the move graph.
  LMoveOperands& current_move = moves_[index];

  ASSERT(!current_move.IsPending());
  ASSERT(!current_move.IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Multiple moves can be
  // pending because this function is recursive.
  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
  LOperand* destination = current_move.destination();
  current_move.set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_],
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as pending,
  // so restore its destination.
  current_move.set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting
  // move. In this case, we have a cycle, and we save the source of this move
  // to a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}

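// Worked example for PerformMove() above (illustrative): with the move list
//   moves_[0]: x0 -> x1,  moves_[1]: x1 -> x2,  moves_[2]: x2 -> x0
// Resolve() sets root_index_ = 0 and calls PerformMove(0), which marks move 0
// pending and recurses into PerformMove(1), since move 1's source is move 0's
// destination. That call in turn recurses into PerformMove(2). Move 2's
// destination is blocked only by the pending root move, so BreakCycle(2)
// spills x2 and eliminates move 2. Unwinding the recursion emits moves 1 and
// 0, and RestoreValue() finally writes the saved value into x0.
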
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

void LGapResolver::BreakCycle(int index) {
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);

  // We use registers which are not allocatable by Crankshaft to break the
  // cycle, to be sure they don't interfere with the moves we are resolving.
  ASSERT(!kSavedValue.IsAllocatable());
  ASSERT(!kSavedDoubleValue.IsAllocatable());

  // We save the source of this move in a register and remember its
  // destination. Then we mark this move as resolved, so the cycle is broken
  // and we can perform the other moves.
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();

  if (source->IsRegister()) {
    need_to_restore_root_ = true;
    __ Mov(kSavedValue, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    need_to_restore_root_ = true;
    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
    cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
    __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
    cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
    __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }

  // Mark this move as resolved.
  // This move will actually be performed by moving the saved value to this
  // move's destination in LGapResolver::RestoreValue().
  moves_[index].Eliminate();
}

void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
  } else if (saved_destination_->IsStackSlot()) {
    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
    cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
    cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}

void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ Str(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ Ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      EmitStackSlotMove(index);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(dst, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      __ Fmov(result, cgen_->ToDouble(constant_source));
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      need_to_restore_root_ = true;
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
      }
      __ Str(kSavedValue, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ Fmov(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ Str(src, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand src = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ Ldr(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      EmitStackSlotMove(index);
    }

  } else {
    UNREACHABLE();
  }

  // The move has been emitted, we can eliminate it.
  moves_[index].Eliminate();
}

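// Illustrative example for the constant-to-stack-slot case in EmitMove()
// above (not from the original source): a move "Smi(1) -> [stack slot]"
// emits roughly:
//   Mov(kSavedValue, Operand(Smi::FromInt(1)));
//   Str(kSavedValue, cgen_->ToMemOperand(destination));
// Clobbering kSavedValue (the root register) is safe here because constant
// moves run after all cycles are resolved, and Resolve() reloads the root
// register afterwards via InitializeRootRegister().
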
void LGapResolver::EmitStackSlotMove(int index) {
  // We need a temp register to perform a stack slot to stack slot move, and
  // the register must not be involved in breaking cycles.

  // Use the Crankshaft double scratch register as the temporary.
  DoubleRegister temp = crankshaft_fp_scratch;

  LOperand* src = moves_[index].source();
  LOperand* dst = moves_[index].destination();

  ASSERT(src->IsStackSlot());
  ASSERT(dst->IsStackSlot());
  __ Ldr(temp, cgen_->ToMemOperand(src));
  __ Str(temp, cgen_->ToMemOperand(dst));
}

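// Illustrative example for EmitStackSlotMove() above (the offsets are made
// up): moving stack slot [fp, #-16] to stack slot [fp, #-24] emits:
//   Ldr(crankshaft_fp_scratch, MemOperand(fp, -16));
//   Str(crankshaft_fp_scratch, MemOperand(fp, -24));
// A double register works as the temporary because ARM64 stack slots are
// 64 bits wide, and using it leaves every integer register untouched.
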
} }  // namespace v8::internal