LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}
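
// Resolve() lowers a parallel move (all moves conceptually happen at once)
// into a sequence of individual machine moves.  Moves with a constant
// source are deferred to the end because they never block other moves;
// everything else is performed by a depth-first walk of the move graph,
// with cycles broken through a scratch register (see PerformMove below).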
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants; they are performed last and never block other moves.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
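
// Perform a linear sweep of the moves to add them to the initial worklist,
// ignoring any move that is redundant (the source is the same as the
// destination, the destination is ignored, or the move was already
// eliminated).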
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
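
// Each call to PerformMove performs one move and deletes it from the move
// graph.  We first recursively perform any move blocking this one.  A move
// is marked "pending" on entry (by clearing its destination) so that a
// cycle in the move graph can be detected: during the depth-first
// traversal a cycle shows up as reaching the starting (root) move again.
// The cycle is broken by spilling the source of the root move into a
// scratch register; all other moves are then unblocked, and the root move
// is completed by writing the spilled value to its destination.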
void LGapResolver::PerformMove(int index) {
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to mark it as pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Recursively perform every unpending move blocked by this destination.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // This move is about to be resolved; restore its destination.
  moves_[index].set_destination(destination);

  // If it is still blocked, the blocker must be the pending root move:
  // we have found a cycle.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
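
// Sanity check (slow-assert builds only): no operand may be the destination
// of more than one move in the worklist.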
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())
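
// We save in a scratch register the value that should end up in the source
// of moves_[root_index_].  After performing all moves in the tree rooted in
// that move, RestoreValue() writes the saved value back to that operand.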
void LGapResolver::BreakCycle(int index) {
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
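
// Write the value spilled by BreakCycle() (now held in kLithiumScratchReg
// or kLithiumScratchDouble) into the saved destination and leave cycle mode.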
void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
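
// Emit a single move.  Dispatch on the source and destination operand
// kinds; not all combinations are possible.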
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is needed to form the address and kLithiumScratchReg may
          // hold the spilled cycle value, so go through an FPU register.
          __ lwc1(kLithiumScratchDouble, source_operand);
          __ swc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
      }
    }
  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg,
              Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
      }
      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle, so copy the
        // double one word at a time through kLithiumScratchReg.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
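
// Illustrative sketch (not part of the original file): resolving the
// register swap { a0 -> a1, a1 -> a0 }.  Resolve() picks a0 -> a1 as the
// root move.  PerformMove(0) recurses into the blocking move a1 -> a0,
// which is itself blocked by the pending root, so BreakCycle() saves a1 in
// kLithiumScratchReg and eliminates a1 -> a0.  The now-unblocked root move
// a0 -> a1 is emitted, and since in_cycle_ is set, RestoreValue() finishes
// the swap by writing kLithiumScratchReg into a0.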