static const Register kSavedValueRegister = { 9 };
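// kSavedValueRegister (r9) temporarily holds a spilled value while a move
// cycle is being broken; see BreakCycle() and RestoreValue() below.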

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0),
      in_cycle_(false), saved_destination_(NULL) { }

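// Resolve() emits code for one complete parallel move: non-constant moves
// first (breaking any cycles found along the way), then moves with
// constant sources, which never block other moves.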
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants; they are performed last because they never block
    // other moves.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // A cycle is found when this move is reached again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the remaining moves, all of which have constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

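// Scans the parallel move and adds every non-redundant move (source not
// equal to destination, not already eliminated) to the worklist.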
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

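// Performs the move at the given index after first performing any moves
// that block it.  A move is marked pending (its destination is cleared)
// while its blockers are resolved; reaching the pending root move again
// means the moves form a cycle, which is broken by spilling the root's
// incoming value to a scratch register.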
void LGapResolver::PerformMove(int index) {
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to mark it pending; the destination is
  // kept in a local and restored below.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Recursively perform every unperformed, non-pending move whose source
  // blocks this move's destination.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // This move is no longer pending.
  moves_[index].set_destination(destination);

  // If the root move still blocks this destination, we have found a cycle.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is unblocked; emit it.
  EmitMove(index);
}

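// In slow-assert builds, checks that no operand is the destination of more
// than one move in the worklist.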
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

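// Spills the value flowing into the cycle's root move so that every move
// in the cycle becomes unblocked; RestoreValue() writes it back afterwards.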
void LGapResolver::BreakCycle(int index) {
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}

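// Writes the value saved by BreakCycle() to the destination of the move
// that started the cycle, completing it.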
void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}

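// Emits the code for a single move, dispatching on the kinds of the source
// and destination operands.  Not all combinations are possible.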
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsUint12Encodable()) {
          // ip is overwritten while saving the value to the destination,
          // so it cannot be used here; move through a VFP scratch instead.
          __ vldr(kScratchDoubleReg.low(), source_operand);
          __ vstr(kScratchDoubleReg.low(), destination_operand);
        } else {
          __ ldr(ip, source_operand);
          __ str(ip, destination_operand);
        }
      } else {
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
      }
    }
  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(kSavedValueRegister,
                      cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle, but
        // kSavedValueRegister is free: move the double one word at a time.
        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
        __ ldr(kSavedValueRegister, source_high_operand);
        __ str(kSavedValueRegister, destination_high_operand);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

#undef __