static const Register kSavedValueRegister = { 9 };
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL) { }
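
// Resolve a parallel move: perform the non-constant moves first (only they
// can block one another and form cycles), then emit the remaining moves
// from constant sources, which never block other moves.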
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // A cycle is detected by reaching this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
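
// Perform a linear sweep of the moves and add every non-redundant move to
// the initial worklist.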
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
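
// Perform the move at the given index, first recursively performing any
// moves that block it.  A move is marked pending (its destination is
// cleared) while its blockers are resolved; reaching the root move again
// means the moves form a cycle, which is broken by spilling the root's
// source value.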
void LGapResolver::PerformMove(int index) {
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear the destination to mark this move as pending.
  ASSERT(moves_[index].source() != NULL);  // Otherwise it looks eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Recursively perform every unperformed, non-pending move that is
  // blocked by this move's destination.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // This move is about to be resolved, so restore its destination.
  moves_[index].set_destination(destination);

  // If the destination is still blocked, the blocker must be the pending
  // root move: we have found a cycle and break it by spilling the source.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  EmitMove(index);
}
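
// With slow asserts enabled, check that no operand is the destination of
// more than one move.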
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
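
// Break a cycle by saving the value the root move reads (this move's
// destination) into kSavedValueRegister or kScratchDoubleReg;
// RestoreValue() writes it back once the rest of the cycle is done.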
void LGapResolver::BreakCycle(int index) {
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
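
// Write the value saved by BreakCycle() to the destination recorded in
// saved_destination_, completing the cyclic move.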
void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
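
// Emit the code for a single move, dispatching on the kinds of the source
// and destination operands.  Not all combinations are possible.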
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsUint12Encodable()) {
          // ip is overwritten while saving the value to the destination, so
          // it cannot be used here; go through the scratch double register.
          __ vldr(kScratchDoubleReg.low(), source_operand);
          __ vstr(kScratchDoubleReg.low(), destination_operand);
        } else {
          __ ldr(ip, source_operand);
          __ str(ip, destination_operand);
        }
      } else {
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
      }
    }
  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Vmov(result, v, ip);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(kSavedValueRegister,
                cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg holds the spilled value, so copy word by word
        // through kSavedValueRegister, which is free here.
        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ ldr(kSavedValueRegister, source_operand);
        __ str(kSavedValueRegister, destination_operand);
        __ ldr(kSavedValueRegister, source_high_operand);
        __ str(kSavedValueRegister, destination_high_operand);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }
  moves_[index].Eliminate();
}