#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}

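// Resolve() lowers one LParallelMove into actual ia32 moves and swaps.
// Moves with constant sources are deferred to a second pass: they can never
// block another move, and leaving them for last keeps their destination
// registers available as scratch space.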
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  // Skip moves with constant sources on the first pass.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the remaining moves, all of which have constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}

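// Worked example (hypothetical register assignment): the parallel move
//   { eax -> ebx, ebx -> eax }
// is a two-element cycle.  PerformMove() on the first move marks it pending,
// recurses into the second, finds the first still pending, and breaks the
// cycle with EmitSwap(), which emits a single xchg and eliminates both moves.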
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves and add every move that is not
  // redundant (source equal to destination) to the worklist.
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}

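// PerformMove() performs the move at the given index, first recursively
// performing any move that blocks it (i.e. whose source is this move's
// destination).  A move is marked pending, by clearing its destination,
// while it is on the recursion stack; reaching a pending blocker means the
// move graph contains a cycle, which is resolved with a swap.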
void LGapResolver::PerformMove(int index) {
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to mark it as pending; the destination is
  // saved on the side so it can be restored below.
  ASSERT(moves_[index].source() != NULL);  // Otherwise it looks eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Recursively perform every unperformed, non-pending move that is blocked
  // on this move's destination.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move, so restore its destination.
  moves_[index].set_destination(destination);

  // Swaps performed while resolving cycles may have changed this move's
  // source; if it has become redundant, drop it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // If the move is still blocked, it is blocked on a pending move: a cycle.
  // Break it with a swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}

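// AddMove() and RemoveMove() maintain per-register use counts for sources
// and destinations.  GetFreeRegisterNot() and EnsureTempRegister() consult
// these counts when looking for a scratch register.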
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}

void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}

int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}

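// Returns a register that is not the source of any remaining move but is
// the destination of at least one (so it is safe to clobber), skipping
// 'reg'; returns no_reg if there is no such register.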
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}

bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination of more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

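// Code emission below follows the usual V8 convention of routing assembler
// calls through the '__' macro.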
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}

void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}

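// EnsureTempRegister() returns a general-purpose scratch register, spilling
// one onto the stack if nothing is free.  The spill is undone lazily by
// EnsureRestored() or, at the latest, by Finish().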
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move,
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}

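// EmitMove() dispatches on the kinds of the move's source and destination:
// general-purpose registers, stack slots, constants, XMM/X87 registers and
// double stack slots.  Memory-to-memory moves go through a temporary
// register obtained from EnsureTempRegister().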
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatureScope scope(cgen_->masm(), SSE2);
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        if (int_val == 0) {
          __ xorps(dst, dst);
        } else {
          __ push(Immediate(upper));
          __ push(Immediate(lower));
          __ movsd(dst, Operand(esp, 0));
          __ add(esp, Immediate(kDoubleSize));
        }
      } else {
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        X87Register dst = cgen_->ToX87Register(destination);
        cgen_->X87Mov(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(cgen_->masm(), SSE2);
      XMMRegister src = cgen_->ToDoubleRegister(source);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        __ movaps(dst, src);
      } else {
        ASSERT(destination->IsDoubleStackSlot());
        Operand dst = cgen_->ToOperand(destination);
        __ movsd(dst, src);
      }
    } else {
      // Without SSE2 the destination must be a double stack slot; store the
      // value from the X87 stack.
      ASSERT(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }

  } else if (source->IsDoubleStackSlot()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(cgen_->masm(), SSE2);
      ASSERT(destination->IsDoubleRegister() ||
             destination->IsDoubleStackSlot());
      Operand src = cgen_->ToOperand(source);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        __ movsd(dst, src);
      } else {
        // We rely on having xmm0 available as a fixed scratch register.
        Operand dst = cgen_->ToOperand(destination);
        __ movsd(xmm0, src);
        __ movsd(dst, xmm0);
      }
    } else {
      if (destination->IsDoubleStackSlot()) {
        // Memory-to-memory move of a double; copy word by word through a
        // general-purpose temporary register.
        Register tmp = EnsureTempRegister();
        Operand src0 = cgen_->ToOperand(source);
        Operand src1 = cgen_->HighOperand(source);
        Operand dst0 = cgen_->ToOperand(destination);
        Operand dst1 = cgen_->HighOperand(destination);
        __ mov(tmp, src0);
        __ mov(dst0, tmp);
        __ mov(tmp, src1);
        __ mov(dst1, tmp);
      } else {
        Operand src = cgen_->ToOperand(source);
        X87Register dst = cgen_->ToX87Register(destination);
        cgen_->X87Mov(dst, src);
      }
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}

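// EmitSwap() exchanges the contents of the move's source and destination.
// It is only used to break cycles, so afterwards every remaining move that
// read from either operand is redirected to the swapped location and the
// register use counts are patched up.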
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible; otherwise
    // go through the stack rather than spilling, since a spill could clobber
    // one of the operands being swapped.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      __ push(reg);
      __ mov(reg, mem);
      __ pop(mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Use a free register as a temp if possible, and spill
    // on demand to get one temporary if necessary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us: swap via xor.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // XMM register-register swap.  We rely on having xmm0 available as a
    // fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // XMM register-memory swap.  We rely on having xmm0 available as a
    // fixed scratch register.
    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);     // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs its source changed to reflect the
  // state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, update the use
  // counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We only keep use counts for registers, so recompute the count for the
    // register that was swapped into memory.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

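// For orientation: the resolver is driven by LCodeGen when it emits gap
// moves, roughly (see lithium-codegen-ia32.cc for the actual call site):
//
//   void LCodeGen::DoParallelMove(LParallelMove* move) {
//     resolver_.Resolve(move);
//   }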
#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32