#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}

void LGapResolver::Resolve(LParallelMove* parallel_move) {
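  // Build up a worklist of moves.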
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
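    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.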
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

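  // Perform the moves with constant sources.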
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
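  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).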
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

void LGapResolver::PerformMove(int index) {
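  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  Cycles are resolved with operand swaps, so a
  // call to PerformMove may change any source operand in the move graph.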
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

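  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Recursion may allow
  // multiple moves to be pending.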
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

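  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.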
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

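  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.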
  moves_[index].set_destination(destination);

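  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.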
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

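  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.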
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
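  // No operand should be the destination for more than one move.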
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

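  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.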
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(kScratchRegister, src);
      __ movq(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movq(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }

  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

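  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.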
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movq(kScratchRegister, mem);
    __ movq(mem, reg);
    __ movq(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots, using xmm0 and the
    // scratch register as temporaries.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movq(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

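  // Any unperformed (including pending) move with a source of either
  // source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.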
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64