#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}
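
// Resolve() lowers a parallel move (a set of moves that should notionally
// happen simultaneously) into a sequence of individual moves and swaps that
// is correct even when sources and destinations overlap.  Non-constant moves
// are performed first, with cycles in the move graph broken by swaps; moves
// from constants are emitted last since a constant source never blocks
// another move.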
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
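    // Skip moves from constants for now; they are performed last.  A
    // constant source never blocks another move, and deferring it keeps its
    // destination register free while the rest of the graph is resolved.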
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
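  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is the
  // same as the destination, the destination is ignored and unallocated, or
  // the move was already eliminated).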
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

void LGapResolver::PerformMove(int index) {
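  // Each call to this function performs a move and deletes it from the move
  // graph.  Any move blocking this one is performed first, recursively.  A
  // move is marked as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph; cycles are resolved with operand swaps, so a
  // call to PerformMove may change any source operand in the move graph.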
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

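  // Clear this move's destination to mark it as pending.  The actual
  // destination is saved in a stack-allocated local; recursion may leave
  // several moves pending at once.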
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

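  // Perform a depth-first traversal of the move graph to resolve
  // dependencies: any unperformed, non-pending move whose source equals this
  // move's destination blocks this one and is performed first.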
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move, so restore its destination; it no
  // longer needs to be marked as pending.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps that resolved cycles,
  // so it may now be the last move in a cycle.  If so, remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

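  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.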
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
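  // No operand should be the destination of more than one move.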
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

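  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.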
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      // A memory-to-memory move goes through the scratch register.
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(kScratchRegister, src);
      __ movp(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        int32_t constant = cgen_->ToInteger32(constant_source);
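        // Sign-extend only constants that are used as de-hoisted array keys;
        // everything else needs only zero extension, which saves two bytes.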
        if (cgen_->IsDehoistedKeyConstant(constant_source)) {
          __ Set(dst, constant);
        } else {
          __ Set(dst, static_cast<uint32_t>(constant));
        }
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        // The bit pattern of +0.0 can be materialized with a self-xor.
        __ xorps(dst, dst);
      } else {
        __ Set(kScratchRegister, int_val);
        __ movq(dst, kScratchRegister);
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
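        // Zero the upper 32 bits of a 64-bit spill slot that holds a 32-bit
        // untagged value.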
        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movp(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }

  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      // A double memory-to-memory move goes through xmm0.
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }

  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

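  // Dispatch on the combined source and destination operand kinds.  Not all
  // combinations are possible.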
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot via the scratch
    // register.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movp(kScratchRegister, mem);
    __ movp(mem, reg);
    __ movp(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots, using xmm0 and the
    // scratch register as temporaries.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movp(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movp(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers through xmm0.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(
        source->IsDoubleRegister() ? source : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

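  // Any unperformed (including pending) move whose source is either this
  // move's source or destination needs its source updated to reflect the
  // state of affairs after the swap.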
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64