V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-allocator.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 #include "lithium-allocator-inl.h"
30 
31 #include "hydrogen.h"
32 #include "string-stream.h"
33 
34 #if V8_TARGET_ARCH_IA32
35 #include "ia32/lithium-ia32.h"
36 #elif V8_TARGET_ARCH_X64
37 #include "x64/lithium-x64.h"
38 #elif V8_TARGET_ARCH_ARM64
39 #include "arm64/lithium-arm64.h"
40 #elif V8_TARGET_ARCH_ARM
41 #include "arm/lithium-arm.h"
42 #elif V8_TARGET_ARCH_MIPS
43 #include "mips/lithium-mips.h"
44 #else
45 #error "Unknown architecture."
46 #endif
47 
48 namespace v8 {
49 namespace internal {
50 
51 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
52  return a.Value() < b.Value() ? a : b;
53 }
54 
55 
56 static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
57  return a.Value() > b.Value() ? a : b;
58 }
59 
60 
61 UsePosition::UsePosition(LifetimePosition pos,
62  LOperand* operand,
63  LOperand* hint)
64  : operand_(operand),
65  hint_(hint),
66  pos_(pos),
67  next_(NULL),
68  requires_reg_(false),
69  register_beneficial_(true) {
70  if (operand_ != NULL && operand_->IsUnallocated()) {
71  LUnallocated* unalloc = LUnallocated::cast(operand_);
72  requires_reg_ = unalloc->HasRegisterPolicy();
73  register_beneficial_ = !unalloc->HasAnyPolicy();
74  }
75  ASSERT(pos_.IsValid());
76 }
77 
78 
79 bool UsePosition::HasHint() const {
80  return hint_ != NULL && !hint_->IsUnallocated();
81 }
82 
83 
84 bool UsePosition::RequiresRegister() const {
85  return requires_reg_;
86 }
87 
88 
89 bool UsePosition::RegisterIsBeneficial() const {
90  return register_beneficial_;
91 }
92 
93 
94 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
95  ASSERT(Contains(pos) && pos.Value() != start().Value());
96  UseInterval* after = new(zone) UseInterval(pos, end_);
97  after->next_ = next_;
98  next_ = after;
99  end_ = pos;
100 }
101 
102 
103 #ifdef DEBUG
104 
105 
106 void LiveRange::Verify() const {
107  UsePosition* cur = first_pos_;
108  while (cur != NULL) {
109  ASSERT(Start().Value() <= cur->pos().Value() &&
110  cur->pos().Value() <= End().Value());
111  cur = cur->next();
112  }
113 }
114 
115 
116 bool LiveRange::HasOverlap(UseInterval* target) const {
117  UseInterval* current_interval = first_interval_;
118  while (current_interval != NULL) {
119  // Intervals overlap if the start of one is contained in the other.
120  if (current_interval->Contains(target->start()) ||
121  target->Contains(current_interval->start())) {
122  return true;
123  }
124  current_interval = current_interval->next();
125  }
126  return false;
127 }
128 
129 
130 #endif
131 
132 
133 LiveRange::LiveRange(int id, Zone* zone)
134  : id_(id),
135  spilled_(false),
136  kind_(UNALLOCATED_REGISTERS),
137  assigned_register_(kInvalidAssignment),
138  last_interval_(NULL),
139  first_interval_(NULL),
140  first_pos_(NULL),
141  parent_(NULL),
142  next_(NULL),
143  current_interval_(NULL),
144  last_processed_use_(NULL),
145  current_hint_operand_(NULL),
146  spill_operand_(new(zone) LOperand()),
147  spill_start_index_(kMaxInt) { }
148 
149 
150 void LiveRange::set_assigned_register(int reg, Zone* zone) {
151  ASSERT(!HasRegisterAssigned() && !IsSpilled());
152  assigned_register_ = reg;
153  ConvertOperands(zone);
154 }
155 
156 
157 void LiveRange::MakeSpilled(Zone* zone) {
158  ASSERT(!IsSpilled());
159  ASSERT(TopLevel()->HasAllocatedSpillOperand());
160  spilled_ = true;
161  assigned_register_ = kInvalidAssignment;
162  ConvertOperands(zone);
163 }
164 
165 
166 bool LiveRange::HasAllocatedSpillOperand() const {
167  ASSERT(spill_operand_ != NULL);
168  return !spill_operand_->IsIgnored();
169 }
170 
171 
172 void LiveRange::SetSpillOperand(LOperand* operand) {
173  ASSERT(!operand->IsUnallocated());
174  ASSERT(spill_operand_ != NULL);
175  ASSERT(spill_operand_->IsIgnored());
176  spill_operand_->ConvertTo(operand->kind(), operand->index());
177 }
178 
179 
180 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
181  UsePosition* use_pos = last_processed_use_;
182  if (use_pos == NULL) use_pos = first_pos();
183  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
184  use_pos = use_pos->next();
185  }
186  last_processed_use_ = use_pos;
187  return use_pos;
188 }
189 
190 
191 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
192  LifetimePosition start) {
193  UsePosition* pos = NextUsePosition(start);
194  while (pos != NULL && !pos->RegisterIsBeneficial()) {
195  pos = pos->next();
196  }
197  return pos;
198 }
199 
200 
201 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
202  LifetimePosition start) {
203  UsePosition* pos = first_pos();
204  UsePosition* prev = NULL;
205  while (pos != NULL && pos->pos().Value() < start.Value()) {
206  if (pos->RegisterIsBeneficial()) prev = pos;
207  pos = pos->next();
208  }
209  return prev;
210 }
211 
212 
213 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
214  UsePosition* pos = NextUsePosition(start);
215  while (pos != NULL && !pos->RequiresRegister()) {
216  pos = pos->next();
217  }
218  return pos;
219 }
220 
221 
222 bool LiveRange::CanBeSpilled(LifetimePosition pos) {
223  // We cannot spill a live range that has a use requiring a register
224  // at the current or the immediate next position.
225  UsePosition* use_pos = NextRegisterPosition(pos);
226  if (use_pos == NULL) return true;
227  return
228  use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
229 }
230 
231 
232 LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
233  LOperand* op = NULL;
234  if (HasRegisterAssigned()) {
235  ASSERT(!IsSpilled());
236  switch (Kind()) {
237  case GENERAL_REGISTERS:
238  op = LRegister::Create(assigned_register(), zone);
239  break;
240  case DOUBLE_REGISTERS:
241  op = LDoubleRegister::Create(assigned_register(), zone);
242  break;
243  default:
244  UNREACHABLE();
245  }
246  } else if (IsSpilled()) {
247  ASSERT(!HasRegisterAssigned());
248  op = TopLevel()->GetSpillOperand();
249  ASSERT(!op->IsUnallocated());
250  } else {
251  LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
252  unalloc->set_virtual_register(id_);
253  op = unalloc;
254  }
255  return op;
256 }
257 
258 
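// Note on the two helpers below: current_interval_ acts as a cached cursor
// into the sorted interval chain. FirstSearchIntervalForPosition falls back to
// the first interval whenever a query position precedes the cursor, and
// AdvanceLastProcessedMarker only ever moves the cursor forward, so repeated
// Covers/FirstIntersection queries at increasing positions stay cheap.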
259 UseInterval* LiveRange::FirstSearchIntervalForPosition(
260  LifetimePosition position) const {
261  if (current_interval_ == NULL) return first_interval_;
262  if (current_interval_->start().Value() > position.Value()) {
263  current_interval_ = NULL;
264  return first_interval_;
265  }
266  return current_interval_;
267 }
268 
269 
270 void LiveRange::AdvanceLastProcessedMarker(
271  UseInterval* to_start_of, LifetimePosition but_not_past) const {
272  if (to_start_of == NULL) return;
273  if (to_start_of->start().Value() > but_not_past.Value()) return;
274  LifetimePosition start =
275  current_interval_ == NULL ? LifetimePosition::Invalid()
276  : current_interval_->start();
277  if (to_start_of->start().Value() > start.Value()) {
278  current_interval_ = to_start_of;
279  }
280 }
281 
282 
283 void LiveRange::SplitAt(LifetimePosition position,
284  LiveRange* result,
285  Zone* zone) {
286  ASSERT(Start().Value() < position.Value());
287  ASSERT(result->IsEmpty());
288  // Find the last interval that ends before the position. If the
289  // position is contained in one of the intervals in the chain, we
290  // split that interval and use the first part.
291  UseInterval* current = FirstSearchIntervalForPosition(position);
292 
293  // If the split position coincides with the beginning of a use interval
294  // we need to split use positions in a special way.
295  bool split_at_start = false;
296 
297  if (current->start().Value() == position.Value()) {
298  // When splitting at start we need to locate the previous use interval.
299  current = first_interval_;
300  }
301 
302  while (current != NULL) {
303  if (current->Contains(position)) {
304  current->SplitAt(position, zone);
305  break;
306  }
307  UseInterval* next = current->next();
308  if (next->start().Value() >= position.Value()) {
309  split_at_start = (next->start().Value() == position.Value());
310  break;
311  }
312  current = next;
313  }
314 
315  // Partition original use intervals to the two live ranges.
316  UseInterval* before = current;
317  UseInterval* after = before->next();
318  result->last_interval_ = (last_interval_ == before)
319  ? after // Only interval in the range after split.
320  : last_interval_; // Last interval of the original range.
321  result->first_interval_ = after;
322  last_interval_ = before;
323 
324  // Find the last use position before the split and the first use
325  // position after it.
326  UsePosition* use_after = first_pos_;
327  UsePosition* use_before = NULL;
328  if (split_at_start) {
329  // The split position coincides with the beginning of a use interval (the
330  // end of a lifetime hole). Use at this position should be attributed to
331  // the split child, because the split child owns the use interval covering it.
332  while (use_after != NULL && use_after->pos().Value() < position.Value()) {
333  use_before = use_after;
334  use_after = use_after->next();
335  }
336  } else {
337  while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
338  use_before = use_after;
339  use_after = use_after->next();
340  }
341  }
342 
343  // Partition original use positions to the two live ranges.
344  if (use_before != NULL) {
345  use_before->next_ = NULL;
346  } else {
347  first_pos_ = NULL;
348  }
349  result->first_pos_ = use_after;
350 
351  // Discard cached iteration state. It might be pointing
352  // to the use that no longer belongs to this live range.
353  last_processed_use_ = NULL;
354  current_interval_ = NULL;
355 
356  // Link the new live range in the chain before any of the other
357  // ranges linked from the range before the split.
358  result->parent_ = (parent_ == NULL) ? this : parent_;
359  result->kind_ = result->parent_->kind_;
360  result->next_ = next_;
361  next_ = result;
362 
363 #ifdef DEBUG
364  Verify();
365  result->Verify();
366 #endif
367 }
368 
369 
370 // This implements an ordering on live ranges so that they are ordered by their
371 // start positions. This is needed for the correctness of the register
372 // allocation algorithm. If two live ranges start at the same offset then there
373 // is a tie breaker based on where the value is first used. This part of the
374 // ordering is merely a heuristic.
375 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
376  LifetimePosition start = Start();
377  LifetimePosition other_start = other->Start();
378  if (start.Value() == other_start.Value()) {
379  UsePosition* pos = first_pos();
380  if (pos == NULL) return false;
381  UsePosition* other_pos = other->first_pos();
382  if (other_pos == NULL) return true;
383  return pos->pos().Value() < other_pos->pos().Value();
384  }
385  return start.Value() < other_start.Value();
386 }
387 
388 
389 void LiveRange::ShortenTo(LifetimePosition start) {
390  LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
391  ASSERT(first_interval_ != NULL);
392  ASSERT(first_interval_->start().Value() <= start.Value());
393  ASSERT(start.Value() < first_interval_->end().Value());
394  first_interval_->set_start(start);
395 }
396 
397 
398 void LiveRange::EnsureInterval(LifetimePosition start,
399  LifetimePosition end,
400  Zone* zone) {
401  LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
402  id_,
403  start.Value(),
404  end.Value());
405  LifetimePosition new_end = end;
406  while (first_interval_ != NULL &&
407  first_interval_->start().Value() <= end.Value()) {
408  if (first_interval_->end().Value() > end.Value()) {
409  new_end = first_interval_->end();
410  }
411  first_interval_ = first_interval_->next();
412  }
413 
414  UseInterval* new_interval = new(zone) UseInterval(start, new_end);
415  new_interval->next_ = first_interval_;
416  first_interval_ = new_interval;
417  if (new_interval->next() == NULL) {
418  last_interval_ = new_interval;
419  }
420 }
421 
422 
423 void LiveRange::AddUseInterval(LifetimePosition start,
424  LifetimePosition end,
425  Zone* zone) {
426  LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
427  id_,
428  start.Value(),
429  end.Value());
430  if (first_interval_ == NULL) {
431  UseInterval* interval = new(zone) UseInterval(start, end);
432  first_interval_ = interval;
433  last_interval_ = interval;
434  } else {
435  if (end.Value() == first_interval_->start().Value()) {
436  first_interval_->set_start(start);
437  } else if (end.Value() < first_interval_->start().Value()) {
438  UseInterval* interval = new(zone) UseInterval(start, end);
439  interval->set_next(first_interval_);
440  first_interval_ = interval;
441  } else {
442  // The order of instruction processing (see ProcessInstructions) guarantees
443  // that each new use interval either precedes or intersects with the
444  // last added interval.
445  ASSERT(start.Value() < first_interval_->end().Value());
446  first_interval_->start_ = Min(start, first_interval_->start_);
447  first_interval_->end_ = Max(end, first_interval_->end_);
448  }
449  }
450 }
451 
452 
453 void LiveRange::AddUsePosition(LifetimePosition pos,
454  LOperand* operand,
455  LOperand* hint,
456  Zone* zone) {
457  LAllocator::TraceAlloc("Add to live range %d use position %d\n",
458  id_,
459  pos.Value());
460  UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
461  UsePosition* prev_hint = NULL;
462  UsePosition* prev = NULL;
463  UsePosition* current = first_pos_;
464  while (current != NULL && current->pos().Value() < pos.Value()) {
465  prev_hint = current->HasHint() ? current : prev_hint;
466  prev = current;
467  current = current->next();
468  }
469 
470  if (prev == NULL) {
471  use_pos->set_next(first_pos_);
472  first_pos_ = use_pos;
473  } else {
474  use_pos->next_ = prev->next_;
475  prev->next_ = use_pos;
476  }
477 
478  if (prev_hint == NULL && use_pos->HasHint()) {
479  current_hint_operand_ = hint;
480  }
481 }
482 
483 
484 void LiveRange::ConvertOperands(Zone* zone) {
485  LOperand* op = CreateAssignedOperand(zone);
486  UsePosition* use_pos = first_pos();
487  while (use_pos != NULL) {
488  ASSERT(Start().Value() <= use_pos->pos().Value() &&
489  use_pos->pos().Value() <= End().Value());
490 
491  if (use_pos->HasOperand()) {
492  ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
493  !use_pos->RequiresRegister());
494  use_pos->operand()->ConvertTo(op->kind(), op->index());
495  }
496  use_pos = use_pos->next();
497  }
498 }
499 
500 
501 bool LiveRange::CanCover(LifetimePosition position) const {
502  if (IsEmpty()) return false;
503  return Start().Value() <= position.Value() &&
504  position.Value() < End().Value();
505 }
506 
507 
508 bool LiveRange::Covers(LifetimePosition position) const {
509  if (!CanCover(position)) return false;
510  UseInterval* start_search = FirstSearchIntervalForPosition(position);
511  for (UseInterval* interval = start_search;
512  interval != NULL;
513  interval = interval->next()) {
514  ASSERT(interval->next() == NULL ||
515  interval->next()->start().Value() >= interval->start().Value());
516  AdvanceLastProcessedMarker(interval, position);
517  if (interval->Contains(position)) return true;
518  if (interval->start().Value() > position.Value()) return false;
519  }
520  return false;
521 }
522 
523 
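// FirstIntersection walks this range's intervals and the other range's
// intervals in lockstep (both chains are sorted by start position) and returns
// the first position covered by both ranges, or LifetimePosition::Invalid() if
// the two ranges are disjoint.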
524 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
525  UseInterval* b = other->first_interval();
526  if (b == NULL) return LifetimePosition::Invalid();
527  LifetimePosition advance_last_processed_up_to = b->start();
528  UseInterval* a = FirstSearchIntervalForPosition(b->start());
529  while (a != NULL && b != NULL) {
530  if (a->start().Value() > other->End().Value()) break;
531  if (b->start().Value() > End().Value()) break;
532  LifetimePosition cur_intersection = a->Intersect(b);
533  if (cur_intersection.IsValid()) {
534  return cur_intersection;
535  }
536  if (a->start().Value() < b->start().Value()) {
537  a = a->next();
538  if (a == NULL || a->start().Value() > other->End().Value()) break;
539  AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
540  } else {
541  b = b->next();
542  }
543  }
544  return LifetimePosition::Invalid();
545 }
546 
547 
548 LAllocator::LAllocator(int num_values, HGraph* graph)
549  : zone_(graph->isolate()),
550  chunk_(NULL),
551  live_in_sets_(graph->blocks()->length(), zone()),
552  live_ranges_(num_values * 2, zone()),
553  fixed_live_ranges_(NULL),
554  fixed_double_live_ranges_(NULL),
555  unhandled_live_ranges_(num_values * 2, zone()),
556  active_live_ranges_(8, zone()),
557  inactive_live_ranges_(8, zone()),
558  reusable_slots_(8, zone()),
559  next_virtual_register_(num_values),
560  first_artificial_register_(num_values),
561  mode_(UNALLOCATED_REGISTERS),
562  num_registers_(-1),
563  graph_(graph),
564  has_osr_entry_(false),
565  allocation_ok_(true) { }
566 
567 
568 void LAllocator::InitializeLivenessAnalysis() {
569  // Initialize the live_in sets for each block to NULL.
570  int block_count = graph_->blocks()->length();
571  live_in_sets_.Initialize(block_count, zone());
572  live_in_sets_.AddBlock(NULL, block_count, zone());
573 }
574 
575 
576 BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
577  // Compute live out for the given block, except not including backward
578  // successor edges.
579  BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
580 
581  // Process all successor blocks.
582  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
583  // Add values live on entry to the successor. Note the successor's
584  // live_in will not be computed yet for backwards edges.
585  HBasicBlock* successor = it.Current();
586  BitVector* live_in = live_in_sets_[successor->block_id()];
587  if (live_in != NULL) live_out->Union(*live_in);
588 
589  // All phi input operands corresponding to this successor edge are live
590  // out from this block.
591  int index = successor->PredecessorIndexOf(block);
592  const ZoneList<HPhi*>* phis = successor->phis();
593  for (int i = 0; i < phis->length(); ++i) {
594  HPhi* phi = phis->at(i);
595  if (!phi->OperandAt(index)->IsConstant()) {
596  live_out->Add(phi->OperandAt(index)->id());
597  }
598  }
599  }
600 
601  return live_out;
602 }
603 
604 
605 void LAllocator::AddInitialIntervals(HBasicBlock* block,
606  BitVector* live_out) {
607  // Add an interval that includes the entire block to the live range for
608  // each live_out value.
609  LifetimePosition start = LifetimePosition::FromInstructionIndex(
610  block->first_instruction_index());
611  LifetimePosition end = LifetimePosition::FromInstructionIndex(
612  block->last_instruction_index()).NextInstruction();
613  BitVector::Iterator iterator(live_out);
614  while (!iterator.Done()) {
615  int operand_index = iterator.Current();
616  LiveRange* range = LiveRangeFor(operand_index);
617  range->AddUseInterval(start, end, zone());
618  iterator.Advance();
619  }
620 }
621 
622 
623 int LAllocator::FixedDoubleLiveRangeID(int index) {
624  return -index - 1 - Register::kMaxNumAllocatableRegisters;
625 }
626 
627 
628 LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
629  int pos,
630  bool is_tagged) {
631  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
632  ASSERT(operand->HasFixedPolicy());
633  if (operand->HasFixedSlotPolicy()) {
634  operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
635  } else if (operand->HasFixedRegisterPolicy()) {
636  int reg_index = operand->fixed_register_index();
637  operand->ConvertTo(LOperand::REGISTER, reg_index);
638  } else if (operand->HasFixedDoubleRegisterPolicy()) {
639  int reg_index = operand->fixed_register_index();
640  operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
641  } else {
642  UNREACHABLE();
643  }
644  if (is_tagged) {
645  TraceAlloc("Fixed reg is tagged at %d\n", pos);
646  LInstruction* instr = InstructionAt(pos);
647  if (instr->HasPointerMap()) {
648  instr->pointer_map()->RecordPointer(operand, chunk()->zone());
649  }
650  }
651  return operand;
652 }
653 
654 
655 LiveRange* LAllocator::FixedLiveRangeFor(int index) {
656  ASSERT(index < Register::kMaxNumAllocatableRegisters);
657  LiveRange* result = fixed_live_ranges_[index];
658  if (result == NULL) {
659  result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
660  ASSERT(result->IsFixed());
661  result->kind_ = GENERAL_REGISTERS;
662  SetLiveRangeAssignedRegister(result, index);
663  fixed_live_ranges_[index] = result;
664  }
665  return result;
666 }
667 
668 
669 LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
670  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
671  LiveRange* result = fixed_double_live_ranges_[index];
672  if (result == NULL) {
673  result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
674  chunk()->zone());
675  ASSERT(result->IsFixed());
676  result->kind_ = DOUBLE_REGISTERS;
677  SetLiveRangeAssignedRegister(result, index);
678  fixed_double_live_ranges_[index] = result;
679  }
680  return result;
681 }
682 
683 
684 LiveRange* LAllocator::LiveRangeFor(int index) {
685  if (index >= live_ranges_.length()) {
686  live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
687  }
688  LiveRange* result = live_ranges_[index];
689  if (result == NULL) {
690  result = new(zone()) LiveRange(index, chunk()->zone());
691  live_ranges_[index] = result;
692  }
693  return result;
694 }
695 
696 
697 LGap* LAllocator::GetLastGap(HBasicBlock* block) {
698  int last_instruction = block->last_instruction_index();
699  int index = chunk_->NearestGapPos(last_instruction);
700  return GapAt(index);
701 }
702 
703 
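// Maps an unallocated operand back to the HPhi that defines its virtual
// register; returns NULL when the operand is not an unallocated use of a phi.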
704 HPhi* LAllocator::LookupPhi(LOperand* operand) const {
705  if (!operand->IsUnallocated()) return NULL;
706  int index = LUnallocated::cast(operand)->virtual_register();
707  HValue* instr = graph_->LookupValue(index);
708  if (instr != NULL && instr->IsPhi()) {
709  return HPhi::cast(instr);
710  }
711  return NULL;
712 }
713 
714 
715 LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
716  if (operand->IsUnallocated()) {
717  return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
718  } else if (operand->IsRegister()) {
719  return FixedLiveRangeFor(operand->index());
720  } else if (operand->IsDoubleRegister()) {
721  return FixedDoubleLiveRangeFor(operand->index());
722  } else {
723  return NULL;
724  }
725 }
726 
727 
728 void LAllocator::Define(LifetimePosition position,
729  LOperand* operand,
730  LOperand* hint) {
731  LiveRange* range = LiveRangeFor(operand);
732  if (range == NULL) return;
733 
734  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
735  // Can happen if there is a definition without use.
736  range->AddUseInterval(position, position.NextInstruction(), zone());
737  range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
738  } else {
739  range->ShortenTo(position);
740  }
741 
742  if (operand->IsUnallocated()) {
743  LUnallocated* unalloc_operand = LUnallocated::cast(operand);
744  range->AddUsePosition(position, unalloc_operand, hint, zone());
745  }
746 }
747 
748 
749 void LAllocator::Use(LifetimePosition block_start,
750  LifetimePosition position,
751  LOperand* operand,
752  LOperand* hint) {
753  LiveRange* range = LiveRangeFor(operand);
754  if (range == NULL) return;
755  if (operand->IsUnallocated()) {
756  LUnallocated* unalloc_operand = LUnallocated::cast(operand);
757  range->AddUsePosition(position, unalloc_operand, hint, zone());
758  }
759  range->AddUseInterval(block_start, position, zone());
760 }
761 
762 
763 void LAllocator::AddConstraintsGapMove(int index,
764  LOperand* from,
765  LOperand* to) {
766  LGap* gap = GapAt(index);
767  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
768  chunk()->zone());
769  if (from->IsUnallocated()) {
770  const ZoneList<LMoveOperands>* move_operands = move->move_operands();
771  for (int i = 0; i < move_operands->length(); ++i) {
772  LMoveOperands cur = move_operands->at(i);
773  LOperand* cur_to = cur.destination();
774  if (cur_to->IsUnallocated()) {
775  if (LUnallocated::cast(cur_to)->virtual_register() ==
776  LUnallocated::cast(from)->virtual_register()) {
777  move->AddMove(cur.source(), to, chunk()->zone());
778  return;
779  }
780  }
781  }
782  }
783  move->AddMove(from, to, chunk()->zone());
784 }
785 
786 
787 void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
788  int start = block->first_instruction_index();
789  int end = block->last_instruction_index();
790  if (start == -1) return;
791  for (int i = start; i <= end; ++i) {
792  if (IsGapAt(i)) {
793  LInstruction* instr = NULL;
794  LInstruction* prev_instr = NULL;
795  if (i < end) instr = InstructionAt(i + 1);
796  if (i > start) prev_instr = InstructionAt(i - 1);
797  MeetConstraintsBetween(prev_instr, instr, i);
798  if (!AllocationOk()) return;
799  }
800  }
801 }
802 
803 
804 void LAllocator::MeetConstraintsBetween(LInstruction* first,
805  LInstruction* second,
806  int gap_index) {
807  // Handle fixed temporaries.
808  if (first != NULL) {
809  for (TempIterator it(first); !it.Done(); it.Advance()) {
810  LUnallocated* temp = LUnallocated::cast(it.Current());
811  if (temp->HasFixedPolicy()) {
812  AllocateFixed(temp, gap_index - 1, false);
813  }
814  }
815  }
816 
817  // Handle fixed output operand.
818  if (first != NULL && first->Output() != NULL) {
819  LUnallocated* first_output = LUnallocated::cast(first->Output());
820  LiveRange* range = LiveRangeFor(first_output->virtual_register());
821  bool assigned = false;
822  if (first_output->HasFixedPolicy()) {
823  LUnallocated* output_copy = first_output->CopyUnconstrained(
824  chunk()->zone());
825  bool is_tagged = HasTaggedValue(first_output->virtual_register());
826  AllocateFixed(first_output, gap_index, is_tagged);
827 
828  // This value is produced on the stack, we never need to spill it.
829  if (first_output->IsStackSlot()) {
830  range->SetSpillOperand(first_output);
831  range->SetSpillStartIndex(gap_index - 1);
832  assigned = true;
833  }
834  chunk_->AddGapMove(gap_index, first_output, output_copy);
835  }
836 
837  if (!assigned) {
838  range->SetSpillStartIndex(gap_index);
839 
840  // This move to spill operand is not a real use. Liveness analysis
841  // and splitting of live ranges do not account for it.
842  // Thus it should be inserted to a lifetime position corresponding to
843  // the instruction end.
844  LGap* gap = GapAt(gap_index);
845  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
846  chunk()->zone());
847  move->AddMove(first_output, range->GetSpillOperand(),
848  chunk()->zone());
849  }
850  }
851 
852  // Handle fixed input operands of second instruction.
853  if (second != NULL) {
854  for (UseIterator it(second); !it.Done(); it.Advance()) {
855  LUnallocated* cur_input = LUnallocated::cast(it.Current());
856  if (cur_input->HasFixedPolicy()) {
857  LUnallocated* input_copy = cur_input->CopyUnconstrained(
858  chunk()->zone());
859  bool is_tagged = HasTaggedValue(cur_input->virtual_register());
860  AllocateFixed(cur_input, gap_index + 1, is_tagged);
861  AddConstraintsGapMove(gap_index, input_copy, cur_input);
862  } else if (cur_input->HasWritableRegisterPolicy()) {
863  // The live range of writable input registers always goes until the end
864  // of the instruction.
865  ASSERT(!cur_input->IsUsedAtStart());
866 
867  LUnallocated* input_copy = cur_input->CopyUnconstrained(
868  chunk()->zone());
869  int vreg = GetVirtualRegister();
870  if (!AllocationOk()) return;
871  cur_input->set_virtual_register(vreg);
872 
873  if (RequiredRegisterKind(input_copy->virtual_register()) ==
874  DOUBLE_REGISTERS) {
875  double_artificial_registers_.Add(
876  cur_input->virtual_register() - first_artificial_register_,
877  zone());
878  }
879 
880  AddConstraintsGapMove(gap_index, input_copy, cur_input);
881  }
882  }
883  }
884 
885  // Handle "output same as input" for second instruction.
886  if (second != NULL && second->Output() != NULL) {
887  LUnallocated* second_output = LUnallocated::cast(second->Output());
888  if (second_output->HasSameAsInputPolicy()) {
889  LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
890  int output_vreg = second_output->virtual_register();
891  int input_vreg = cur_input->virtual_register();
892 
893  LUnallocated* input_copy = cur_input->CopyUnconstrained(
894  chunk()->zone());
895  cur_input->set_virtual_register(second_output->virtual_register());
896  AddConstraintsGapMove(gap_index, input_copy, cur_input);
897 
898  if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
899  int index = gap_index + 1;
900  LInstruction* instr = InstructionAt(index);
901  if (instr->HasPointerMap()) {
902  instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
903  }
904  } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
905  // The input is assumed to immediately have a tagged representation,
906  // before the pointer map can be used. I.e. the pointer map at the
907  // instruction will include the output operand (whose value at the
908  // beginning of the instruction is equal to the input operand). If
909  // this is not desired, then the pointer map at this instruction needs
910  // to be adjusted manually.
911  }
912  }
913  }
914 }
915 
916 
917 void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
918  int block_start = block->first_instruction_index();
919  int index = block->last_instruction_index();
920 
921  LifetimePosition block_start_position =
922  LifetimePosition::FromInstructionIndex(block_start);
923 
924  while (index >= block_start) {
925  LifetimePosition curr_position =
926  LifetimePosition::FromInstructionIndex(index);
927 
928  if (IsGapAt(index)) {
929  // We have a gap at this position.
930  LGap* gap = GapAt(index);
931  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
932  chunk()->zone());
933  const ZoneList<LMoveOperands>* move_operands = move->move_operands();
934  for (int i = 0; i < move_operands->length(); ++i) {
935  LMoveOperands* cur = &move_operands->at(i);
936  if (cur->IsIgnored()) continue;
937  LOperand* from = cur->source();
938  LOperand* to = cur->destination();
939  HPhi* phi = LookupPhi(to);
940  LOperand* hint = to;
941  if (phi != NULL) {
942  // This is a phi resolving move.
943  if (!phi->block()->IsLoopHeader()) {
944  hint = LiveRangeFor(phi->id())->current_hint_operand();
945  }
946  } else {
947  if (to->IsUnallocated()) {
948  if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
949  Define(curr_position, to, from);
950  live->Remove(LUnallocated::cast(to)->virtual_register());
951  } else {
952  cur->Eliminate();
953  continue;
954  }
955  } else {
956  Define(curr_position, to, from);
957  }
958  }
959  Use(block_start_position, curr_position, from, hint);
960  if (from->IsUnallocated()) {
961  live->Add(LUnallocated::cast(from)->virtual_register());
962  }
963  }
964  } else {
965  ASSERT(!IsGapAt(index));
966  LInstruction* instr = InstructionAt(index);
967 
968  if (instr != NULL) {
969  LOperand* output = instr->Output();
970  if (output != NULL) {
971  if (output->IsUnallocated()) {
972  live->Remove(LUnallocated::cast(output)->virtual_register());
973  }
974  Define(curr_position, output, NULL);
975  }
976 
977  if (instr->ClobbersRegisters()) {
978  for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
979  if (output == NULL || !output->IsRegister() ||
980  output->index() != i) {
981  LiveRange* range = FixedLiveRangeFor(i);
982  range->AddUseInterval(curr_position,
983  curr_position.InstructionEnd(),
984  zone());
985  }
986  }
987  }
988 
989  if (instr->ClobbersDoubleRegisters()) {
990  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
991  if (output == NULL || !output->IsDoubleRegister() ||
992  output->index() != i) {
993  LiveRange* range = FixedDoubleLiveRangeFor(i);
994  range->AddUseInterval(curr_position,
995  curr_position.InstructionEnd(),
996  zone());
997  }
998  }
999  }
1000 
1001  for (UseIterator it(instr); !it.Done(); it.Advance()) {
1002  LOperand* input = it.Current();
1003 
1004  LifetimePosition use_pos;
1005  if (input->IsUnallocated() &&
1006  LUnallocated::cast(input)->IsUsedAtStart()) {
1007  use_pos = curr_position;
1008  } else {
1009  use_pos = curr_position.InstructionEnd();
1010  }
1011 
1012  Use(block_start_position, use_pos, input, NULL);
1013  if (input->IsUnallocated()) {
1014  live->Add(LUnallocated::cast(input)->virtual_register());
1015  }
1016  }
1017 
1018  for (TempIterator it(instr); !it.Done(); it.Advance()) {
1019  LOperand* temp = it.Current();
1020  if (instr->ClobbersTemps()) {
1021  if (temp->IsRegister()) continue;
1022  if (temp->IsUnallocated()) {
1023  LUnallocated* temp_unalloc = LUnallocated::cast(temp);
1024  if (temp_unalloc->HasFixedPolicy()) {
1025  continue;
1026  }
1027  }
1028  }
1029  Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
1030  Define(curr_position, temp, NULL);
1031  }
1032  }
1033  }
1034 
1035  index = index - 1;
1036  }
1037 }
1038 
1039 
1040 void LAllocator::ResolvePhis(HBasicBlock* block) {
1041  const ZoneList<HPhi*>* phis = block->phis();
1042  for (int i = 0; i < phis->length(); ++i) {
1043  HPhi* phi = phis->at(i);
1044  LUnallocated* phi_operand =
1045  new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
1046  phi_operand->set_virtual_register(phi->id());
1047  for (int j = 0; j < phi->OperandCount(); ++j) {
1048  HValue* op = phi->OperandAt(j);
1049  LOperand* operand = NULL;
1050  if (op->IsConstant() && op->EmitAtUses()) {
1051  HConstant* constant = HConstant::cast(op);
1052  operand = chunk_->DefineConstantOperand(constant);
1053  } else {
1054  ASSERT(!op->EmitAtUses());
1055  LUnallocated* unalloc =
1056  new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
1057  unalloc->set_virtual_register(op->id());
1058  operand = unalloc;
1059  }
1060  HBasicBlock* cur_block = block->predecessors()->at(j);
1061  // The gap move must be added without any special processing as in
1062  // the AddConstraintsGapMove.
1063  chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
1064  operand,
1065  phi_operand);
1066 
1067  // We are going to insert a move before the branch instruction.
1068  // Some branch instructions (e.g. loops' back edges)
1069  // can potentially cause a GC so they have a pointer map.
1070  // By inserting a move we essentially create a copy of a
1071  // value which is invisible to PopulatePointerMaps(), because we store
1072  // it into a location different from the operand of a live range
1073  // covering a branch instruction.
1074  // Thus we need to manually record a pointer.
1075  LInstruction* branch =
1076  InstructionAt(cur_block->last_instruction_index());
1077  if (branch->HasPointerMap()) {
1078  if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
1079  branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
1080  } else if (!phi->representation().IsDouble()) {
1081  branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
1082  }
1083  }
1084  }
1085 
1086  LiveRange* live_range = LiveRangeFor(phi->id());
1087  LLabel* label = chunk_->GetLabel(phi->block()->block_id());
1088  label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
1089  AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
1090  live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
1091  }
1092 }
1093 
1094 
1095 bool LAllocator::Allocate(LChunk* chunk) {
1096  ASSERT(chunk_ == NULL);
1097  chunk_ = static_cast<LPlatformChunk*>(chunk);
1098  assigned_registers_ =
1099  new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
1100  chunk->zone());
1101  assigned_double_registers_ =
1102  new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
1103  chunk->zone());
1104  MeetRegisterConstraints();
1105  if (!AllocationOk()) return false;
1106  ResolvePhis();
1107  BuildLiveRanges();
1108  AllocateGeneralRegisters();
1109  if (!AllocationOk()) return false;
1110  AllocateDoubleRegisters();
1111  if (!AllocationOk()) return false;
1112  PopulatePointerMaps();
1113  ConnectRanges();
1114  ResolveControlFlow();
1115  return true;
1116 }
1117 
1118 
1119 void LAllocator::MeetRegisterConstraints() {
1120  LAllocatorPhase phase("L_Register constraints", this);
1121  first_artificial_register_ = next_virtual_register_;
1122  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
1123  for (int i = 0; i < blocks->length(); ++i) {
1124  HBasicBlock* block = blocks->at(i);
1125  MeetRegisterConstraints(block);
1126  if (!AllocationOk()) return;
1127  }
1128 }
1129 
1130 
1131 void LAllocator::ResolvePhis() {
1132  LAllocatorPhase phase("L_Resolve phis", this);
1133 
1134  // Process the blocks in reverse order.
1135  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
1136  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
1137  HBasicBlock* block = blocks->at(block_id);
1138  ResolvePhis(block);
1139  }
1140 }
1141 
1142 
1143 void LAllocator::ResolveControlFlow(LiveRange* range,
1144  HBasicBlock* block,
1145  HBasicBlock* pred) {
1146  LifetimePosition pred_end =
1147  LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
1148  LifetimePosition cur_start =
1149  LifetimePosition::FromInstructionIndex(block->first_instruction_index());
1150  LiveRange* pred_cover = NULL;
1151  LiveRange* cur_cover = NULL;
1152  LiveRange* cur_range = range;
1153  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
1154  if (cur_range->CanCover(cur_start)) {
1155  ASSERT(cur_cover == NULL);
1156  cur_cover = cur_range;
1157  }
1158  if (cur_range->CanCover(pred_end)) {
1159  ASSERT(pred_cover == NULL);
1160  pred_cover = cur_range;
1161  }
1162  cur_range = cur_range->next();
1163  }
1164 
1165  if (cur_cover->IsSpilled()) return;
1166  ASSERT(pred_cover != NULL && cur_cover != NULL);
1167  if (pred_cover != cur_cover) {
1168  LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
1169  LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
1170  if (!pred_op->Equals(cur_op)) {
1171  LGap* gap = NULL;
1172  if (block->predecessors()->length() == 1) {
1173  gap = GapAt(block->first_instruction_index());
1174  } else {
1175  ASSERT(pred->end()->SecondSuccessor() == NULL);
1176  gap = GetLastGap(pred);
1177 
1178  // We are going to insert a move before the branch instruction.
1179  // Some branch instructions (e.g. loops' back edges)
1180  // can potentially cause a GC so they have a pointer map.
1181  // By inserting a move we essentially create a copy of a
1182  // value which is invisible to PopulatePointerMaps(), because we store
1183  // it into a location different from the operand of a live range
1184  // covering a branch instruction.
1185  // Thus we need to manually record a pointer.
1186  LInstruction* branch = InstructionAt(pred->last_instruction_index());
1187  if (branch->HasPointerMap()) {
1188  if (HasTaggedValue(range->id())) {
1189  branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
1190  } else if (!cur_op->IsDoubleStackSlot() &&
1191  !cur_op->IsDoubleRegister()) {
1192  branch->pointer_map()->RemovePointer(cur_op);
1193  }
1194  }
1195  }
1196  gap->GetOrCreateParallelMove(
1197  LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
1198  chunk()->zone());
1199  }
1200  }
1201 }
1202 
1203 
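// Picks the parallel move that executes at |pos|: if |pos| falls on a gap, the
// gap's own START or END move is used; otherwise the AFTER move of the gap
// preceding the instruction is used for an instruction-start position, and the
// BEFORE move of the gap following it for an instruction-end position.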
1204 LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
1205  int index = pos.InstructionIndex();
1206  if (IsGapAt(index)) {
1207  LGap* gap = GapAt(index);
1208  return gap->GetOrCreateParallelMove(
1209  pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
1210  }
1211  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
1212  return GapAt(gap_pos)->GetOrCreateParallelMove(
1213  (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
1214 }
1215 
1216 
1217 HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
1218  LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
1219  return gap->block();
1220 }
1221 
1222 
1223 void LAllocator::ConnectRanges() {
1224  LAllocatorPhase phase("L_Connect ranges", this);
1225  for (int i = 0; i < live_ranges()->length(); ++i) {
1226  LiveRange* first_range = live_ranges()->at(i);
1227  if (first_range == NULL || first_range->parent() != NULL) continue;
1228 
1229  LiveRange* second_range = first_range->next();
1230  while (second_range != NULL) {
1231  LifetimePosition pos = second_range->Start();
1232 
1233  if (!second_range->IsSpilled()) {
1234  // Add gap move if the two live ranges touch and there is no block
1235  // boundary.
1236  if (first_range->End().Value() == pos.Value()) {
1237  bool should_insert = true;
1238  if (IsBlockBoundary(pos)) {
1239  should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
1240  }
1241  if (should_insert) {
1242  LParallelMove* move = GetConnectingParallelMove(pos);
1243  LOperand* prev_operand = first_range->CreateAssignedOperand(
1244  chunk()->zone());
1245  LOperand* cur_operand = second_range->CreateAssignedOperand(
1246  chunk()->zone());
1247  move->AddMove(prev_operand, cur_operand,
1248  chunk()->zone());
1249  }
1250  }
1251  }
1252 
1253  first_range = second_range;
1254  second_range = second_range->next();
1255  }
1256  }
1257 }
1258 
1259 
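// Control flow into a block can be resolved eagerly (with a plain gap move,
// as done in ConnectRanges) only when the block has exactly one predecessor
// and that predecessor is the immediately preceding block in linear order.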
1260 bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
1261  if (block->predecessors()->length() != 1) return false;
1262  return block->predecessors()->first()->block_id() == block->block_id() - 1;
1263 }
1264 
1265 
1266 void LAllocator::ResolveControlFlow() {
1267  LAllocatorPhase phase("L_Resolve control flow", this);
1268  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
1269  for (int block_id = 1; block_id < blocks->length(); ++block_id) {
1270  HBasicBlock* block = blocks->at(block_id);
1271  if (CanEagerlyResolveControlFlow(block)) continue;
1272  BitVector* live = live_in_sets_[block->block_id()];
1273  BitVector::Iterator iterator(live);
1274  while (!iterator.Done()) {
1275  int operand_index = iterator.Current();
1276  for (int i = 0; i < block->predecessors()->length(); ++i) {
1277  HBasicBlock* cur = block->predecessors()->at(i);
1278  LiveRange* cur_range = LiveRangeFor(operand_index);
1279  ResolveControlFlow(cur_range, block, cur);
1280  }
1281  iterator.Advance();
1282  }
1283  }
1284 }
1285 
1286 
1287 void LAllocator::BuildLiveRanges() {
1288  LAllocatorPhase phase("L_Build live ranges", this);
1289  InitializeLivenessAnalysis();
1290  // Process the blocks in reverse order.
1291  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
1292  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
1293  HBasicBlock* block = blocks->at(block_id);
1294  BitVector* live = ComputeLiveOut(block);
1295  // Initially consider all live_out values live for the entire block. We
1296  // will shorten these intervals if necessary.
1297  AddInitialIntervals(block, live);
1298 
1299  // Process the instructions in reverse order, generating and killing
1300  // live values.
1301  ProcessInstructions(block, live);
1302  // All phi output operands are killed by this block.
1303  const ZoneList<HPhi*>* phis = block->phis();
1304  for (int i = 0; i < phis->length(); ++i) {
1305  // The live range interval already ends at the first instruction of the
1306  // block.
1307  HPhi* phi = phis->at(i);
1308  live->Remove(phi->id());
1309 
1310  LOperand* hint = NULL;
1311  LOperand* phi_operand = NULL;
1312  LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
1313  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
1314  chunk()->zone());
1315  for (int j = 0; j < move->move_operands()->length(); ++j) {
1316  LOperand* to = move->move_operands()->at(j).destination();
1317  if (to->IsUnallocated() &&
1318  LUnallocated::cast(to)->virtual_register() == phi->id()) {
1319  hint = move->move_operands()->at(j).source();
1320  phi_operand = to;
1321  break;
1322  }
1323  }
1324  ASSERT(hint != NULL);
1325 
1326  LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
1327  block->first_instruction_index());
1328  Define(block_start, phi_operand, hint);
1329  }
1330 
1331  // Now live is live_in for this block except not including values live
1332  // out on backward successor edges.
1333  live_in_sets_[block_id] = live;
1334 
1335  // If this block is a loop header go back and patch up the necessary
1336  // predecessor blocks.
1337  if (block->IsLoopHeader()) {
1338  // TODO(kmillikin): Need to be able to get the last block of the loop
1339  // in the loop information. Add a live range stretching from the first
1340  // loop instruction to the last for each value live on entry to the
1341  // header.
1342  HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
1343  BitVector::Iterator iterator(live);
1344  LifetimePosition start = LifetimePosition::FromInstructionIndex(
1345  block->first_instruction_index());
1346  LifetimePosition end = LifetimePosition::FromInstructionIndex(
1347  back_edge->last_instruction_index()).NextInstruction();
1348  while (!iterator.Done()) {
1349  int operand_index = iterator.Current();
1350  LiveRange* range = LiveRangeFor(operand_index);
1351  range->EnsureInterval(start, end, zone());
1352  iterator.Advance();
1353  }
1354 
1355  for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
1356  live_in_sets_[i]->Union(*live);
1357  }
1358  }
1359 
1360 #ifdef DEBUG
1361  if (block_id == 0) {
1362  BitVector::Iterator iterator(live);
1363  bool found = false;
1364  while (!iterator.Done()) {
1365  found = true;
1366  int operand_index = iterator.Current();
1367  if (chunk_->info()->IsStub()) {
1368  CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
1369  PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
1370  } else {
1371  ASSERT(chunk_->info()->IsOptimizing());
1372  AllowHandleDereference allow_deref;
1373  PrintF("Function: %s\n",
1374  chunk_->info()->function()->debug_name()->ToCString().get());
1375  }
1376  PrintF("Value %d used before first definition!\n", operand_index);
1377  LiveRange* range = LiveRangeFor(operand_index);
1378  PrintF("First use is at %d\n", range->first_pos()->pos().Value());
1379  iterator.Advance();
1380  }
1381  ASSERT(!found);
1382  }
1383 #endif
1384  }
1385 
1386  for (int i = 0; i < live_ranges_.length(); ++i) {
1387  if (live_ranges_[i] != NULL) {
1388  live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
1389  }
1390  }
1391 }
1392 
1393 
1394 bool LAllocator::SafePointsAreInOrder() const {
1395  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
1396  int safe_point = 0;
1397  for (int i = 0; i < pointer_maps->length(); ++i) {
1398  LPointerMap* map = pointer_maps->at(i);
1399  if (safe_point > map->lithium_position()) return false;
1400  safe_point = map->lithium_position();
1401  }
1402  return true;
1403 }
1404 
1405 
1406 void LAllocator::PopulatePointerMaps() {
1407  LAllocatorPhase phase("L_Populate pointer maps", this);
1408  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
1409 
1410  ASSERT(SafePointsAreInOrder());
1411 
1412  // Iterate over all safe point positions and record a pointer
1413  // for all spilled live ranges at this point.
1414  int first_safe_point_index = 0;
1415  int last_range_start = 0;
1416  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
1417  LiveRange* range = live_ranges()->at(range_idx);
1418  if (range == NULL) continue;
1419  // Iterate over the first parts of multi-part live ranges.
1420  if (range->parent() != NULL) continue;
1421  // Skip non-pointer values.
1422  if (!HasTaggedValue(range->id())) continue;
1423  // Skip empty live ranges.
1424  if (range->IsEmpty()) continue;
1425 
1426  // Find the extent of the range and its children.
1427  int start = range->Start().InstructionIndex();
1428  int end = 0;
1429  for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
1430  LifetimePosition this_end = cur->End();
1431  if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
1432  ASSERT(cur->Start().InstructionIndex() >= start);
1433  }
1434 
1435  // Most of the ranges are in order, but not all. Keep an eye on when
1436  // they step backwards and reset the first_safe_point_index so we don't
1437  // miss any safe points.
1438  if (start < last_range_start) {
1439  first_safe_point_index = 0;
1440  }
1441  last_range_start = start;
1442 
1443  // Step across all the safe points that are before the start of this range,
1444  // recording how far we step in order to save doing this for the next range.
1445  while (first_safe_point_index < pointer_maps->length()) {
1446  LPointerMap* map = pointer_maps->at(first_safe_point_index);
1447  int safe_point = map->lithium_position();
1448  if (safe_point >= start) break;
1449  first_safe_point_index++;
1450  }
1451 
1452  // Step through the safe points to see whether they are in the range.
1453  for (int safe_point_index = first_safe_point_index;
1454  safe_point_index < pointer_maps->length();
1455  ++safe_point_index) {
1456  LPointerMap* map = pointer_maps->at(safe_point_index);
1457  int safe_point = map->lithium_position();
1458 
1459  // The safe points are sorted so we can stop searching here.
1460  if (safe_point - 1 > end) break;
1461 
1462  // Advance to the next active range that covers the current
1463  // safe point position.
1464  LifetimePosition safe_point_pos =
1465  LifetimePosition::FromInstructionIndex(safe_point);
1466  LiveRange* cur = range;
1467  while (cur != NULL && !cur->Covers(safe_point_pos)) {
1468  cur = cur->next();
1469  }
1470  if (cur == NULL) continue;
1471 
1472  // Check if the live range is spilled and the safe point is after
1473  // the spill position.
1474  if (range->HasAllocatedSpillOperand() &&
1475  safe_point >= range->spill_start_index()) {
1476  TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
1477  range->id(), range->spill_start_index(), safe_point);
1478  map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
1479  }
1480 
1481  if (!cur->IsSpilled()) {
1482  TraceAlloc("Pointer in register for range %d (start at %d) "
1483  "at safe point %d\n",
1484  cur->id(), cur->Start().Value(), safe_point);
1485  LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
1486  ASSERT(!operand->IsStackSlot());
1487  map->RecordPointer(operand, chunk()->zone());
1488  }
1489  }
1490  }
1491 }
1492 
1493 
1494 void LAllocator::AllocateGeneralRegisters() {
1495  LAllocatorPhase phase("L_Allocate general registers", this);
1496  num_registers_ = Register::NumAllocatableRegisters();
1497  mode_ = GENERAL_REGISTERS;
1498  AllocateRegisters();
1499 }
1500 
1501 
1502 void LAllocator::AllocateDoubleRegisters() {
1503  LAllocatorPhase phase("L_Allocate double registers", this);
1504  num_registers_ = DoubleRegister::NumAllocatableRegisters();
1505  mode_ = DOUBLE_REGISTERS;
1506  AllocateRegisters();
1507 }
1508 
1509 
1510 void LAllocator::AllocateRegisters() {
1511  ASSERT(unhandled_live_ranges_.is_empty());
1512 
1513  for (int i = 0; i < live_ranges_.length(); ++i) {
1514  if (live_ranges_[i] != NULL) {
1515  if (live_ranges_[i]->Kind() == mode_) {
1516  AddToUnhandledUnsorted(live_ranges_[i]);
1517  }
1518  }
1519  }
1520  SortUnhandled();
1521  ASSERT(UnhandledIsSorted());
1522 
1523  ASSERT(reusable_slots_.is_empty());
1524  ASSERT(active_live_ranges_.is_empty());
1525  ASSERT(inactive_live_ranges_.is_empty());
1526 
1527  if (mode_ == DOUBLE_REGISTERS) {
1528  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
1529  LiveRange* current = fixed_double_live_ranges_.at(i);
1530  if (current != NULL) {
1531  AddToInactive(current);
1532  }
1533  }
1534  } else {
1535  ASSERT(mode_ == GENERAL_REGISTERS);
1536  for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
1537  LiveRange* current = fixed_live_ranges_.at(i);
1538  if (current != NULL) {
1539  AddToInactive(current);
1540  }
1541  }
1542  }
1543 
1544  while (!unhandled_live_ranges_.is_empty()) {
1545  ASSERT(UnhandledIsSorted());
1546  LiveRange* current = unhandled_live_ranges_.RemoveLast();
1547  ASSERT(UnhandledIsSorted());
1548  LifetimePosition position = current->Start();
1549 #ifdef DEBUG
1550  allocation_finger_ = position;
1551 #endif
1552  TraceAlloc("Processing interval %d start=%d\n",
1553  current->id(),
1554  position.Value());
1555 
1556  if (current->HasAllocatedSpillOperand()) {
1557  TraceAlloc("Live range %d already has a spill operand\n", current->id());
1558  LifetimePosition next_pos = position;
1559  if (IsGapAt(next_pos.InstructionIndex())) {
1560  next_pos = next_pos.NextInstruction();
1561  }
1562  UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
1563  // If the range already has a spill operand and it doesn't need a
1564  // register immediately, split it and spill the first part of the range.
1565  if (pos == NULL) {
1566  Spill(current);
1567  continue;
1568  } else if (pos->pos().Value() >
1569  current->Start().NextInstruction().Value()) {
1570  // Do not spill live range eagerly if use position that can benefit from
1571  // the register is too close to the start of live range.
1572  SpillBetween(current, current->Start(), pos->pos());
1573  if (!AllocationOk()) return;
1574  ASSERT(UnhandledIsSorted());
1575  continue;
1576  }
1577  }
1578 
1579  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1580  LiveRange* cur_active = active_live_ranges_.at(i);
1581  if (cur_active->End().Value() <= position.Value()) {
1582  ActiveToHandled(cur_active);
1583  --i; // The live range was removed from the list of active live ranges.
1584  } else if (!cur_active->Covers(position)) {
1585  ActiveToInactive(cur_active);
1586  --i; // The live range was removed from the list of active live ranges.
1587  }
1588  }
1589 
1590  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1591  LiveRange* cur_inactive = inactive_live_ranges_.at(i);
1592  if (cur_inactive->End().Value() <= position.Value()) {
1593  InactiveToHandled(cur_inactive);
1594  --i; // Live range was removed from the list of inactive live ranges.
1595  } else if (cur_inactive->Covers(position)) {
1596  InactiveToActive(cur_inactive);
1597  --i; // Live range was removed from the list of inactive live ranges.
1598  }
1599  }
1600 
1601  ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
1602 
1603  bool result = TryAllocateFreeReg(current);
1604  if (!AllocationOk()) return;
1605 
1606  if (!result) AllocateBlockedReg(current);
1607  if (!AllocationOk()) return;
1608 
1609  if (current->HasRegisterAssigned()) {
1610  AddToActive(current);
1611  }
1612  }
1613 
1614  reusable_slots_.Rewind(0);
1615  active_live_ranges_.Rewind(0);
1616  inactive_live_ranges_.Rewind(0);
1617 }
1618 
1619 
1620 const char* LAllocator::RegisterName(int allocation_index) {
1621  if (mode_ == GENERAL_REGISTERS) {
1622  return Register::AllocationIndexToString(allocation_index);
1623  } else {
1624  return DoubleRegister::AllocationIndexToString(allocation_index);
1625  }
1626 }
1627 
1628 
1629 void LAllocator::TraceAlloc(const char* msg, ...) {
1630  if (FLAG_trace_alloc) {
1631  va_list arguments;
1632  va_start(arguments, msg);
1633  OS::VPrint(msg, arguments);
1634  va_end(arguments);
1635  }
1636 }
1637 
1638 
1639 bool LAllocator::HasTaggedValue(int virtual_register) const {
1640  HValue* value = graph_->LookupValue(virtual_register);
1641  if (value == NULL) return false;
1642  return value->representation().IsTagged() && !value->type().IsSmi();
1643 }
1644 
1645 
1646 RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
1647  if (virtual_register < first_artificial_register_) {
1648  HValue* value = graph_->LookupValue(virtual_register);
1649  if (value != NULL && value->representation().IsDouble()) {
1650  return DOUBLE_REGISTERS;
1651  }
1652  } else if (double_artificial_registers_.Contains(
1653  virtual_register - first_artificial_register_)) {
1654  return DOUBLE_REGISTERS;
1655  }
1656 
1657  return GENERAL_REGISTERS;
1658 }
1659 
1660 
1661 void LAllocator::AddToActive(LiveRange* range) {
1662  TraceAlloc("Add live range %d to active\n", range->id());
1663  active_live_ranges_.Add(range, zone());
1664 }
1665 
1666 
1667 void LAllocator::AddToInactive(LiveRange* range) {
1668  TraceAlloc("Add live range %d to inactive\n", range->id());
1669  inactive_live_ranges_.Add(range, zone());
1670 }
1671 
1672 
1673 void LAllocator::AddToUnhandledSorted(LiveRange* range) {
1674  if (range == NULL || range->IsEmpty()) return;
1675  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
1676  ASSERT(allocation_finger_.Value() <= range->Start().Value());
1677  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
1678  LiveRange* cur_range = unhandled_live_ranges_.at(i);
1679  if (range->ShouldBeAllocatedBefore(cur_range)) {
1680  TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
1681  unhandled_live_ranges_.InsertAt(i + 1, range, zone());
1682  ASSERT(UnhandledIsSorted());
1683  return;
1684  }
1685  }
1686  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
1687  unhandled_live_ranges_.InsertAt(0, range, zone());
1688  ASSERT(UnhandledIsSorted());
1689 }
1690 
1691 
1692 void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
1693  if (range == NULL || range->IsEmpty()) return;
1694  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
1695  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
1696  unhandled_live_ranges_.Add(range, zone());
1697 }
1698 
1699 
1700 static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
1701  ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
1702  !(*b)->ShouldBeAllocatedBefore(*a));
1703  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
1704  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
1705  return (*a)->id() - (*b)->id();
1706 }
1707 
1708 
1709 // Sort the unhandled live ranges so that the ranges to be processed first are
1710 // at the end of the array list. This is convenient for the register allocation
1711 // algorithm because it is efficient to remove elements from the end.
1712 void LAllocator::SortUnhandled() {
1713  TraceAlloc("Sort unhandled\n");
1714  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
1715 }
1716 
1717 
1718 bool LAllocator::UnhandledIsSorted() {
1719  int len = unhandled_live_ranges_.length();
1720  for (int i = 1; i < len; i++) {
1721  LiveRange* a = unhandled_live_ranges_.at(i - 1);
1722  LiveRange* b = unhandled_live_ranges_.at(i);
1723  if (a->Start().Value() < b->Start().Value()) return false;
1724  }
1725  return true;
1726 }
1727 
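
The comparator and the sorted-order check above maintain one invariant: ranges with later start positions sit nearer the front of the array, so the range with the smallest start is always the last element and can be removed cheaply. Below is a minimal, standalone sketch of that invariant over plain integers; it is not part of lithium-allocator.cc, and ToyRange and ProcessedLater are invented names.

// Editorial sketch only: toy model of the unhandled-list ordering.
#include <algorithm>
#include <cassert>
#include <vector>

struct ToyRange { int id; int start; };  // hypothetical stand-in for LiveRange

// Same idea as UnhandledSortHelper: a later start sorts toward the front, so
// the range that must be allocated first (smallest start) ends up at the back.
static bool ProcessedLater(const ToyRange& a, const ToyRange& b) {
  return a.start > b.start;
}

int main() {
  std::vector<ToyRange> unhandled = {{0, 4}, {1, 0}, {2, 8}};
  std::sort(unhandled.begin(), unhandled.end(), ProcessedLater);
  assert(unhandled.back().start == 0);   // next range to process
  unhandled.pop_back();                  // cheap removal from the end
  assert(unhandled.back().start == 4);
  return 0;
}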
1728 
1729 void LAllocator::FreeSpillSlot(LiveRange* range) {
1730  // Check that we are the last range.
1731  if (range->next() != NULL) return;
1732 
1733  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
1734 
1735  int index = range->TopLevel()->GetSpillOperand()->index();
1736  if (index >= 0) {
1737  reusable_slots_.Add(range, zone());
1738  }
1739 }
1740 
1741 
1742 LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
1743  if (reusable_slots_.is_empty()) return NULL;
1744  if (reusable_slots_.first()->End().Value() >
1745  range->TopLevel()->Start().Value()) {
1746  return NULL;
1747  }
1748  LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
1749  reusable_slots_.Remove(0);
1750  return result;
1751 }
1752 
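
FreeSpillSlot and TryReuseSpillSlot above implement a simple recycling rule: a stack slot given up by a finished range may be handed to a new range only if the old owner's live range ends no later than the new range starts, so the two never need the slot at the same time. A hedged sketch of that check follows; ToySlot and the literal positions are invented for illustration.

// Editorial sketch only: the reuse condition for recycled spill slots.
#include <cstdio>

struct ToySlot {
  int index;      // stack slot number
  int owner_end;  // end position of the range that last used the slot
};

int main() {
  ToySlot reusable = {3, 12};   // hypothetical recycled slot
  int new_range_start = 15;

  if (reusable.owner_end > new_range_start) {
    std::printf("cannot reuse slot %d yet\n", reusable.index);
  } else {
    std::printf("reuse slot %d\n", reusable.index);  // lifetimes do not overlap
  }
  return 0;
}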
1753 
1754 void LAllocator::ActiveToHandled(LiveRange* range) {
1755  ASSERT(active_live_ranges_.Contains(range));
1756  active_live_ranges_.RemoveElement(range);
1757  TraceAlloc("Moving live range %d from active to handled\n", range->id());
1758  FreeSpillSlot(range);
1759 }
1760 
1761 
1762 void LAllocator::ActiveToInactive(LiveRange* range) {
1763  ASSERT(active_live_ranges_.Contains(range));
1764  active_live_ranges_.RemoveElement(range);
1765  inactive_live_ranges_.Add(range, zone());
1766  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
1767 }
1768 
1769 
1770 void LAllocator::InactiveToHandled(LiveRange* range) {
1771  ASSERT(inactive_live_ranges_.Contains(range));
1772  inactive_live_ranges_.RemoveElement(range);
1773  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
1774  FreeSpillSlot(range);
1775 }
1776 
1777 
1778 void LAllocator::InactiveToActive(LiveRange* range) {
1779  ASSERT(inactive_live_ranges_.Contains(range));
1780  inactive_live_ranges_.RemoveElement(range);
1781  active_live_ranges_.Add(range, zone());
1782  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
1783 }
1784 
1785 
1786 // TryAllocateFreeReg and AllocateBlockedReg assume this
1787 // when allocating local arrays.
1788 STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
1789  Register::kMaxNumAllocatableRegisters);
1790 
1791 
1792 bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
1793  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1794 
1795  for (int i = 0; i < num_registers_; i++) {
1796  free_until_pos[i] = LifetimePosition::MaxPosition();
1797  }
1798 
1799  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1800  LiveRange* cur_active = active_live_ranges_.at(i);
1801  free_until_pos[cur_active->assigned_register()] =
1802  LifetimePosition::FromInstructionIndex(0);
1803  }
1804 
1805  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1806  LiveRange* cur_inactive = inactive_live_ranges_.at(i);
1807  ASSERT(cur_inactive->End().Value() > current->Start().Value());
1808  LifetimePosition next_intersection =
1809  cur_inactive->FirstIntersection(current);
1810  if (!next_intersection.IsValid()) continue;
1811  int cur_reg = cur_inactive->assigned_register();
1812  free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
1813  }
1814 
1815  LOperand* hint = current->FirstHint();
1816  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
1817  int register_index = hint->index();
1818  TraceAlloc(
1819  "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
1820  RegisterName(register_index),
1821  free_until_pos[register_index].Value(),
1822  current->id(),
1823  current->End().Value());
1824 
1825  // The desired register is free until the end of the current live range.
1826  if (free_until_pos[register_index].Value() >= current->End().Value()) {
1827  TraceAlloc("Assigning preferred reg %s to live range %d\n",
1828  RegisterName(register_index),
1829  current->id());
1830  SetLiveRangeAssignedRegister(current, register_index);
1831  return true;
1832  }
1833  }
1834 
1835  // Find the register which stays free for the longest time.
1836  int reg = 0;
1837  for (int i = 1; i < RegisterCount(); ++i) {
1838  if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
1839  reg = i;
1840  }
1841  }
1842 
1843  LifetimePosition pos = free_until_pos[reg];
1844 
1845  if (pos.Value() <= current->Start().Value()) {
1846  // All registers are blocked.
1847  return false;
1848  }
1849 
1850  if (pos.Value() < current->End().Value()) {
1851  // Register reg is available at the range start but becomes blocked before
1852  // the range end. Split the current range where it becomes blocked.
1853  LiveRange* tail = SplitRangeAt(current, pos);
1854  if (!AllocationOk()) return false;
1855  AddToUnhandledSorted(tail);
1856  }
1857 
1858 
1859  // Register reg is available at the range start and is free until
1860  // the range end.
1861  ASSERT(pos.Value() >= current->End().Value());
1862  TraceAlloc("Assigning free reg %s to live range %d\n",
1863  RegisterName(reg),
1864  current->id());
1865  SetLiveRangeAssignedRegister(current, reg);
1866 
1867  return true;
1868 }
1869 
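
TryAllocateFreeReg above reduces to three steps: compute, per register, the position until which it stays free; prefer the hinted register if it stays free past the end of the current range; otherwise take the register that stays free the longest, splitting the range when the register is only partially free. A simplified sketch of that decision with positions as plain integers (the register count and values are invented):

// Editorial sketch only: the free-register decision, with positions as ints.
#include <cstdio>

int main() {
  const int kNumRegs = 4;
  int free_until[kNumRegs] = {3, 12, 7, 5};  // hypothetical free-until positions
  int range_start = 2, range_end = 10;

  // Pick the register that stays free the longest.
  int reg = 0;
  for (int i = 1; i < kNumRegs; ++i) {
    if (free_until[i] > free_until[reg]) reg = i;
  }

  if (free_until[reg] <= range_start) {
    std::printf("all registers are blocked at the range start\n");
  } else if (free_until[reg] < range_end) {
    std::printf("assign r%d and split the tail at %d\n", reg, free_until[reg]);
  } else {
    std::printf("assign r%d for the whole range\n", reg);  // taken in this example
  }
  return 0;
}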
1870 
1871 void LAllocator::AllocateBlockedReg(LiveRange* current) {
1872  UsePosition* register_use = current->NextRegisterPosition(current->Start());
1873  if (register_use == NULL) {
1874  // There is no use in the current live range that requires a register.
1875  // We can just spill it.
1876  Spill(current);
1877  return;
1878  }
1879 
1880 
1881  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1882  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1883 
1884  for (int i = 0; i < num_registers_; i++) {
1885  use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
1886  }
1887 
1888  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1889  LiveRange* range = active_live_ranges_[i];
1890  int cur_reg = range->assigned_register();
1891  if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
1892  block_pos[cur_reg] = use_pos[cur_reg] =
1893  LifetimePosition::FromInstructionIndex(0);
1894  } else {
1895  UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
1896  current->Start());
1897  if (next_use == NULL) {
1898  use_pos[cur_reg] = range->End();
1899  } else {
1900  use_pos[cur_reg] = next_use->pos();
1901  }
1902  }
1903  }
1904 
1905  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1906  LiveRange* range = inactive_live_ranges_.at(i);
1907  ASSERT(range->End().Value() > current->Start().Value());
1908  LifetimePosition next_intersection = range->FirstIntersection(current);
1909  if (!next_intersection.IsValid()) continue;
1910  int cur_reg = range->assigned_register();
1911  if (range->IsFixed()) {
1912  block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
1913  use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
1914  } else {
1915  use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
1916  }
1917  }
1918 
1919  int reg = 0;
1920  for (int i = 1; i < RegisterCount(); ++i) {
1921  if (use_pos[i].Value() > use_pos[reg].Value()) {
1922  reg = i;
1923  }
1924  }
1925 
1926  LifetimePosition pos = use_pos[reg];
1927 
1928  if (pos.Value() < register_use->pos().Value()) {
1929  // All registers are blocked before the first use that requires a register.
1930  // Spill the starting part of the live range up to that use.
1931  SpillBetween(current, current->Start(), register_use->pos());
1932  return;
1933  }
1934 
1935  if (block_pos[reg].Value() < current->End().Value()) {
1936  // Register becomes blocked before the current range end. Split before that
1937  // position.
1938  LiveRange* tail = SplitBetween(current,
1939  current->Start(),
1940  block_pos[reg].InstructionStart());
1941  if (!AllocationOk()) return;
1942  AddToUnhandledSorted(tail);
1943  }
1944 
1945  // Register reg is not blocked for the whole range.
1946  ASSERT(block_pos[reg].Value() >= current->End().Value());
1947  TraceAlloc("Assigning blocked reg %s to live range %d\n",
1948  RegisterName(reg),
1949  current->id());
1950  SetLiveRangeAssignedRegister(current, reg);
1951 
1952  // This register was not free. Thus we need to find and spill
1953  // parts of active and inactive live ranges that use the same register
1954  // at the same lifetime positions as the current range.
1955  SplitAndSpillIntersecting(current);
1956 }
1957 
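
AllocateBlockedReg above chooses between spilling the current range and evicting another one: it finds the register whose next competing use lies farthest in the future, and if even that use comes before the first position where the current range genuinely needs a register, spilling the current range is the cheaper option. A hedged sketch with invented positions:

// Editorial sketch only: the spill-vs-evict decision, with positions as ints.
#include <cstdio>

int main() {
  const int kNumRegs = 3;
  int use_pos[kNumRegs] = {6, 15, 9};  // next competing use per register
  int first_register_use = 8;          // current range first needs a register here

  // Register whose competing use is farthest in the future.
  int reg = 0;
  for (int i = 1; i < kNumRegs; ++i) {
    if (use_pos[i] > use_pos[reg]) reg = i;
  }

  if (use_pos[reg] < first_register_use) {
    std::printf("spill the current range up to position %d\n", first_register_use);
  } else {
    std::printf("evict: assign r%d and split/spill intersecting ranges\n", reg);
  }
  return 0;
}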
1958 
1959 LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range,
1960  LifetimePosition pos) {
1961  HBasicBlock* block = GetBlock(pos.InstructionStart());
1962  HBasicBlock* loop_header =
1963  block->IsLoopHeader() ? block : block->parent_loop_header();
1964 
1965  if (loop_header == NULL) return pos;
1966 
1967  UsePosition* prev_use =
1968  range->PreviousUsePositionRegisterIsBeneficial(pos);
1969 
1970  while (loop_header != NULL) {
1971  // We are going to spill the live range inside the loop.
1972  // If possible, try to move the spilling position backwards to the loop
1973  // header. This reduces the number of memory moves on the back edge.
1974  LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
1975  loop_header->first_instruction_index());
1976 
1977  if (range->Covers(loop_start)) {
1978  if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
1979  // No register-beneficial use inside the loop before the position.
1980  pos = loop_start;
1981  }
1982  }
1983 
1984  // Try hoisting out to an outer loop.
1985  loop_header = loop_header->parent_loop_header();
1986  }
1987 
1988  return pos;
1989 }
1990 
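
FindOptimalSpillingPos above walks the chain of enclosing loop headers and hoists the spill position up to a loop start whenever the range is live there and no register-beneficial use sits in between, so the store is executed once before the loop rather than on every back edge. A simplified sketch of that walk; ToyLoop and HoistSpill are invented names and positions are plain integers.

// Editorial sketch only: hoisting a spill position out of nested loops.
#include <cstdio>

struct ToyLoop {
  int header_pos;         // position of the loop header's first instruction
  const ToyLoop* parent;  // enclosing loop, if any
};

int HoistSpill(int pos, int range_start, int prev_beneficial_use,
               const ToyLoop* loop) {
  while (loop != NULL) {
    int loop_start = loop->header_pos;
    // The range is live at the loop start and no beneficial use blocks the hoist.
    if (range_start <= loop_start && prev_beneficial_use < loop_start) {
      pos = loop_start;
    }
    loop = loop->parent;  // also try the outer loops
  }
  return pos;
}

int main() {
  ToyLoop outer = {10, NULL};
  ToyLoop inner = {20, &outer};
  std::printf("spill at %d\n", HoistSpill(25, 5, 8, &inner));  // hoists to 10
  return 0;
}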
1991 
1992 void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
1993  ASSERT(current->HasRegisterAssigned());
1994  int reg = current->assigned_register();
1995  LifetimePosition split_pos = current->Start();
1996  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1997  LiveRange* range = active_live_ranges_[i];
1998  if (range->assigned_register() == reg) {
1999  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2000  LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
2001  if (next_pos == NULL) {
2002  SpillAfter(range, spill_pos);
2003  } else {
2004  // When spilling between spill_pos and next_pos, ensure that the range
2005  // remains spilled at least until the start of the current live range.
2006  // This guarantees that we will not introduce new unhandled ranges that
2007  // start before the current range, as that would violate the allocation
2008  // invariant and lead to an inconsistent state of the active and inactive
2009  // live ranges: ranges are allocated in order of their start positions,
2010  // and ranges are retired from active/inactive when the start of the
2011  // current live range is larger than their end.
2012  SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
2013  }
2014  if (!AllocationOk()) return;
2015  ActiveToHandled(range);
2016  --i;
2017  }
2018  }
2019 
2020  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
2021  LiveRange* range = inactive_live_ranges_[i];
2022  ASSERT(range->End().Value() > current->Start().Value());
2023  if (range->assigned_register() == reg && !range->IsFixed()) {
2024  LifetimePosition next_intersection = range->FirstIntersection(current);
2025  if (next_intersection.IsValid()) {
2026  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2027  if (next_pos == NULL) {
2028  SpillAfter(range, split_pos);
2029  } else {
2030  next_intersection = Min(next_intersection, next_pos->pos());
2031  SpillBetween(range, split_pos, next_intersection);
2032  }
2033  if (!AllocationOk()) return;
2034  InactiveToHandled(range);
2035  --i;
2036  }
2037  }
2038  }
2039 }
2040 
2041 
2042 bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
2043  return pos.IsInstructionStart() &&
2044  InstructionAt(pos.InstructionIndex())->IsLabel();
2045 }
2046 
2047 
2048 LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
2049  ASSERT(!range->IsFixed());
2050  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
2051 
2052  if (pos.Value() <= range->Start().Value()) return range;
2053 
2054  // We can't properly connect live ranges if the split occurred at the end
2055  // of a control instruction.
2056  ASSERT(pos.IsInstructionStart() ||
2057  !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
2058 
2059  int vreg = GetVirtualRegister();
2060  if (!AllocationOk()) return NULL;
2061  LiveRange* result = LiveRangeFor(vreg);
2062  range->SplitAt(pos, result, zone());
2063  return result;
2064 }
2065 
2066 
2067 LiveRange* LAllocator::SplitBetween(LiveRange* range,
2068  LifetimePosition start,
2069  LifetimePosition end) {
2070  ASSERT(!range->IsFixed());
2071  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
2072  range->id(),
2073  start.Value(),
2074  end.Value());
2075 
2076  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2077  ASSERT(split_pos.Value() >= start.Value());
2078  return SplitRangeAt(range, split_pos);
2079 }
2080 
2081 
2082 LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
2083  LifetimePosition end) {
2084  int start_instr = start.InstructionIndex();
2085  int end_instr = end.InstructionIndex();
2086  ASSERT(start_instr <= end_instr);
2087 
2088  // We have no choice
2089  if (start_instr == end_instr) return end;
2090 
2091  HBasicBlock* start_block = GetBlock(start);
2092  HBasicBlock* end_block = GetBlock(end);
2093 
2094  if (end_block == start_block) {
2095  // The interval is split in the same basic block. Split at the latest
2096  // possible position.
2097  return end;
2098  }
2099 
2100  HBasicBlock* block = end_block;
2101  // Find header of outermost loop.
2102  while (block->parent_loop_header() != NULL &&
2103  block->parent_loop_header()->block_id() > start_block->block_id()) {
2104  block = block->parent_loop_header();
2105  }
2106 
2107  // We did not find any suitable outer loop. Split at the latest possible
2108  // position unless end_block is a loop header itself.
2109  if (block == end_block && !end_block->IsLoopHeader()) return end;
2110 
2111  return LifetimePosition::FromInstructionIndex(
2112  block->first_instruction_index());
2113 }
2114 
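
FindOptimalSplitPos above splits as late as possible, except when the end position lies inside a loop that does not contain the start: then the split is moved up to the first instruction of the outermost such loop so the resulting gap move does not run on every iteration. A simplified sketch; ToyBlock and OptimalSplit are invented, the ids and positions are made up, and the IsLoopHeader special case from the real code is omitted.

// Editorial sketch only: choosing a split position outside an enclosing loop.
#include <cstdio>

struct ToyBlock {
  int block_id;
  int first_instruction;
  const ToyBlock* parent_loop_header;
};

int OptimalSplit(const ToyBlock* start_block, const ToyBlock* end_block,
                 int end_pos) {
  const ToyBlock* block = end_block;
  while (block->parent_loop_header != NULL &&
         block->parent_loop_header->block_id > start_block->block_id) {
    block = block->parent_loop_header;  // climb to an enclosing loop header
  }
  // No loop strictly between start and end: split as late as possible.
  if (block == end_block) return end_pos;
  return block->first_instruction;      // split just before entering the loop
}

int main() {
  ToyBlock b0   = {0, 0, NULL};    // block containing the start position
  ToyBlock loop = {2, 20, NULL};   // loop header located after the start
  ToyBlock body = {3, 30, &loop};  // block containing the end, inside the loop
  std::printf("split at %d\n", OptimalSplit(&b0, &body, 35));  // prints 20
  return 0;
}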
2115 
2116 void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
2117  LiveRange* second_part = SplitRangeAt(range, pos);
2118  if (!AllocationOk()) return;
2119  Spill(second_part);
2120 }
2121 
2122 
2123 void LAllocator::SpillBetween(LiveRange* range,
2124  LifetimePosition start,
2125  LifetimePosition end) {
2126  SpillBetweenUntil(range, start, start, end);
2127 }
2128 
2129 
2130 void LAllocator::SpillBetweenUntil(LiveRange* range,
2131  LifetimePosition start,
2132  LifetimePosition until,
2133  LifetimePosition end) {
2134  CHECK(start.Value() < end.Value());
2135  LiveRange* second_part = SplitRangeAt(range, start);
2136  if (!AllocationOk()) return;
2137 
2138  if (second_part->Start().Value() < end.Value()) {
2139  // The split result intersects with [start, end[.
2140  // Split it at a position in ]start+1, end[, spill the middle part,
2141  // and put the rest on the unhandled list.
2142  LiveRange* third_part = SplitBetween(
2143  second_part,
2144  Max(second_part->Start().InstructionEnd(), until),
2145  end.PrevInstruction().InstructionEnd());
2146  if (!AllocationOk()) return;
2147 
2148  ASSERT(third_part != second_part);
2149 
2150  Spill(second_part);
2151  AddToUnhandledSorted(third_part);
2152  } else {
2153  // The split result does not intersect with [start, end[.
2154  // Nothing to spill. Just put the whole range on the unhandled list.
2155  AddToUnhandledSorted(second_part);
2156  }
2157 }
2158 
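
SpillBetween and SpillBetweenUntil above cut a range into at most three pieces: a head that keeps whatever the allocator already decided, a middle piece that is spilled because it overlaps the blocked window, and a tail that is put back on the unhandled list so it can still receive a register. A worked sketch with invented integer positions; the real code picks the second split point via SplitBetween/FindOptimalSplitPos rather than hard-coding it.

// Editorial sketch only: the three-way split performed by SpillBetweenUntil.
#include <cstdio>

int main() {
  int range_start = 0, range_end = 30;  // hypothetical top-level live range
  int start = 5, end = 20;              // window that must be spent in memory

  int second_start = start;             // first split point
  if (second_start < end) {
    int third_start = 18;               // second split point chosen in ]start, end[
    std::printf("keep    [%d, %d)\n", range_start, second_start);
    std::printf("spill   [%d, %d)\n", second_start, third_start);
    std::printf("requeue [%d, %d)\n", third_start, range_end);
  } else {
    std::printf("requeue [%d, %d)\n", second_start, range_end);
  }
  return 0;
}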
2159 
2160 void LAllocator::Spill(LiveRange* range) {
2161  ASSERT(!range->IsSpilled());
2162  TraceAlloc("Spilling live range %d\n", range->id());
2163  LiveRange* first = range->TopLevel();
2164 
2165  if (!first->HasAllocatedSpillOperand()) {
2166  LOperand* op = TryReuseSpillSlot(range);
2167  if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
2168  first->SetSpillOperand(op);
2169  }
2170  range->MakeSpilled(chunk()->zone());
2171 }
2172 
2173 
2174 int LAllocator::RegisterCount() const {
2175  return num_registers_;
2176 }
2177 
2178 
2179 #ifdef DEBUG
2180 
2181 
2182 void LAllocator::Verify() const {
2183  for (int i = 0; i < live_ranges()->length(); ++i) {
2184  LiveRange* current = live_ranges()->at(i);
2185  if (current != NULL) current->Verify();
2186  }
2187 }
2188 
2189 
2190 #endif
2191 
2192 
2193 LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
2194  : CompilationPhase(name, allocator->graph()->info()),
2195  allocator_(allocator) {
2196  if (FLAG_hydrogen_stats) {
2197  allocator_zone_start_allocation_size_ =
2198  allocator->zone()->allocation_size();
2199  }
2200 }
2201 
2202 
2203 LAllocatorPhase::~LAllocatorPhase() {
2204  if (FLAG_hydrogen_stats) {
2205  unsigned size = allocator_->zone()->allocation_size() -
2206  allocator_zone_start_allocation_size_;
2207  isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
2208  }
2209 
2210  if (ShouldProduceTraceOutput()) {
2211  isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
2212  isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
2213  }
2214 
2215 #ifdef DEBUG
2216  if (allocator_ != NULL) allocator_->Verify();
2217 #endif
2218 }
2219 
2220 
2221 } } // namespace v8::internal