v8 3.25.30 (node 0.11.13): hydrogen-instructions.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "double.h"
#include "factory.h"
#include "hydrogen-infer-representation.h"
#include "property-details-inl.h"

#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error Unsupported target architecture.
#endif

namespace v8 {
namespace internal {

#define DEFINE_COMPILE(type)                                         \
  LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) {  \
    return builder->Do##type(this);                                  \
  }
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
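
// Illustration (a sketch, assuming HAdd is among the types supplied by
// HYDROGEN_CONCRETE_INSTRUCTION_LIST): for each concrete instruction the
// DEFINE_COMPILE macro above expands to
//
//   LInstruction* HAdd::CompileToLithium(LChunkBuilder* builder) {
//     return builder->DoAdd(this);
//   }
//
// so every hydrogen instruction dispatches to the matching Do* method of
// the architecture-specific lithium builder selected by the #if above.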


Isolate* HValue::isolate() const {
  ASSERT(block() != NULL);
  return block()->isolate();
}


void HValue::AssumeRepresentation(Representation r) {
  if (CheckFlag(kFlexibleRepresentation)) {
    ChangeRepresentation(r);
    // The representation of the value is dictated by type feedback and
    // will not be changed later.
    ClearFlag(kFlexibleRepresentation);
  }
}


void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
  ASSERT(CheckFlag(kFlexibleRepresentation));
  Representation new_rep = RepresentationFromInputs();
  UpdateRepresentation(new_rep, h_infer, "inputs");
  new_rep = RepresentationFromUses();
  UpdateRepresentation(new_rep, h_infer, "uses");
  if (representation().IsSmi() && HasNonSmiUse()) {
    UpdateRepresentation(
        Representation::Integer32(), h_infer, "use requirements");
  }
}


Representation HValue::RepresentationFromUses() {
  if (HasNoUses()) return Representation::None();

  // Array of use counts for each representation.
  int use_count[Representation::kNumRepresentations] = { 0 };

  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
    HValue* use = it.value();
    Representation rep = use->observed_input_representation(it.index());
    if (rep.IsNone()) continue;
    if (FLAG_trace_representation) {
      PrintF("#%d %s is used by #%d %s as %s%s\n",
             id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
             (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
    }
    use_count[rep.kind()] += 1;
  }
  if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
  int tagged_count = use_count[Representation::kTagged];
  int double_count = use_count[Representation::kDouble];
  int int32_count = use_count[Representation::kInteger32];
  int smi_count = use_count[Representation::kSmi];

  if (tagged_count > 0) return Representation::Tagged();
  if (double_count > 0) return Representation::Double();
  if (int32_count > 0) return Representation::Integer32();
  if (smi_count > 0) return Representation::Smi();

  return Representation::None();
}
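
// Worked example (illustrative): a value with two Integer32 uses and one
// Double use yields use_count == {kTagged: 0, kDouble: 1, kInteger32: 2,
// kSmi: 0}, and the cascade above picks Double. The checks go from most
// general to least general (Tagged > Double > Integer32 > Smi), so a
// single more-general use wins regardless of the counts.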


void HValue::UpdateRepresentation(Representation new_rep,
                                  HInferRepresentationPhase* h_infer,
                                  const char* reason) {
  Representation r = representation();
  if (new_rep.is_more_general_than(r)) {
    if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
    if (FLAG_trace_representation) {
      PrintF("Changing #%d %s representation %s -> %s based on %s\n",
             id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
    }
    ChangeRepresentation(new_rep);
    AddDependantsToWorklist(h_infer);
  }
}


void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
    h_infer->AddToWorklist(it.value());
  }
  for (int i = 0; i < OperandCount(); ++i) {
    h_infer->AddToWorklist(OperandAt(i));
  }
}


static int32_t ConvertAndSetOverflow(Representation r,
                                     int64_t result,
                                     bool* overflow) {
  if (r.IsSmi()) {
    if (result > Smi::kMaxValue) {
      *overflow = true;
      return Smi::kMaxValue;
    }
    if (result < Smi::kMinValue) {
      *overflow = true;
      return Smi::kMinValue;
    }
  } else {
    if (result > kMaxInt) {
      *overflow = true;
      return kMaxInt;
    }
    if (result < kMinInt) {
      *overflow = true;
      return kMinInt;
    }
  }
  return static_cast<int32_t>(result);
}


static int32_t AddWithoutOverflow(Representation r,
                                  int32_t a,
                                  int32_t b,
                                  bool* overflow) {
  int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  return ConvertAndSetOverflow(r, result, overflow);
}


static int32_t SubWithoutOverflow(Representation r,
                                  int32_t a,
                                  int32_t b,
                                  bool* overflow) {
  int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
  return ConvertAndSetOverflow(r, result, overflow);
}


static int32_t MulWithoutOverflow(const Representation& r,
                                  int32_t a,
                                  int32_t b,
                                  bool* overflow) {
  int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  return ConvertAndSetOverflow(r, result, overflow);
}
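
// Usage sketch (illustrative): the helpers above saturate instead of
// wrapping and report the clamp through |overflow|:
//
//   bool overflow = false;
//   int32_t sum = AddWithoutOverflow(Representation::Integer32(),
//                                    kMaxInt, 1, &overflow);
//   // sum == kMaxInt and overflow == true; a raw int32 addition would
//   // have wrapped around to kMinInt.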


int32_t Range::Mask() const {
  if (lower_ == upper_) return lower_;
  if (lower_ >= 0) {
    int32_t res = 1;
    while (res < upper_) {
      res = (res << 1) | 1;
    }
    return res;
  }
  return 0xffffffff;
}
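
// Example (illustrative): for the range [3, 10] the loop grows the
// candidate mask 1 -> 3 -> 7 -> 15 until it covers upper_, so Mask()
// returns 15 (0b1111): every value in the range fits in those low bits.
// A range that can contain negative values conservatively returns
// 0xffffffff.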


void Range::AddConstant(int32_t value) {
  if (value == 0) return;
  bool may_overflow = false;  // Overflow is ignored here.
  Representation r = Representation::Integer32();
  lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow);
  upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow);
#ifdef DEBUG
  Verify();
#endif
}


void Range::Intersect(Range* other) {
  upper_ = Min(upper_, other->upper_);
  lower_ = Max(lower_, other->lower_);
  bool b = CanBeMinusZero() && other->CanBeMinusZero();
  set_can_be_minus_zero(b);
}


void Range::Union(Range* other) {
  upper_ = Max(upper_, other->upper_);
  lower_ = Min(lower_, other->lower_);
  bool b = CanBeMinusZero() || other->CanBeMinusZero();
  set_can_be_minus_zero(b);
}


void Range::CombinedMax(Range* other) {
  upper_ = Max(upper_, other->upper_);
  lower_ = Max(lower_, other->lower_);
  set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
}


void Range::CombinedMin(Range* other) {
  upper_ = Min(upper_, other->upper_);
  lower_ = Min(lower_, other->lower_);
  set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
}


void Range::Sar(int32_t value) {
  int32_t bits = value & 0x1F;
  lower_ = lower_ >> bits;
  upper_ = upper_ >> bits;
  set_can_be_minus_zero(false);
}


void Range::Shl(int32_t value) {
  int32_t bits = value & 0x1F;
  int old_lower = lower_;
  int old_upper = upper_;
  lower_ = lower_ << bits;
  upper_ = upper_ << bits;
  if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
    upper_ = kMaxInt;
    lower_ = kMinInt;
  }
  set_can_be_minus_zero(false);
}
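
// Note (illustrative): the shift-back comparison above detects lost bits.
// With bits == 1 and lower_ == 0x40000000, lower_ << 1 wraps to kMinInt
// and kMinInt >> 1 != 0x40000000, so the range is widened to the full
// [kMinInt, kMaxInt] interval.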


bool Range::AddAndCheckOverflow(const Representation& r, Range* other) {
  bool may_overflow = false;
  lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow);
  upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow);
  KeepOrder();
#ifdef DEBUG
  Verify();
#endif
  return may_overflow;
}


bool Range::SubAndCheckOverflow(const Representation& r, Range* other) {
  bool may_overflow = false;
  lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow);
  upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow);
  KeepOrder();
#ifdef DEBUG
  Verify();
#endif
  return may_overflow;
}


void Range::KeepOrder() {
  if (lower_ > upper_) {
    int32_t tmp = lower_;
    lower_ = upper_;
    upper_ = tmp;
  }
}


#ifdef DEBUG
void Range::Verify() const {
  ASSERT(lower_ <= upper_);
}
#endif


bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
  bool may_overflow = false;
  int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow);
  int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow);
  int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow);
  int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow);
  lower_ = Min(Min(v1, v2), Min(v3, v4));
  upper_ = Max(Max(v1, v2), Max(v3, v4));
#ifdef DEBUG
  Verify();
#endif
  return may_overflow;
}
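
// Example (illustrative): all four corner products are needed because
// signs can flip the extremes. For [-2, 3] * [-5, 4] the minimum is
// 3 * -5 == -15 and the maximum is -2 * -5 == 10, and neither comes from
// lower*lower or upper*upper alone.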


const char* HType::ToString() {
  // Note: The c1visualizer syntax for locals allows only a sequence of the
  // following characters: A-Za-z0-9_-|:
  switch (type_) {
    case kNone: return "none";
    case kTagged: return "tagged";
    case kTaggedPrimitive: return "primitive";
    case kTaggedNumber: return "number";
    case kSmi: return "smi";
    case kHeapNumber: return "heap-number";
    case kString: return "string";
    case kBoolean: return "boolean";
    case kNonPrimitive: return "non-primitive";
    case kJSArray: return "array";
    case kJSObject: return "object";
  }
  UNREACHABLE();
  return "unreachable";
}


HType HType::TypeFromValue(Handle<Object> value) {
  HType result = HType::Tagged();
  if (value->IsSmi()) {
    result = HType::Smi();
  } else if (value->IsHeapNumber()) {
    result = HType::HeapNumber();
  } else if (value->IsString()) {
    result = HType::String();
  } else if (value->IsBoolean()) {
    result = HType::Boolean();
  } else if (value->IsJSObject()) {
    result = HType::JSObject();
  } else if (value->IsJSArray()) {
    result = HType::JSArray();
  }
  return result;
}


bool HValue::IsDefinedAfter(HBasicBlock* other) const {
  return block()->block_id() > other->block_id();
}


HUseListNode* HUseListNode::tail() {
  // Skip and remove dead items in the use list.
  while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
    tail_ = tail_->tail_;
  }
  return tail_;
}


bool HValue::CheckUsesForFlag(Flag f) const {
  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
    if (it.value()->IsSimulate()) continue;
    if (!it.value()->CheckFlag(f)) return false;
  }
  return true;
}


bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
    if (it.value()->IsSimulate()) continue;
    if (!it.value()->CheckFlag(f)) {
      *value = it.value();
      return false;
    }
  }
  return true;
}


bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
  bool return_value = false;
  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
    if (it.value()->IsSimulate()) continue;
    if (!it.value()->CheckFlag(f)) return false;
    return_value = true;
  }
  return return_value;
}


HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
  Advance();
}


void HUseIterator::Advance() {
  current_ = next_;
  if (current_ != NULL) {
    next_ = current_->tail();
    value_ = current_->value();
    index_ = current_->index();
  }
}


int HValue::UseCount() const {
  int count = 0;
  for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
  return count;
}


HUseListNode* HValue::RemoveUse(HValue* value, int index) {
  HUseListNode* previous = NULL;
  HUseListNode* current = use_list_;
  while (current != NULL) {
    if (current->value() == value && current->index() == index) {
      if (previous == NULL) {
        use_list_ = current->tail();
      } else {
        previous->set_tail(current->tail());
      }
      break;
    }

    previous = current;
    current = current->tail();
  }

#ifdef DEBUG
  // Do not reuse use list nodes in debug mode, zap them.
  if (current != NULL) {
    HUseListNode* temp =
        new(block()->zone())
        HUseListNode(current->value(), current->index(), NULL);
    current->Zap();
    current = temp;
  }
#endif
  return current;
}


bool HValue::Equals(HValue* other) {
  if (other->opcode() != opcode()) return false;
  if (!other->representation().Equals(representation())) return false;
  if (!other->type_.Equals(type_)) return false;
  if (other->flags() != flags()) return false;
  if (OperandCount() != other->OperandCount()) return false;
  for (int i = 0; i < OperandCount(); ++i) {
    if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
  }
  bool result = DataEquals(other);
  ASSERT(!result || Hashcode() == other->Hashcode());
  return result;
}


intptr_t HValue::Hashcode() {
  intptr_t result = opcode();
  int count = OperandCount();
  for (int i = 0; i < count; ++i) {
    result = result * 19 + OperandAt(i)->id() + (result >> 7);
  }
  return result;
}
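
// Note (added): Hashcode() has to be consistent with Equals(); the ASSERT
// in Equals() above verifies that DataEquals() implies matching hash
// codes. Folding in (result >> 7) on top of the multiply keeps operand
// order significant, so commuted operand lists generally hash differently.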


const char* HValue::Mnemonic() const {
  switch (opcode()) {
#define MAKE_CASE(type) case k##type: return #type;
    HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
#undef MAKE_CASE
    case kPhi: return "Phi";
    default: return "";
  }
}


bool HValue::CanReplaceWithDummyUses() {
  return FLAG_unreachable_code_elimination &&
      !(block()->IsReachable() ||
        IsBlockEntry() ||
        IsControlInstruction() ||
        IsSimulate() ||
        IsEnterInlined() ||
        IsLeaveInlined());
}


bool HValue::IsInteger32Constant() {
  return IsConstant() && HConstant::cast(this)->HasInteger32Value();
}


int32_t HValue::GetInteger32Constant() {
  return HConstant::cast(this)->Integer32Value();
}


bool HValue::EqualsInteger32Constant(int32_t value) {
  return IsInteger32Constant() && GetInteger32Constant() == value;
}


void HValue::SetOperandAt(int index, HValue* value) {
  RegisterUse(index, value);
  InternalSetOperandAt(index, value);
}


void HValue::DeleteAndReplaceWith(HValue* other) {
  // We replace all uses first, so Delete can assert that there are none.
  if (other != NULL) ReplaceAllUsesWith(other);
  Kill();
  DeleteFromGraph();
}


void HValue::ReplaceAllUsesWith(HValue* other) {
  while (use_list_ != NULL) {
    HUseListNode* list_node = use_list_;
    HValue* value = list_node->value();
    ASSERT(!value->block()->IsStartBlock());
    value->InternalSetOperandAt(list_node->index(), other);
    use_list_ = list_node->tail();
    list_node->set_tail(other->use_list_);
    other->use_list_ = list_node;
  }
}


void HValue::Kill() {
  // Instead of going through the entire use list of each operand, we only
  // check the first item in each use list and rely on the tail() method to
  // skip dead items, removing them lazily next time we traverse the list.
  SetFlag(kIsDead);
  for (int i = 0; i < OperandCount(); ++i) {
    HValue* operand = OperandAt(i);
    if (operand == NULL) continue;
    HUseListNode* first = operand->use_list_;
    if (first != NULL && first->value()->CheckFlag(kIsDead)) {
      operand->use_list_ = first->tail();
    }
  }
}
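
// Sketch of the lazy-deletion protocol (added): Kill() unlinks a dead use
// only when it happens to sit at the head of an operand's use list. Uses
// buried deeper in a list stay linked until the next traversal, where
// HUseListNode::tail() (defined above) skips and drops nodes whose value
// carries the kIsDead flag.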


void HValue::SetBlock(HBasicBlock* block) {
  ASSERT(block_ == NULL || block == NULL);
  block_ = block;
  if (id_ == kNoNumber && block != NULL) {
    id_ = block->graph()->GetNextValueID(this);
  }
}


void HValue::PrintTypeTo(StringStream* stream) {
  if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
  stream->Add(" type:%s", type().ToString());
}


void HValue::PrintRangeTo(StringStream* stream) {
  if (range() == NULL || range()->IsMostGeneric()) return;
  // Note: The c1visualizer syntax for locals allows only a sequence of the
  // following characters: A-Za-z0-9_-|:
  stream->Add(" range:%d_%d%s",
              range()->lower(),
              range()->upper(),
              range()->CanBeMinusZero() ? "_m0" : "");
}


void HValue::PrintChangesTo(StringStream* stream) {
  GVNFlagSet changes_flags = ChangesFlags();
  if (changes_flags.IsEmpty()) return;
  stream->Add(" changes[");
  if (changes_flags == AllSideEffectsFlagSet()) {
    stream->Add("*");
  } else {
    bool add_comma = false;
#define PRINT_DO(Type)                      \
    if (changes_flags.Contains(k##Type)) {  \
      if (add_comma) stream->Add(",");      \
      add_comma = true;                     \
      stream->Add(#Type);                   \
    }
    GVN_TRACKED_FLAG_LIST(PRINT_DO);
    GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
#undef PRINT_DO
  }
  stream->Add("]");
}


void HValue::PrintNameTo(StringStream* stream) {
  stream->Add("%s%d", representation_.Mnemonic(), id());
}


bool HValue::HasMonomorphicJSObjectType() {
  return !GetMonomorphicJSObjectMap().is_null();
}


bool HValue::UpdateInferredType() {
  HType type = CalculateInferredType();
  bool result = (!type.Equals(type_));
  type_ = type;
  return result;
}


void HValue::RegisterUse(int index, HValue* new_value) {
  HValue* old_value = OperandAt(index);
  if (old_value == new_value) return;

  HUseListNode* removed = NULL;
  if (old_value != NULL) {
    removed = old_value->RemoveUse(this, index);
  }

  if (new_value != NULL) {
    if (removed == NULL) {
      new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
          this, index, new_value->use_list_);
    } else {
      removed->set_tail(new_value->use_list_);
      new_value->use_list_ = removed;
    }
  }
}


void HValue::AddNewRange(Range* r, Zone* zone) {
  if (!HasRange()) ComputeInitialRange(zone);
  if (!HasRange()) range_ = new(zone) Range();
  ASSERT(HasRange());
  r->StackUpon(range_);
  range_ = r;
}


void HValue::RemoveLastAddedRange() {
  ASSERT(HasRange());
  ASSERT(range_->next() != NULL);
  range_ = range_->next();
}


void HValue::ComputeInitialRange(Zone* zone) {
  ASSERT(!HasRange());
  range_ = InferRange(zone);
  ASSERT(HasRange());
}


void HSourcePosition::PrintTo(FILE* out) {
  if (IsUnknown()) {
    PrintF(out, "<?>");
  } else {
    if (FLAG_hydrogen_track_positions) {
      PrintF(out, "<%d:%d>", inlining_id(), position());
    } else {
      PrintF(out, "<0:%d>", raw());
    }
  }
}


void HInstruction::PrintTo(StringStream* stream) {
  PrintMnemonicTo(stream);
  PrintDataTo(stream);
  PrintRangeTo(stream);
  PrintChangesTo(stream);
  PrintTypeTo(stream);
  if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
    stream->Add(" [noOSE]");
  }
  if (CheckFlag(HValue::kIsDead)) {
    stream->Add(" [dead]");
  }
}


void HInstruction::PrintDataTo(StringStream* stream) {
  for (int i = 0; i < OperandCount(); ++i) {
    if (i > 0) stream->Add(" ");
    OperandAt(i)->PrintNameTo(stream);
  }
}


void HInstruction::PrintMnemonicTo(StringStream* stream) {
  stream->Add("%s ", Mnemonic());
}


void HInstruction::Unlink() {
  ASSERT(IsLinked());
  ASSERT(!IsControlInstruction());  // Must never move control instructions.
  ASSERT(!IsBlockEntry());  // Doesn't make sense to delete these.
  ASSERT(previous_ != NULL);
  previous_->next_ = next_;
  if (next_ == NULL) {
    ASSERT(block()->last() == this);
    block()->set_last(previous_);
  } else {
    next_->previous_ = previous_;
  }
  clear_block();
}


void HInstruction::InsertBefore(HInstruction* next) {
  ASSERT(!IsLinked());
  ASSERT(!next->IsBlockEntry());
  ASSERT(!IsControlInstruction());
  ASSERT(!next->block()->IsStartBlock());
  ASSERT(next->previous_ != NULL);
  HInstruction* prev = next->previous();
  prev->next_ = this;
  next->previous_ = this;
  next_ = next;
  previous_ = prev;
  SetBlock(next->block());
  if (!has_position() && next->has_position()) {
    set_position(next->position());
  }
}


void HInstruction::InsertAfter(HInstruction* previous) {
  ASSERT(!IsLinked());
  ASSERT(!previous->IsControlInstruction());
  ASSERT(!IsControlInstruction() || previous->next_ == NULL);
  HBasicBlock* block = previous->block();
  // Never insert anything except constants into the start block after finishing
  // it.
  if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
    ASSERT(block->end()->SecondSuccessor() == NULL);
    InsertAfter(block->end()->FirstSuccessor()->first());
    return;
  }

  // If we're inserting after an instruction with side-effects that is
  // followed by a simulate instruction, we need to insert after the
  // simulate instruction instead.
  HInstruction* next = previous->next_;
  if (previous->HasObservableSideEffects() && next != NULL) {
    ASSERT(next->IsSimulate());
    previous = next;
    next = previous->next_;
  }

  previous_ = previous;
  next_ = next;
  SetBlock(block);
  previous->next_ = this;
  if (next != NULL) next->previous_ = this;
  if (block->last() == previous) {
    block->set_last(this);
  }
  if (!has_position() && previous->has_position()) {
    set_position(previous->position());
  }
}


#ifdef DEBUG
void HInstruction::Verify() {
  // Verify that input operands are defined before use.
  HBasicBlock* cur_block = block();
  for (int i = 0; i < OperandCount(); ++i) {
    HValue* other_operand = OperandAt(i);
    if (other_operand == NULL) continue;
    HBasicBlock* other_block = other_operand->block();
    if (cur_block == other_block) {
      if (!other_operand->IsPhi()) {
        HInstruction* cur = this->previous();
        while (cur != NULL) {
          if (cur == other_operand) break;
          cur = cur->previous();
        }
        // Must reach other operand in the same block!
        ASSERT(cur == other_operand);
      }
    } else {
      // If the following assert fires, you may have forgotten an
      // AddInstruction.
      ASSERT(other_block->Dominates(cur_block));
    }
  }

  // Verify that instructions that may have side-effects are followed
  // by a simulate instruction.
  if (HasObservableSideEffects() && !IsOsrEntry()) {
    ASSERT(next()->IsSimulate());
  }

  // Verify that instructions that can be eliminated by GVN have overridden
  // HValue::DataEquals. The default implementation is UNREACHABLE. We
  // don't actually care whether DataEquals returns true or false here.
  if (CheckFlag(kUseGVN)) DataEquals(this);

  // Verify that all uses are in the graph.
  for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
    if (use.value()->IsInstruction()) {
      ASSERT(HInstruction::cast(use.value())->IsLinked());
    }
  }
}
#endif


static bool HasPrimitiveRepresentation(HValue* instr) {
  return instr->representation().IsInteger32() ||
      instr->representation().IsDouble();
}


bool HInstruction::CanDeoptimize() {
  // TODO(titzer): make this a virtual method?
  switch (opcode()) {
    case HValue::kAccessArgumentsAt:
    case HValue::kApplyArguments:
    case HValue::kArgumentsElements:
    case HValue::kArgumentsLength:
    case HValue::kArgumentsObject:
    case HValue::kBoundsCheckBaseIndexInformation:
    case HValue::kCapturedObject:
    case HValue::kClampToUint8:
    case HValue::kConstant:
    case HValue::kContext:
    case HValue::kDateField:
    case HValue::kDebugBreak:
    case HValue::kDeclareGlobals:
    case HValue::kDiv:
    case HValue::kDummyUse:
    case HValue::kEnterInlined:
    case HValue::kEnvironmentMarker:
    case HValue::kForInCacheArray:
    case HValue::kForInPrepareMap:
    case HValue::kFunctionLiteral:
    case HValue::kGetCachedArrayIndex:
    case HValue::kGoto:
    case HValue::kInnerAllocatedObject:
    case HValue::kInstanceOf:
    case HValue::kInstanceOfKnownGlobal:
    case HValue::kInvokeFunction:
    case HValue::kLeaveInlined:
    case HValue::kLoadContextSlot:
    case HValue::kLoadFieldByIndex:
    case HValue::kLoadFunctionPrototype:
    case HValue::kLoadGlobalCell:
    case HValue::kLoadGlobalGeneric:
    case HValue::kLoadKeyed:
    case HValue::kLoadKeyedGeneric:
    case HValue::kLoadNamedField:
    case HValue::kLoadNamedGeneric:
    case HValue::kLoadRoot:
    case HValue::kMapEnumLength:
    case HValue::kMathFloorOfDiv:
    case HValue::kMathMinMax:
    case HValue::kMod:
    case HValue::kMul:
    case HValue::kOsrEntry:
    case HValue::kParameter:
    case HValue::kPower:
    case HValue::kPushArgument:
    case HValue::kRor:
    case HValue::kSar:
    case HValue::kSeqStringGetChar:
    case HValue::kSeqStringSetChar:
    case HValue::kShl:
    case HValue::kShr:
    case HValue::kSimulate:
    case HValue::kStackCheck:
    case HValue::kStoreCodeEntry:
    case HValue::kStoreContextSlot:
    case HValue::kStoreGlobalCell:
    case HValue::kStoreKeyed:
    case HValue::kStoreKeyedGeneric:
    case HValue::kStoreNamedField:
    case HValue::kStoreNamedGeneric:
    case HValue::kStringAdd:
    case HValue::kStringCharCodeAt:
    case HValue::kStringCharFromCode:
    case HValue::kSub:
    case HValue::kThisFunction:
    case HValue::kToFastProperties:
    case HValue::kTransitionElementsKind:
    case HValue::kTrapAllocationMemento:
    case HValue::kTypeof:
    case HValue::kUnaryMathOperation:
    case HValue::kUseConst:
    case HValue::kWrapReceiver:
      return false;
    case HValue::kForceRepresentation:
    case HValue::kAdd:
    case HValue::kBitwise:
    case HValue::kChange:
    case HValue::kCompareGeneric:
      // These instructions might deoptimize if they are not primitive.
      if (!HasPrimitiveRepresentation(this)) return true;
      for (int i = 0; i < OperandCount(); i++) {
        HValue* input = OperandAt(i);
        if (!HasPrimitiveRepresentation(input)) return true;
      }
      return false;
    default:
      return true;
  }
}


void HDummyUse::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
}


void HEnvironmentMarker::PrintDataTo(StringStream* stream) {
  stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index());
}


void HUnaryCall::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" ");
  stream->Add("#%d", argument_count());
}


void HCallJSFunction::PrintDataTo(StringStream* stream) {
  function()->PrintNameTo(stream);
  stream->Add(" ");
  stream->Add("#%d", argument_count());
}


HCallJSFunction* HCallJSFunction::New(
    Zone* zone,
    HValue* context,
    HValue* function,
    int argument_count,
    bool pass_argument_count) {
  bool has_stack_check = false;
  if (function->IsConstant()) {
    HConstant* fun_const = HConstant::cast(function);
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(zone->isolate()));
    has_stack_check = !jsfun.is_null() &&
        (jsfun->code()->kind() == Code::FUNCTION ||
         jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
  }

  return new(zone) HCallJSFunction(
      function, argument_count, pass_argument_count,
      has_stack_check);
}


void HBinaryCall::PrintDataTo(StringStream* stream) {
  first()->PrintNameTo(stream);
  stream->Add(" ");
  second()->PrintNameTo(stream);
  stream->Add(" ");
  stream->Add("#%d", argument_count());
}


void HBoundsCheck::ApplyIndexChange() {
  if (skip_check()) return;

  DecompositionResult decomposition;
  bool index_is_decomposable = index()->TryDecompose(&decomposition);
  if (index_is_decomposable) {
    ASSERT(decomposition.base() == base());
    if (decomposition.offset() == offset() &&
        decomposition.scale() == scale()) return;
  } else {
    return;
  }

  ReplaceAllUsesWith(index());

  HValue* current_index = decomposition.base();
  int actual_offset = decomposition.offset() + offset();
  int actual_scale = decomposition.scale() + scale();

  Zone* zone = block()->graph()->zone();
  HValue* context = block()->graph()->GetInvalidContext();
  if (actual_offset != 0) {
    HConstant* add_offset = HConstant::New(zone, context, actual_offset);
    add_offset->InsertBefore(this);
    HInstruction* add = HAdd::New(zone, context,
                                  current_index, add_offset);
    add->InsertBefore(this);
    add->AssumeRepresentation(index()->representation());
    add->ClearFlag(kCanOverflow);
    current_index = add;
  }

  if (actual_scale != 0) {
    HConstant* sar_scale = HConstant::New(zone, context, actual_scale);
    sar_scale->InsertBefore(this);
    HInstruction* sar = HSar::New(zone, context,
                                  current_index, sar_scale);
    sar->InsertBefore(this);
    sar->AssumeRepresentation(index()->representation());
    current_index = sar;
  }

  SetOperandAt(0, current_index);

  base_ = NULL;
  offset_ = 0;
  scale_ = 0;
}
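
// Worked example (hypothetical values): suppose index() decomposes into
// base b with offset 8 and scale 1, while this check was created with
// offset() == 4 and scale() == 0. Then actual_offset == 12 and
// actual_scale == 1, and the code above rebuilds the checked value as
// ((b + 12) >> 1): an HAdd with kCanOverflow cleared (the decomposition
// already proved it safe) followed by an HSar, after which the
// decomposition fields are reset.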


void HBoundsCheck::PrintDataTo(StringStream* stream) {
  index()->PrintNameTo(stream);
  stream->Add(" ");
  length()->PrintNameTo(stream);
  if (base() != NULL && (offset() != 0 || scale() != 0)) {
    stream->Add(" base: ((");
    if (base() != index()) {
      index()->PrintNameTo(stream);
    } else {
      stream->Add("index");
    }
    stream->Add(" + %d) >> %d)", offset(), scale());
  }
  if (skip_check()) {
    stream->Add(" [DISABLED]");
  }
}


void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
  ASSERT(CheckFlag(kFlexibleRepresentation));
  HValue* actual_index = index()->ActualValue();
  HValue* actual_length = length()->ActualValue();
  Representation index_rep = actual_index->representation();
  Representation length_rep = actual_length->representation();
  if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
    index_rep = Representation::Smi();
  }
  if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
    length_rep = Representation::Smi();
  }
  Representation r = index_rep.generalize(length_rep);
  if (r.is_more_general_than(Representation::Integer32())) {
    r = Representation::Integer32();
  }
  UpdateRepresentation(r, h_infer, "boundscheck");
}


Range* HBoundsCheck::InferRange(Zone* zone) {
  Representation r = representation();
  if (r.IsSmiOrInteger32() && length()->HasRange()) {
    int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
    int lower = 0;

    Range* result = new(zone) Range(lower, upper);
    if (index()->HasRange()) {
      result->Intersect(index()->range());
    }

    // In case of Smi representation, clamp result to Smi::kMaxValue.
    if (r.IsSmi()) result->ClampToSmi();
    return result;
  }
  return HValue::InferRange(zone);
}


void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
  stream->Add("base: ");
  base_index()->PrintNameTo(stream);
  stream->Add(", check: ");
  base_index()->PrintNameTo(stream);
}


void HCallWithDescriptor::PrintDataTo(StringStream* stream) {
  for (int i = 0; i < OperandCount(); i++) {
    OperandAt(i)->PrintNameTo(stream);
    stream->Add(" ");
  }
  stream->Add("#%d", argument_count());
}


void HCallNewArray::PrintDataTo(StringStream* stream) {
  stream->Add(ElementsKindToString(elements_kind()));
  stream->Add(" ");
  HBinaryCall::PrintDataTo(stream);
}


void HCallRuntime::PrintDataTo(StringStream* stream) {
  stream->Add("%o ", *name());
  if (save_doubles() == kSaveFPRegs) {
    stream->Add("[save doubles] ");
  }
  stream->Add("#%d", argument_count());
}


void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("class_of_test(");
  value()->PrintNameTo(stream);
  stream->Add(", \"%o\")", *class_name());
}


void HWrapReceiver::PrintDataTo(StringStream* stream) {
  receiver()->PrintNameTo(stream);
  stream->Add(" ");
  function()->PrintNameTo(stream);
}


void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
  arguments()->PrintNameTo(stream);
  stream->Add("[");
  index()->PrintNameTo(stream);
  stream->Add("], length ");
  length()->PrintNameTo(stream);
}


void HControlInstruction::PrintDataTo(StringStream* stream) {
  stream->Add(" goto (");
  bool first_block = true;
  for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
    stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
    first_block = false;
  }
  stream->Add(")");
}


void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  HControlInstruction::PrintDataTo(stream);
}


void HReturn::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" (pop ");
  parameter_count()->PrintNameTo(stream);
  stream->Add(" values)");
}


Representation HBranch::observed_input_representation(int index) {
  static const ToBooleanStub::Types tagged_types(
      ToBooleanStub::NULL_TYPE |
      ToBooleanStub::SPEC_OBJECT |
      ToBooleanStub::STRING |
      ToBooleanStub::SYMBOL);
  if (expected_input_types_.ContainsAnyOf(tagged_types)) {
    return Representation::Tagged();
  }
  if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
    if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
      return Representation::Double();
    }
    return Representation::Tagged();
  }
  if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
    return Representation::Double();
  }
  if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
    return Representation::Smi();
  }
  return Representation::None();
}


bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
  HValue* value = this->value();
  if (value->EmitAtUses()) {
    ASSERT(value->IsConstant());
    ASSERT(!value->representation().IsDouble());
    *block = HConstant::cast(value)->BooleanValue()
        ? FirstSuccessor()
        : SecondSuccessor();
    return true;
  }
  *block = NULL;
  return false;
}


void HCompareMap::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" (%p)", *map().handle());
  HControlInstruction::PrintDataTo(stream);
  if (known_successor_index() == 0) {
    stream->Add(" [true]");
  } else if (known_successor_index() == 1) {
    stream->Add(" [false]");
  }
}


const char* HUnaryMathOperation::OpName() const {
  switch (op()) {
    case kMathFloor: return "floor";
    case kMathRound: return "round";
    case kMathAbs: return "abs";
    case kMathLog: return "log";
    case kMathExp: return "exp";
    case kMathSqrt: return "sqrt";
    case kMathPowHalf: return "pow-half";
    case kMathClz32: return "clz32";
    default:
      UNREACHABLE();
      return NULL;
  }
}


Range* HUnaryMathOperation::InferRange(Zone* zone) {
  Representation r = representation();
  if (op() == kMathClz32) return new(zone) Range(0, 32);
  if (r.IsSmiOrInteger32() && value()->HasRange()) {
    if (op() == kMathAbs) {
      int upper = value()->range()->upper();
      int lower = value()->range()->lower();
      bool spans_zero = value()->range()->CanBeZero();
      // Math.abs(kMinInt) overflows its representation, on which the
      // instruction deopts. Hence clamp it to kMaxInt.
      int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
      int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
      Range* result =
          new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
                          Max(abs_lower, abs_upper));
      // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
      // Smi::kMaxValue.
      if (r.IsSmi()) result->ClampToSmi();
      return result;
    }
  }
  return HValue::InferRange(zone);
}
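
// Example (illustrative): for a value ranging over [-5, 3], spans_zero is
// true and the absolute bounds are 5 and 3, so Math.abs gets the range
// [0, 5]. For [kMinInt, -1] the kMinInt endpoint is clamped to kMaxInt,
// since Math.abs(kMinInt) deopts instead of producing a result.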


void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
  const char* name = OpName();
  stream->Add("%s ", name);
  value()->PrintNameTo(stream);
}


void HUnaryOperation::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
}


void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  switch (from_) {
    case FIRST_JS_RECEIVER_TYPE:
      if (to_ == LAST_TYPE) stream->Add(" spec_object");
      break;
    case JS_REGEXP_TYPE:
      if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
      break;
    case JS_ARRAY_TYPE:
      if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
      break;
    case JS_FUNCTION_TYPE:
      if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
      break;
    default:
      break;
  }
}


void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" == %o", *type_literal_.handle());
  HControlInstruction::PrintDataTo(stream);
}


static String* TypeOfString(HConstant* constant, Isolate* isolate) {
  Heap* heap = isolate->heap();
  if (constant->HasNumberValue()) return heap->number_string();
  if (constant->IsUndetectable()) return heap->undefined_string();
  if (constant->HasStringValue()) return heap->string_string();
  switch (constant->GetInstanceType()) {
    case ODDBALL_TYPE: {
      Unique<Object> unique = constant->GetUnique();
      if (unique.IsKnownGlobal(heap->true_value()) ||
          unique.IsKnownGlobal(heap->false_value())) {
        return heap->boolean_string();
      }
      if (unique.IsKnownGlobal(heap->null_value())) {
        return FLAG_harmony_typeof ? heap->null_string()
                                   : heap->object_string();
      }
      ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
      return heap->undefined_string();
    }
    case SYMBOL_TYPE:
      return heap->symbol_string();
    case JS_FUNCTION_TYPE:
    case JS_FUNCTION_PROXY_TYPE:
      return heap->function_string();
    default:
      return heap->object_string();
  }
}


bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
  if (FLAG_fold_constants && value()->IsConstant()) {
    HConstant* constant = HConstant::cast(value());
    String* type_string = TypeOfString(constant, isolate());
    bool same_type = type_literal_.IsKnownGlobal(type_string);
    *block = same_type ? FirstSuccessor() : SecondSuccessor();
    return true;
  } else if (value()->representation().IsSpecialization()) {
    bool number_type =
        type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
    *block = number_type ? FirstSuccessor() : SecondSuccessor();
    return true;
  }
  *block = NULL;
  return false;
}
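
// Example (illustrative, assuming --fold-constants is enabled): for
// |typeof x == "string"| where x folds to a string constant,
// TypeOfString() returns heap->string_string(), same_type is true, and
// the branch is wired straight to FirstSuccessor() with no runtime test.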


void HCheckMapValue::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" ");
  map()->PrintNameTo(stream);
}


void HForInPrepareMap::PrintDataTo(StringStream* stream) {
  enumerable()->PrintNameTo(stream);
}


void HForInCacheArray::PrintDataTo(StringStream* stream) {
  enumerable()->PrintNameTo(stream);
  stream->Add(" ");
  map()->PrintNameTo(stream);
  stream->Add("[%d]", idx_);
}


void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
  object()->PrintNameTo(stream);
  stream->Add(" ");
  index()->PrintNameTo(stream);
}


static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) {
  if (!l->EqualsInteger32Constant(~0)) return false;
  *negated = r;
  return true;
}


static bool MatchNegationViaXor(HValue* instr, HValue** negated) {
  if (!instr->IsBitwise()) return false;
  HBitwise* b = HBitwise::cast(instr);
  return (b->op() == Token::BIT_XOR) &&
      (MatchLeftIsOnes(b->left(), b->right(), negated) ||
       MatchLeftIsOnes(b->right(), b->left(), negated));
}


static bool MatchDoubleNegation(HValue* instr, HValue** arg) {
  HValue* negated;
  return MatchNegationViaXor(instr, &negated) &&
      MatchNegationViaXor(negated, arg);
}


HValue* HBitwise::Canonicalize() {
  if (!representation().IsSmiOrInteger32()) return this;
  // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
  int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
  if (left()->EqualsInteger32Constant(nop_constant) &&
      !right()->CheckFlag(kUint32)) {
    return right();
  }
  if (right()->EqualsInteger32Constant(nop_constant) &&
      !left()->CheckFlag(kUint32)) {
    return left();
  }
  // Optimize double negation, a common pattern used for ToInt32(x).
  HValue* arg;
  if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) {
    return arg;
  }
  return this;
}
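
// Note (added): the double-negation pattern arises from JavaScript idioms
// such as |~~x|, which forces ToInt32(x); the nop-constant checks above
// similarly fold |x & -1|, |x | 0| and |x ^ 0|. The kUint32 exception is
// needed because for uint32 values reinterpreting the same bit pattern as
// signed would change the number.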


Representation HAdd::RepresentationFromInputs() {
  Representation left_rep = left()->representation();
  if (left_rep.IsExternal()) {
    return Representation::External();
  }
  return HArithmeticBinaryOperation::RepresentationFromInputs();
}


Representation HAdd::RequiredInputRepresentation(int index) {
  if (index == 2) {
    Representation left_rep = left()->representation();
    if (left_rep.IsExternal()) {
      return Representation::Integer32();
    }
  }
  return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
}


static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
  return arg1->representation().IsSpecialization() &&
      arg2->EqualsInteger32Constant(identity);
}


HValue* HAdd::Canonicalize() {
  // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0
  if (IsIdentityOperation(left(), right(), 0) &&
      !left()->representation().IsDouble()) {  // Left could be -0.
    return left();
  }
  if (IsIdentityOperation(right(), left(), 0) &&
      !left()->representation().IsDouble()) {  // Right could be -0.
    return right();
  }
  return this;
}


HValue* HSub::Canonicalize() {
  if (IsIdentityOperation(left(), right(), 0)) return left();
  return this;
}


HValue* HMul::Canonicalize() {
  if (IsIdentityOperation(left(), right(), 1)) return left();
  if (IsIdentityOperation(right(), left(), 1)) return right();
  return this;
}


bool HMul::MulMinusOne() {
  if (left()->EqualsInteger32Constant(-1) ||
      right()->EqualsInteger32Constant(-1)) {
    return true;
  }

  return false;
}


HValue* HMod::Canonicalize() {
  return this;
}


HValue* HDiv::Canonicalize() {
  if (IsIdentityOperation(left(), right(), 1)) return left();
  return this;
}


HValue* HChange::Canonicalize() {
  return (from().Equals(to())) ? value() : this;
}


HValue* HWrapReceiver::Canonicalize() {
  if (HasNoUses()) return NULL;
  if (receiver()->type().IsJSObject()) {
    return receiver();
  }
  return this;
}


void HTypeof::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
}


HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
                                        HValue* value,
                                        Representation representation) {
  if (FLAG_fold_constants && value->IsConstant()) {
    HConstant* c = HConstant::cast(value);
    if (c->HasNumberValue()) {
      double double_res = c->DoubleValue();
      if (representation.CanContainDouble(double_res)) {
        return HConstant::New(zone, context,
                              static_cast<int32_t>(double_res),
                              representation);
      }
    }
  }
  return new(zone) HForceRepresentation(value, representation);
}


void HForceRepresentation::PrintDataTo(StringStream* stream) {
  stream->Add("%s ", representation().Mnemonic());
  value()->PrintNameTo(stream);
}


void HChange::PrintDataTo(StringStream* stream) {
  HUnaryOperation::PrintDataTo(stream);
  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());

  if (CanTruncateToInt32()) stream->Add(" truncating-int32");
  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
  if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
}


HValue* HUnaryMathOperation::Canonicalize() {
  if (op() == kMathRound || op() == kMathFloor) {
    HValue* val = value();
    if (val->IsChange()) val = HChange::cast(val)->value();
    if (val->representation().IsSmiOrInteger32()) {
      if (val->representation().Equals(representation())) return val;
      return Prepend(new(block()->zone()) HChange(
          val, representation(), false, false));
    }
  }
  if (op() == kMathFloor && value()->IsDiv() && value()->UseCount() == 1) {
    HDiv* hdiv = HDiv::cast(value());

    HValue* left = hdiv->left();
    if (left->representation().IsInteger32()) {
      // A value with an integer representation does not need to be transformed.
    } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32()) {
      // A change from an integer32 can be replaced by the integer32 value.
      left = HChange::cast(left)->value();
    } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
      left = Prepend(new(block()->zone()) HChange(
          left, Representation::Integer32(), false, false));
    } else {
      return this;
    }

    HValue* right = hdiv->right();
    if (right->IsInteger32Constant()) {
      right = Prepend(HConstant::cast(right)->CopyToRepresentation(
          Representation::Integer32(), right->block()->zone()));
    } else if (right->representation().IsInteger32()) {
      // A value with an integer representation does not need to be transformed.
    } else if (right->IsChange() &&
               HChange::cast(right)->from().IsInteger32()) {
      // A change from an integer32 can be replaced by the integer32 value.
      right = HChange::cast(right)->value();
    } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
      right = Prepend(new(block()->zone()) HChange(
          right, Representation::Integer32(), false, false));
    } else {
      return this;
    }

    return Prepend(HMathFloorOfDiv::New(
        block()->zone(), context(), left, right));
  }
  return this;
}
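
// Note (added): the net effect of the second branch above is that
// Math.floor(a / b) over int32 inputs collapses into a single
// HMathFloorOfDiv instruction, with HChange conversions peeled off or
// inserted so that both operands arrive as Integer32.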


HValue* HCheckInstanceType::Canonicalize() {
  if (check_ == IS_STRING && value()->type().IsString()) {
    return value();
  }

  if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
    if (HConstant::cast(value())->HasInternalizedStringValue()) {
      return value();
    }
  }
  return this;
}


void HCheckInstanceType::GetCheckInterval(InstanceType* first,
                                          InstanceType* last) {
  ASSERT(is_interval_check());
  switch (check_) {
    case IS_SPEC_OBJECT:
      *first = FIRST_SPEC_OBJECT_TYPE;
      *last = LAST_SPEC_OBJECT_TYPE;
      return;
    case IS_JS_ARRAY:
      *first = *last = JS_ARRAY_TYPE;
      return;
    default:
      UNREACHABLE();
  }
}


void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
  ASSERT(!is_interval_check());
  switch (check_) {
    case IS_STRING:
      *mask = kIsNotStringMask;
      *tag = kStringTag;
      return;
    case IS_INTERNALIZED_STRING:
      *mask = kIsNotStringMask | kIsNotInternalizedMask;
      *tag = kInternalizedTag;
      return;
    default:
      UNREACHABLE();
  }
}


bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
                                           HValue* dominator) {
  ASSERT(side_effect == kMaps);
  // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
  // type information is rich enough we should generalize this to any HType
  // for which the map is known.
  if (HasNoUses() && dominator->IsStoreNamedField()) {
    HStoreNamedField* store = HStoreNamedField::cast(dominator);
    if (!store->has_transition() || store->object() != value()) return false;
    HConstant* transition = HConstant::cast(store->transition());
    if (map_set_.Contains(Unique<Map>::cast(transition->GetUnique()))) {
      DeleteAndReplaceWith(NULL);
      return true;
    }
  }
  return false;
}


void HCheckMaps::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" [%p", *map_set_.at(0).handle());
  for (int i = 1; i < map_set_.size(); ++i) {
    stream->Add(",%p", *map_set_.at(i).handle());
  }
  stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}


void HCheckValue::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  stream->Add(" ");
  object().handle()->ShortPrint(stream);
}


HValue* HCheckValue::Canonicalize() {
  return (value()->IsConstant() &&
          HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
}


const char* HCheckInstanceType::GetCheckName() {
  switch (check_) {
    case IS_SPEC_OBJECT: return "object";
    case IS_JS_ARRAY: return "array";
    case IS_STRING: return "string";
    case IS_INTERNALIZED_STRING: return "internalized_string";
  }
  UNREACHABLE();
  return "";
}


void HCheckInstanceType::PrintDataTo(StringStream* stream) {
  stream->Add("%s ", GetCheckName());
  HUnaryOperation::PrintDataTo(stream);
}


void HCallStub::PrintDataTo(StringStream* stream) {
  stream->Add("%s ",
              CodeStub::MajorName(major_key_, false));
  HUnaryCall::PrintDataTo(stream);
}


void HUnknownOSRValue::PrintDataTo(StringStream* stream) {
  const char* type = "expression";
  if (environment_->is_local_index(index_)) type = "local";
  if (environment_->is_special_index(index_)) type = "special";
  if (environment_->is_parameter_index(index_)) type = "parameter";
  stream->Add("%s @ %d", type, index_);
}


void HInstanceOf::PrintDataTo(StringStream* stream) {
  left()->PrintNameTo(stream);
  stream->Add(" ");
  right()->PrintNameTo(stream);
  stream->Add(" ");
  context()->PrintNameTo(stream);
}


Range* HValue::InferRange(Zone* zone) {
  Range* result;
  if (representation().IsSmi() || type().IsSmi()) {
    result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
    result->set_can_be_minus_zero(false);
  } else {
    result = new(zone) Range();
    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32));
    // TODO(jkummerow): The range cannot be minus zero when the upper type
    // bound is Integer32.
  }
  return result;
}


Range* HChange::InferRange(Zone* zone) {
  Range* input_range = value()->range();
  if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) &&
      (to().IsSmi() ||
       (to().IsTagged() &&
        input_range != NULL &&
        input_range->IsInSmiRange()))) {
    set_type(HType::Smi());
    ClearChangesFlag(kNewSpacePromotion);
  }
  if (to().IsSmiOrTagged() &&
      input_range != NULL &&
      input_range->IsInSmiRange() &&
      (!SmiValuesAre32Bits() ||
       !value()->CheckFlag(HValue::kUint32) ||
       input_range->upper() != kMaxInt)) {
    // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
    // interval, so we treat kMaxInt as a sentinel for this entire interval.
    ClearFlag(kCanOverflow);
  }
  Range* result = (input_range != NULL)
      ? input_range->Copy(zone)
      : HValue::InferRange(zone);
  result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
                                !(CheckFlag(kAllUsesTruncatingToInt32) ||
                                  CheckFlag(kAllUsesTruncatingToSmi)));
  if (to().IsSmi()) result->ClampToSmi();
  return result;
}


Range* HConstant::InferRange(Zone* zone) {
  if (has_int32_value_) {
    Range* result = new(zone) Range(int32_value_, int32_value_);
    result->set_can_be_minus_zero(false);
    return result;
  }
  return HValue::InferRange(zone);
}


HSourcePosition HPhi::position() const {
  return block()->first()->position();
}


Range* HPhi::InferRange(Zone* zone) {
  Representation r = representation();
  if (r.IsSmiOrInteger32()) {
    if (block()->IsLoopHeader()) {
      Range* range = r.IsSmi()
          ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
          : new(zone) Range(kMinInt, kMaxInt);
      return range;
    } else {
      Range* range = OperandAt(0)->range()->Copy(zone);
      for (int i = 1; i < OperandCount(); ++i) {
        range->Union(OperandAt(i)->range());
      }
      return range;
    }
  } else {
    return HValue::InferRange(zone);
  }
}


Range* HAdd::InferRange(Zone* zone) {
  Representation r = representation();
  if (r.IsSmiOrInteger32()) {
    Range* a = left()->range();
    Range* b = right()->range();
    Range* res = a->Copy(zone);
    if (!res->AddAndCheckOverflow(r, b) ||
        (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
        (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
      ClearFlag(kCanOverflow);
    }
    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
                               !CheckFlag(kAllUsesTruncatingToInt32) &&
                               a->CanBeMinusZero() && b->CanBeMinusZero());
    return res;
  } else {
    return HValue::InferRange(zone);
  }
}


Range* HSub::InferRange(Zone* zone) {
  Representation r = representation();
  if (r.IsSmiOrInteger32()) {
    Range* a = left()->range();
    Range* b = right()->range();
    Range* res = a->Copy(zone);
    if (!res->SubAndCheckOverflow(r, b) ||
        (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
        (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
      ClearFlag(kCanOverflow);
    }
    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
                               !CheckFlag(kAllUsesTruncatingToInt32) &&
                               a->CanBeMinusZero() && b->CanBeZero());
    return res;
  } else {
    return HValue::InferRange(zone);
  }
}


Range* HMul::InferRange(Zone* zone) {
  Representation r = representation();
  if (r.IsSmiOrInteger32()) {
    Range* a = left()->range();
    Range* b = right()->range();
    Range* res = a->Copy(zone);
    if (!res->MulAndCheckOverflow(r, b) ||
        (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
          (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
         MulMinusOne())) {
      // Truncated int multiplication is too precise and therefore not the
      // same as converting to Double and back.
      // Handle truncated integer multiplication by -1 special.
      ClearFlag(kCanOverflow);
    }
    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
                               !CheckFlag(kAllUsesTruncatingToInt32) &&
                               ((a->CanBeZero() && b->CanBeNegative()) ||
                                (a->CanBeNegative() && b->CanBeZero())));
    return res;
  } else {
    return HValue::InferRange(zone);
  }
}


Range* HDiv::InferRange(Zone* zone) {
  if (representation().IsInteger32()) {
    Range* a = left()->range();
    Range* b = right()->range();
    Range* result = new(zone) Range();
    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
                                  (a->CanBeMinusZero() ||
                                   (a->CanBeZero() && b->CanBeNegative())));
    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
      ClearFlag(kCanOverflow);
    }

    if (!b->CanBeZero()) {
      ClearFlag(kCanBeDivByZero);
    }
    return result;
  } else {
    return HValue::InferRange(zone);
  }
}


Range* HMathFloorOfDiv::InferRange(Zone* zone) {
  if (representation().IsInteger32()) {
    Range* a = left()->range();
    Range* b = right()->range();
    Range* result = new(zone) Range();
    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
                                  (a->CanBeMinusZero() ||
                                   (a->CanBeZero() && b->CanBeNegative())));
    if (!a->Includes(kMinInt)) {
      ClearFlag(kLeftCanBeMinInt);
    }

    if (!a->CanBeNegative()) {
      ClearFlag(HValue::kLeftCanBeNegative);
    }

    if (!a->CanBePositive()) {
      ClearFlag(HValue::kLeftCanBePositive);
    }

    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
      ClearFlag(kCanOverflow);
    }

    if (!b->CanBeZero()) {
      ClearFlag(kCanBeDivByZero);
    }
    return result;
  } else {
    return HValue::InferRange(zone);
  }
}
1957 
1958 
1959 Range* HMod::InferRange(Zone* zone) {
1960  if (representation().IsInteger32()) {
1961  Range* a = left()->range();
1962  Range* b = right()->range();
1963 
1964  // The magnitude of the modulus is bounded by the right operand. Note that
1965  // apart from the cases involving kMinInt, the calculation below is the same
1966  // as Max(Abs(b->lower()), Abs(b->upper())) - 1.
1967  int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1);
1968 
1969  // The result of the modulo operation has the sign of its left operand.
1970  bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
1971  Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
1972  a->CanBePositive() ? positive_bound : 0);
1973 
1974  result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
1975  left_can_be_negative);
1976 
1977  if (!a->CanBeNegative()) {
1978  ClearFlag(HValue::kLeftCanBeNegative);
1979  }
1980 
1981  if (!a->Includes(kMinInt) || !b->Includes(-1)) {
1982  ClearFlag(HValue::kCanOverflow);
1983  }
1984 
1985  if (!b->CanBeZero()) {
1986  ClearFlag(HValue::kCanBeDivByZero);
1987  }
1988  return result;
1989  } else {
1990  return HValue::InferRange(zone);
1991  }
1992 }
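// Worked example for positive_bound above (illustrative): for b in [-5, 3],
// NegAbs(-5) == -5 and NegAbs(3) == -3, so Min(-5, -3) == -5 and
// -(-5 + 1) == 4 == Max(Abs(-5), Abs(3)) - 1. Phrasing the bound via NegAbs
// stays overflow-free when b includes kMinInt, where Abs would overflow.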
1993 
1994 
1995 InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
1996  if (phi->block()->loop_information() == NULL) return NULL;
1997  if (phi->OperandCount() != 2) return NULL;
1998  int32_t candidate_increment;
1999 
2000  candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
2001  if (candidate_increment != 0) {
2002  return new(phi->block()->graph()->zone())
2003  InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
2004  }
2005 
2006  candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
2007  if (candidate_increment != 0) {
2008  return new(phi->block()->graph()->zone())
2009  InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
2010  }
2011 
2012  return NULL;
2013 }
2014 
2015 
2016 /*
2017  * This function tries to match the following patterns (and all the relevant
2018  * variants related to |, & and + being commutative):
2019  * base | constant_or_mask
2020  * base & constant_and_mask
2021  * (base + constant_offset) & constant_and_mask
2022  * (base - constant_offset) & constant_and_mask
2023  */
2024 void InductionVariableData::DecomposeBitwise(
2025  HValue* value,
2026  BitwiseDecompositionResult* result) {
2027  HValue* base = IgnoreOsrValue(value);
2028  result->base = value;
2029 
2030  if (!base->representation().IsInteger32()) return;
2031 
2032  if (base->IsBitwise()) {
2033  bool allow_offset = false;
2034  int32_t mask = 0;
2035 
2036  HBitwise* bitwise = HBitwise::cast(base);
2037  if (bitwise->right()->IsInteger32Constant()) {
2038  mask = bitwise->right()->GetInteger32Constant();
2039  base = bitwise->left();
2040  } else if (bitwise->left()->IsInteger32Constant()) {
2041  mask = bitwise->left()->GetInteger32Constant();
2042  base = bitwise->right();
2043  } else {
2044  return;
2045  }
2046  if (bitwise->op() == Token::BIT_AND) {
2047  result->and_mask = mask;
2048  allow_offset = true;
2049  } else if (bitwise->op() == Token::BIT_OR) {
2050  result->or_mask = mask;
2051  } else {
2052  return;
2053  }
2054 
2055  result->context = bitwise->context();
2056 
2057  if (allow_offset) {
2058  if (base->IsAdd()) {
2059  HAdd* add = HAdd::cast(base);
2060  if (add->right()->IsInteger32Constant()) {
2061  base = add->left();
2062  } else if (add->left()->IsInteger32Constant()) {
2063  base = add->right();
2064  }
2065  } else if (base->IsSub()) {
2066  HSub* sub = HSub::cast(base);
2067  if (sub->right()->IsInteger32Constant()) {
2068  base = sub->left();
2069  }
2070  }
2071  }
2072 
2073  result->base = base;
2074  }
2075 }
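// Illustrative example: for value == ((i + 4) & 0xff) the decomposition
// yields base == i, and_mask == 0xff and or_mask == 0; the constant offset
// may be ignored because an AND mask bounds the result regardless of the
// offset. For value == (i | 3) it yields base == i and or_mask == 3, and
// no offset is allowed for BIT_OR.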
2076 
2077 
2078 void InductionVariableData::AddCheck(HBoundsCheck* check,
2079  int32_t upper_limit) {
2080  ASSERT(limit_validity() != NULL);
2081  if (limit_validity() != check->block() &&
2082  !limit_validity()->Dominates(check->block())) return;
2083  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
2084  check->block()->current_loop())) return;
2085 
2086  ChecksRelatedToLength* length_checks = checks();
2087  while (length_checks != NULL) {
2088  if (length_checks->length() == check->length()) break;
2089  length_checks = length_checks->next();
2090  }
2091  if (length_checks == NULL) {
2092  length_checks = new(check->block()->zone())
2093  ChecksRelatedToLength(check->length(), checks());
2094  checks_ = length_checks;
2095  }
2096 
2097  length_checks->AddCheck(check, upper_limit);
2098 }
2099 
2100 
2101 void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
2102  if (checks() != NULL) {
2103  InductionVariableCheck* c = checks();
2104  HBasicBlock* current_block = c->check()->block();
2105  while (c != NULL && c->check()->block() == current_block) {
2106  c->set_upper_limit(current_upper_limit_);
2107  c = c->next();
2108  }
2109  }
2110 }
2111 
2112 
2113 void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
2114  Token::Value token,
2115  int32_t mask,
2116  HValue* index_base,
2117  HValue* context) {
2118  ASSERT(first_check_in_block() != NULL);
2119  HValue* previous_index = first_check_in_block()->index();
2120  ASSERT(context != NULL);
2121 
2122  Zone* zone = index_base->block()->graph()->zone();
2123  set_added_constant(HConstant::New(zone, context, mask));
2124  if (added_index() != NULL) {
2125  added_constant()->InsertBefore(added_index());
2126  } else {
2127  added_constant()->InsertBefore(first_check_in_block());
2128  }
2129 
2130  if (added_index() == NULL) {
2131  first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
2132  HInstruction* new_index = HBitwise::New(zone, context, token, index_base,
2133  added_constant());
2134  ASSERT(new_index->IsBitwise());
2135  new_index->ClearAllSideEffects();
2136  new_index->AssumeRepresentation(Representation::Integer32());
2137  set_added_index(HBitwise::cast(new_index));
2138  added_index()->InsertBefore(first_check_in_block());
2139  }
2140  ASSERT(added_index()->op() == token);
2141 
2142  added_index()->SetOperandAt(1, index_base);
2143  added_index()->SetOperandAt(2, added_constant());
2144  first_check_in_block()->SetOperandAt(0, added_index());
2145  if (previous_index->UseCount() == 0) {
2146  previous_index->DeleteAndReplaceWith(NULL);
2147  }
2148 }
2149 
2150 void InductionVariableData::ChecksRelatedToLength::AddCheck(
2151  HBoundsCheck* check,
2152  int32_t upper_limit) {
2153  BitwiseDecompositionResult decomposition;
2154  InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
2155 
2156  if (first_check_in_block() == NULL ||
2157  first_check_in_block()->block() != check->block()) {
2158  CloseCurrentBlock();
2159 
2160  first_check_in_block_ = check;
2161  set_added_index(NULL);
2162  set_added_constant(NULL);
2163  current_and_mask_in_block_ = decomposition.and_mask;
2164  current_or_mask_in_block_ = decomposition.or_mask;
2165  current_upper_limit_ = upper_limit;
2166 
2167  InductionVariableCheck* new_check = new(check->block()->graph()->zone())
2168  InductionVariableCheck(check, checks_, upper_limit);
2169  checks_ = new_check;
2170  return;
2171  }
2172 
2173  if (upper_limit > current_upper_limit()) {
2174  current_upper_limit_ = upper_limit;
2175  }
2176 
2177  if (decomposition.and_mask != 0 &&
2178  current_or_mask_in_block() == 0) {
2179  if (current_and_mask_in_block() == 0 ||
2180  decomposition.and_mask > current_and_mask_in_block()) {
2181  UseNewIndexInCurrentBlock(Token::BIT_AND,
2182  decomposition.and_mask,
2183  decomposition.base,
2184  decomposition.context);
2185  current_and_mask_in_block_ = decomposition.and_mask;
2186  }
2187  check->set_skip_check();
2188  }
2189  if (current_and_mask_in_block() == 0) {
2190  if (decomposition.or_mask > current_or_mask_in_block()) {
2191  UseNewIndexInCurrentBlock(Token::BIT_OR,
2192  decomposition.or_mask,
2193  decomposition.base,
2194  decomposition.context);
2195  current_or_mask_in_block_ = decomposition.or_mask;
2196  }
2197  check->set_skip_check();
2198  }
2199 
2200  if (!check->skip_check()) {
2201  InductionVariableCheck* new_check = new(check->block()->graph()->zone())
2202  InductionVariableCheck(check, checks_, upper_limit);
2203  checks_ = new_check;
2204  }
2205 }
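// Illustrative example: two checks in the same block on (i & 0xff) and
// (i & 0x3ff) are merged by widening the first check's index to
// (i & 0x3ff); the check with the larger AND mask bounds both indices, so
// the second check is marked skip_check.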
2206 
2207 
2208 /*
2209  * This method detects if phi is an induction variable, with phi_operand as
2210  * its "incremented" value (the other operand would be the "base" value).
2211  *
2212  * It checks whether phi_operand has the form "phi + constant".
2213  * If yes, the constant is the increment that the induction variable gets at
2214  * every loop iteration.
2215  * Otherwise it returns 0.
2216  */
2217 int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
2218  HValue* phi_operand) {
2219  if (!phi_operand->representation().IsInteger32()) return 0;
2220 
2221  if (phi_operand->IsAdd()) {
2222  HAdd* operation = HAdd::cast(phi_operand);
2223  if (operation->left() == phi &&
2224  operation->right()->IsInteger32Constant()) {
2225  return operation->right()->GetInteger32Constant();
2226  } else if (operation->right() == phi &&
2227  operation->left()->IsInteger32Constant()) {
2228  return operation->left()->GetInteger32Constant();
2229  }
2230  } else if (phi_operand->IsSub()) {
2231  HSub* operation = HSub::cast(phi_operand);
2232  if (operation->left() == phi &&
2233  operation->right()->IsInteger32Constant()) {
2234  return -operation->right()->GetInteger32Constant();
2235  }
2236  }
2237 
2238  return 0;
2239 }
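// Illustrative examples: for a loop phi updated as "phi + 3" this returns 3,
// for "phi - 2" it returns -2, and for "2 - phi" (which is not an increment
// of phi) it falls through and returns 0.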
2240 
2241 
2242 /*
2243  * Swaps the information in "update" with the one contained in "this".
2244  * The swapping is important because this method is used while doing a
2245  * dominator tree traversal, and "update" will retain the old data that
2246  * will be restored while backtracking.
2247  */
2248 void InductionVariableData::UpdateAdditionalLimit(
2249  InductionVariableLimitUpdate* update) {
2250  ASSERT(update->updated_variable == this);
2251  if (update->limit_is_upper) {
2252  swap(&additional_upper_limit_, &update->limit);
2253  swap(&additional_upper_limit_is_included_, &update->limit_is_included);
2254  } else {
2255  swap(&additional_lower_limit_, &update->limit);
2256  swap(&additional_lower_limit_is_included_, &update->limit_is_included);
2257  }
2258 }
2259 
2260 
2261 int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
2262  int32_t or_mask) {
2263  // Should be Smi::kMaxValue, but it must fit in 32 bits; a lower value is safe anyway.
2264  const int32_t MAX_LIMIT = 1 << 30;
2265 
2266  int32_t result = MAX_LIMIT;
2267 
2268  if (limit() != NULL &&
2269  limit()->IsInteger32Constant()) {
2270  int32_t limit_value = limit()->GetInteger32Constant();
2271  if (!limit_included()) {
2272  limit_value--;
2273  }
2274  if (limit_value < result) result = limit_value;
2275  }
2276 
2277  if (additional_upper_limit() != NULL &&
2278  additional_upper_limit()->IsInteger32Constant()) {
2279  int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
2280  if (!additional_upper_limit_is_included()) {
2281  limit_value--;
2282  }
2283  if (limit_value < result) result = limit_value;
2284  }
2285 
2286  if (and_mask > 0 && and_mask < MAX_LIMIT) {
2287  if (and_mask < result) result = and_mask;
2288  return result;
2289  }
2290 
2291  // Add the effect of the or_mask.
2292  result |= or_mask;
2293 
2294  return result >= MAX_LIMIT ? kNoLimit : result;
2295 }
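// Worked example (illustrative): with an exclusive constant limit of 10 the
// first clause yields 9, and an and_mask of 0xff then gives Min(9, 255) == 9.
// With no constant limit and or_mask == 3, the result becomes
// MAX_LIMIT | 3 >= MAX_LIMIT, so kNoLimit is returned.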
2296 
2297 
2298 HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
2299  if (!v->IsPhi()) return v;
2300  HPhi* phi = HPhi::cast(v);
2301  if (phi->OperandCount() != 2) return v;
2302  if (phi->OperandAt(0)->block()->is_osr_entry()) {
2303  return phi->OperandAt(1);
2304  } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
2305  return phi->OperandAt(0);
2306  } else {
2307  return v;
2308  }
2309 }
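// Illustrative example: for a two-operand phi merging an OSR entry value
// with the loop value, e.g. phi(osr_value, i), this returns i, so on-stack
// replacement does not hide the underlying induction variable.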
2310 
2311 
2312 InductionVariableData* InductionVariableData::GetInductionVariableData(
2313  HValue* v) {
2314  v = IgnoreOsrValue(v);
2315  if (v->IsPhi()) {
2316  return HPhi::cast(v)->induction_variable_data();
2317  }
2318  return NULL;
2319 }
2320 
2321 
2322 /*
2323  * Check if a conditional branch to "current_branch" with token "token" is
2324  * the branch that keeps the induction loop running (and, conversely, will
2325  * terminate it if the "other_branch" is taken).
2326  *
2327  * Three conditions must be met:
2328  * - "current_branch" must be in the induction loop.
2329  * - "other_branch" must be out of the induction loop.
2330  * - "token" and the induction increment must be "compatible": the token should
2331  * be a condition that keeps the execution inside the loop until the limit is
2332  * reached.
2333  */
2334 bool InductionVariableData::CheckIfBranchIsLoopGuard(
2335  Token::Value token,
2336  HBasicBlock* current_branch,
2337  HBasicBlock* other_branch) {
2338  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
2339  current_branch->current_loop())) {
2340  return false;
2341  }
2342 
2343  if (phi()->block()->current_loop()->IsNestedInThisLoop(
2344  other_branch->current_loop())) {
2345  return false;
2346  }
2347 
2348  if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
2349  return true;
2350  }
2351  if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
2352  return true;
2353  }
2354  if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
2355  return true;
2356  }
2357 
2358  return false;
2359 }
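// Illustrative example: in a hypothetical loop "for (i = 0; i < n; i++)" the
// increment is +1 and the in-loop branch uses Token::LT, so that branch is
// recognized as the loop guard; "i != n" also qualifies, because an
// increment of +1 or -1 cannot step over the limit.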
2360 
2361 
2362 void InductionVariableData::ComputeLimitFromPredecessorBlock(
2363  HBasicBlock* block,
2364  LimitFromPredecessorBlock* result) {
2365  if (block->predecessors()->length() != 1) return;
2366  HBasicBlock* predecessor = block->predecessors()->at(0);
2367  HInstruction* end = predecessor->last();
2368 
2369  if (!end->IsCompareNumericAndBranch()) return;
2370  HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
2371 
2372  Token::Value token = branch->token();
2373  if (!Token::IsArithmeticCompareOp(token)) return;
2374 
2375  HBasicBlock* other_target;
2376  if (block == branch->SuccessorAt(0)) {
2377  other_target = branch->SuccessorAt(1);
2378  } else {
2379  other_target = branch->SuccessorAt(0);
2380  token = Token::NegateCompareOp(token);
2381  ASSERT(block == branch->SuccessorAt(1));
2382  }
2383 
2384  InductionVariableData* data;
2385 
2386  data = GetInductionVariableData(branch->left());
2387  HValue* limit = branch->right();
2388  if (data == NULL) {
2389  data = GetInductionVariableData(branch->right());
2390  token = Token::ReverseCompareOp(token);
2391  limit = branch->left();
2392  }
2393 
2394  if (data != NULL) {
2395  result->variable = data;
2396  result->token = token;
2397  result->limit = limit;
2398  result->other_target = other_target;
2399  }
2400 }
2401 
2402 
2403 /*
2404  * Compute the limit that is imposed on an induction variable when entering
2405  * "block" (if any).
2406  * If the limit is the "proper" induction limit (the one that makes the loop
2407  * terminate when the induction variable reaches it) it is stored directly in
2408  * the induction variable data.
2409  * Otherwise the limit is written in "additional_limit" and the method
2410  * returns true.
2411  */
2412 bool InductionVariableData::ComputeInductionVariableLimit(
2413  HBasicBlock* block,
2414  InductionVariableLimitUpdate* additional_limit) {
2415  LimitFromPredecessorBlock limit;
2416  ComputeLimitFromPredecessorBlock(block, &limit);
2417  if (!limit.LimitIsValid()) return false;
2418 
2419  if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
2420  block,
2421  limit.other_target)) {
2422  limit.variable->limit_ = limit.limit;
2423  limit.variable->limit_included_ = limit.LimitIsIncluded();
2424  limit.variable->limit_validity_ = block;
2425  limit.variable->induction_exit_block_ = block->predecessors()->at(0);
2426  limit.variable->induction_exit_target_ = limit.other_target;
2427  return false;
2428  } else {
2429  additional_limit->updated_variable = limit.variable;
2430  additional_limit->limit = limit.limit;
2431  additional_limit->limit_is_upper = limit.LimitIsUpper();
2432  additional_limit->limit_is_included = limit.LimitIsIncluded();
2433  return true;
2434  }
2435 }
2436 
2437 
2438 Range* HMathMinMax::InferRange(Zone* zone) {
2439  if (representation().IsSmiOrInteger32()) {
2440  Range* a = left()->range();
2441  Range* b = right()->range();
2442  Range* res = a->Copy(zone);
2443  if (operation_ == kMathMax) {
2444  res->CombinedMax(b);
2445  } else {
2446  ASSERT(operation_ == kMathMin);
2447  res->CombinedMin(b);
2448  }
2449  return res;
2450  } else {
2451  return HValue::InferRange(zone);
2452  }
2453 }
2454 
2455 
2456 void HPhi::PrintTo(StringStream* stream) {
2457  stream->Add("[");
2458  for (int i = 0; i < OperandCount(); ++i) {
2459  HValue* value = OperandAt(i);
2460  stream->Add(" ");
2461  value->PrintNameTo(stream);
2462  stream->Add(" ");
2463  }
2464  stream->Add(" uses:%d_%ds_%di_%dd_%dt",
2465  UseCount(),
2466  smi_non_phi_uses() + smi_indirect_uses(),
2467  int32_non_phi_uses() + int32_indirect_uses(),
2468  double_non_phi_uses() + double_indirect_uses(),
2469  tagged_non_phi_uses() + tagged_indirect_uses());
2470  PrintRangeTo(stream);
2471  PrintTypeTo(stream);
2472  stream->Add("]");
2473 }
2474 
2475 
2476 void HPhi::AddInput(HValue* value) {
2477  inputs_.Add(NULL, value->block()->zone());
2478  SetOperandAt(OperandCount() - 1, value);
2479  // Mark phis that may have 'arguments' as an operand, directly or indirectly.
2480  if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
2481  SetFlag(kIsArguments);
2482  }
2483 }
2484 
2485 
2486 bool HPhi::HasRealUses() {
2487  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
2488  if (!it.value()->IsPhi()) return true;
2489  }
2490  return false;
2491 }
2492 
2493 
2494 HValue* HPhi::GetRedundantReplacement() {
2495  HValue* candidate = NULL;
2496  int count = OperandCount();
2497  int position = 0;
2498  while (position < count && candidate == NULL) {
2499  HValue* current = OperandAt(position++);
2500  if (current != this) candidate = current;
2501  }
2502  while (position < count) {
2503  HValue* current = OperandAt(position++);
2504  if (current != this && current != candidate) return NULL;
2505  }
2506  ASSERT(candidate != this);
2507  return candidate;
2508 }
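// Illustrative example: a phi of the form phi(x, x, phi) is redundant and
// this returns x, while phi(x, y) with distinct x and y returns NULL and
// the phi is kept.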
2509 
2510 
2511 void HPhi::DeleteFromGraph() {
2512  ASSERT(block() != NULL);
2513  block()->RemovePhi(this);
2514  ASSERT(block() == NULL);
2515 }
2516 
2517 
2518 void HPhi::InitRealUses(int phi_id) {
2519  // Initialize real uses.
2520  phi_id_ = phi_id;
2521  // Compute a conservative approximation of truncating uses before inferring
2522  // representations. The proper, exact computation will be done later, when
2523  // inserting representation changes.
2524  SetFlag(kTruncatingToSmi);
2525  SetFlag(kTruncatingToInt32);
2526  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
2527  HValue* value = it.value();
2528  if (!value->IsPhi()) {
2529  Representation rep = value->observed_input_representation(it.index());
2530  non_phi_uses_[rep.kind()] += 1;
2531  if (FLAG_trace_representation) {
2532  PrintF("#%d Phi is used by real #%d %s as %s\n",
2533  id(), value->id(), value->Mnemonic(), rep.Mnemonic());
2534  }
2535  if (!value->IsSimulate()) {
2536  if (!value->CheckFlag(kTruncatingToSmi)) {
2537  ClearFlag(kTruncatingToSmi);
2538  }
2539  if (!value->CheckFlag(kTruncatingToInt32)) {
2540  ClearFlag(kTruncatingToInt32);
2541  }
2542  }
2543  }
2544  }
2545 }
2546 
2547 
2548 void HPhi::AddNonPhiUsesFrom(HPhi* other) {
2549  if (FLAG_trace_representation) {
2550  PrintF("adding to #%d Phi uses of #%d Phi: s%d i%d d%d t%d\n",
2551  id(), other->id(),
2552  other->non_phi_uses_[Representation::kSmi],
2553  other->non_phi_uses_[Representation::kInteger32],
2554  other->non_phi_uses_[Representation::kDouble],
2555  other->non_phi_uses_[Representation::kTagged]);
2556  }
2557 
2558  for (int i = 0; i < Representation::kNumRepresentations; i++) {
2559  indirect_uses_[i] += other->non_phi_uses_[i];
2560  }
2561 }
2562 
2563 
2564 void HPhi::AddIndirectUsesTo(int* dest) {
2565  for (int i = 0; i < Representation::kNumRepresentations; i++) {
2566  dest[i] += indirect_uses_[i];
2567  }
2568 }
2569 
2570 
2571 void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
2572  while (!list->is_empty()) {
2573  HSimulate* from = list->RemoveLast();
2574  ZoneList<HValue*>* from_values = &from->values_;
2575  for (int i = 0; i < from_values->length(); ++i) {
2576  if (from->HasAssignedIndexAt(i)) {
2577  int index = from->GetAssignedIndexAt(i);
2578  if (HasValueForIndex(index)) continue;
2579  AddAssignedValue(index, from_values->at(i));
2580  } else {
2581  if (pop_count_ > 0) {
2582  pop_count_--;
2583  } else {
2584  AddPushedValue(from_values->at(i));
2585  }
2586  }
2587  }
2588  pop_count_ += from->pop_count_;
2589  from->DeleteAndReplaceWith(NULL);
2590  }
2591 }
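// Illustrative note: a value pushed by a merged simulate cancels one pending
// pop of the surviving simulate (pop_count_ is decremented instead of
// re-pushing the value), mirroring how the expression stack would actually
// have evolved between the two deoptimization points.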
2592 
2593 
2594 void HSimulate::PrintDataTo(StringStream* stream) {
2595  stream->Add("id=%d", ast_id().ToInt());
2596  if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
2597  if (values_.length() > 0) {
2598  if (pop_count_ > 0) stream->Add(" /");
2599  for (int i = values_.length() - 1; i >= 0; --i) {
2600  if (HasAssignedIndexAt(i)) {
2601  stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
2602  } else {
2603  stream->Add(" push ");
2604  }
2605  values_[i]->PrintNameTo(stream);
2606  if (i > 0) stream->Add(",");
2607  }
2608  }
2609 }
2610 
2611 
2612 void HSimulate::ReplayEnvironment(HEnvironment* env) {
2613  if (done_with_replay_) return;
2614  ASSERT(env != NULL);
2615  env->set_ast_id(ast_id());
2616  env->Drop(pop_count());
2617  for (int i = values()->length() - 1; i >= 0; --i) {
2618  HValue* value = values()->at(i);
2619  if (HasAssignedIndexAt(i)) {
2620  env->Bind(GetAssignedIndexAt(i), value);
2621  } else {
2622  env->Push(value);
2623  }
2624  }
2625  done_with_replay_ = true;
2626 }
2627 
2628 
2629 static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
2630  HCapturedObject* other) {
2631  for (int i = 0; i < values->length(); ++i) {
2632  HValue* value = values->at(i);
2633  if (value->IsCapturedObject()) {
2634  if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
2635  values->at(i) = other;
2636  } else {
2637  ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
2638  }
2639  }
2640  }
2641 }
2642 
2643 
2644 // Replay captured objects by replacing all captured objects with the
2645 // same capture id in the current and all outer environments.
2646 void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
2647  ASSERT(env != NULL);
2648  while (env != NULL) {
2649  ReplayEnvironmentNested(env->values(), this);
2650  env = env->outer();
2651  }
2652 }
2653 
2654 
2655 void HCapturedObject::PrintDataTo(StringStream* stream) {
2656  stream->Add("#%d ", capture_id());
2657  HDematerializedObject::PrintDataTo(stream);
2658 }
2659 
2660 
2661 void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
2662  Zone* zone) {
2663  ASSERT(return_target->IsInlineReturnTarget());
2664  return_targets_.Add(return_target, zone);
2665 }
2666 
2667 
2668 void HEnterInlined::PrintDataTo(StringStream* stream) {
2669  SmartArrayPointer<char> name = function()->debug_name()->ToCString();
2670  stream->Add("%s, id=%d", name.get(), function()->id().ToInt());
2671 }
2672 
2673 
2674 static bool IsInteger32(double value) {
2675  double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
2676  return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
2677 }
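// Illustrative note: the bit comparison above (rather than ==) makes
// IsInteger32(-0.0) false, because -0.0 round-trips through int32 as +0.0
// with a different bit pattern; IsInteger32(3.0) is true and
// IsInteger32(3.5) is false.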
2678 
2679 
2680 HConstant::HConstant(Handle<Object> handle, Representation r)
2681  : HTemplateInstruction<0>(HType::TypeFromValue(handle)),
2682  object_(Unique<Object>::CreateUninitialized(handle)),
2683  has_smi_value_(false),
2684  has_int32_value_(false),
2685  has_double_value_(false),
2686  has_external_reference_value_(false),
2687  is_not_in_new_space_(true),
2688  boolean_value_(handle->BooleanValue()),
2689  is_undetectable_(false),
2690  instance_type_(kUnknownInstanceType) {
2691  if (handle->IsHeapObject()) {
2692  Handle<HeapObject> heap_obj = Handle<HeapObject>::cast(handle);
2693  Heap* heap = heap_obj->GetHeap();
2694  is_not_in_new_space_ = !heap->InNewSpace(*handle);
2695  instance_type_ = heap_obj->map()->instance_type();
2696  is_undetectable_ = heap_obj->map()->is_undetectable();
2697  }
2698  if (handle->IsNumber()) {
2699  double n = handle->Number();
2700  has_int32_value_ = IsInteger32(n);
2701  int32_value_ = DoubleToInt32(n);
2702  has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
2703  double_value_ = n;
2704  has_double_value_ = true;
2705  // TODO(titzer): if this heap number is new space, tenure a new one.
2706  }
2707 
2708  Initialize(r);
2709 }
2710 
2711 
2712 HConstant::HConstant(Unique<Object> unique,
2713  Representation r,
2714  HType type,
2715  bool is_not_in_new_space,
2716  bool boolean_value,
2717  bool is_undetectable,
2718  InstanceType instance_type)
2719  : HTemplateInstruction<0>(type),
2720  object_(unique),
2721  has_smi_value_(false),
2722  has_int32_value_(false),
2723  has_double_value_(false),
2724  has_external_reference_value_(false),
2725  is_not_in_new_space_(is_not_in_new_space),
2726  boolean_value_(boolean_value),
2727  is_undetectable_(is_undetectable),
2728  instance_type_(instance_type) {
2729  ASSERT(!unique.handle().is_null());
2730  ASSERT(!type.IsTaggedNumber());
2731  Initialize(r);
2732 }
2733 
2734 
2735 HConstant::HConstant(int32_t integer_value,
2736  Representation r,
2737  bool is_not_in_new_space,
2738  Unique<Object> object)
2739  : object_(object),
2740  has_smi_value_(Smi::IsValid(integer_value)),
2741  has_int32_value_(true),
2742  has_double_value_(true),
2743  has_external_reference_value_(false),
2744  is_not_in_new_space_(is_not_in_new_space),
2745  boolean_value_(integer_value != 0),
2746  is_undetectable_(false),
2747  int32_value_(integer_value),
2748  double_value_(FastI2D(integer_value)),
2749  instance_type_(kUnknownInstanceType) {
2750  // It's possible to create a constant with a value in Smi-range but stored
2751  // in a (pre-existing) HeapNumber. See crbug.com/349878.
2752  bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
2753  bool is_smi = has_smi_value_ && !could_be_heapobject;
2754  set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
2755  Initialize(r);
2756 }
2757 
2758 
2759 HConstant::HConstant(double double_value,
2760  Representation r,
2761  bool is_not_in_new_space,
2762  Unique<Object> object)
2763  : object_(object),
2764  has_int32_value_(IsInteger32(double_value)),
2765  has_double_value_(true),
2766  has_external_reference_value_(false),
2767  is_not_in_new_space_(is_not_in_new_space),
2768  boolean_value_(double_value != 0 && !std::isnan(double_value)),
2769  is_undetectable_(false),
2770  int32_value_(DoubleToInt32(double_value)),
2771  double_value_(double_value),
2772  instance_type_(kUnknownInstanceType) {
2773  has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
2774  // It's possible to create a constant with a value in Smi-range but stored
2775  // in a (pre-existing) HeapNumber. See crbug.com/349878.
2776  bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
2777  bool is_smi = has_smi_value_ && !could_be_heapobject;
2778  set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
2779  Initialize(r);
2780 }
2781 
2782 
2783 HConstant::HConstant(ExternalReference reference)
2784  : HTemplateInstruction<0>(HType::None()),
2785  object_(Unique<Object>(Handle<Object>::null())),
2786  has_smi_value_(false),
2787  has_int32_value_(false),
2788  has_double_value_(false),
2789  has_external_reference_value_(true),
2790  is_not_in_new_space_(true),
2791  boolean_value_(true),
2792  is_undetectable_(false),
2793  external_reference_value_(reference),
2794  instance_type_(kUnknownInstanceType) {
2795  Initialize(Representation::External());
2796 }
2797 
2798 
2799 void HConstant::Initialize(Representation r) {
2800  if (r.IsNone()) {
2801  if (has_smi_value_ && SmiValuesAre31Bits()) {
2802  r = Representation::Smi();
2803  } else if (has_int32_value_) {
2804  r = Representation::Integer32();
2805  } else if (has_double_value_) {
2806  r = Representation::Double();
2807  } else if (has_external_reference_value_) {
2808  r = Representation::External();
2809  } else {
2810  Handle<Object> object = object_.handle();
2811  if (object->IsJSObject()) {
2812  // Try to eagerly migrate JSObjects that have deprecated maps.
2813  Handle<JSObject> js_object = Handle<JSObject>::cast(object);
2814  if (js_object->map()->is_deprecated()) {
2815  JSObject::TryMigrateInstance(js_object);
2816  }
2817  }
2818  r = Representation::Tagged();
2819  }
2820  }
2821  set_representation(r);
2822  SetFlag(kUseGVN);
2823 }
2824 
2825 
2826 bool HConstant::ImmortalImmovable() const {
2827  if (has_int32_value_) {
2828  return false;
2829  }
2830  if (has_double_value_) {
2831  if (IsSpecialDouble()) {
2832  return true;
2833  }
2834  return false;
2835  }
2836  if (has_external_reference_value_) {
2837  return false;
2838  }
2839 
2840  ASSERT(!object_.handle().is_null());
2841  Heap* heap = isolate()->heap();
2842  ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
2843  ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
2844  return
2845 #define IMMORTAL_IMMOVABLE_ROOT(name) \
2846  object_.IsKnownGlobal(heap->name()) ||
2847  IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
2848 #undef IMMORTAL_IMMOVABLE_ROOT
2849 #define INTERNALIZED_STRING(name, value) \
2850  object_.IsKnownGlobal(heap->name()) ||
2851  INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
2852 #undef INTERNALIZED_STRING
2853 #define STRING_TYPE(NAME, size, name, Name) \
2854  object_.IsKnownGlobal(heap->name##_map()) ||
2855  STRING_TYPE_LIST(STRING_TYPE)
2856 #undef STRING_TYPE
2857  false;
2858 }
2859 
2860 
2861 bool HConstant::EmitAtUses() {
2862  ASSERT(IsLinked());
2863  if (block()->graph()->has_osr() &&
2864  block()->graph()->IsStandardConstant(this)) {
2865  // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
2866  return true;
2867  }
2868  if (UseCount() == 0) return true;
2869  if (IsCell()) return false;
2870  if (representation().IsDouble()) return false;
2871  return true;
2872 }
2873 
2874 
2875 HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
2876  if (r.IsSmi() && !has_smi_value_) return NULL;
2877  if (r.IsInteger32() && !has_int32_value_) return NULL;
2878  if (r.IsDouble() && !has_double_value_) return NULL;
2879  if (r.IsExternal() && !has_external_reference_value_) return NULL;
2880  if (has_int32_value_) {
2881  return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_);
2882  }
2883  if (has_double_value_) {
2884  return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_);
2885  }
2886  if (has_external_reference_value_) {
2887  return new(zone) HConstant(external_reference_value_);
2888  }
2889  ASSERT(!object_.handle().is_null());
2890  return new(zone) HConstant(object_,
2891  r,
2892  type_,
2893  is_not_in_new_space_,
2894  boolean_value_,
2895  is_undetectable_,
2896  instance_type_);
2897 }
2898 
2899 
2900 Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
2901  HConstant* res = NULL;
2902  if (has_int32_value_) {
2903  res = new(zone) HConstant(int32_value_,
2904  Representation::Integer32(),
2905  is_not_in_new_space_,
2906  object_);
2907  } else if (has_double_value_) {
2908  res = new(zone) HConstant(DoubleToInt32(double_value_),
2909  Representation::Integer32(),
2910  is_not_in_new_space_,
2911  object_);
2912  }
2913  return Maybe<HConstant*>(res != NULL, res);
2914 }
2915 
2916 
2917 Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
2918  HConstant* res = NULL;
2919  Handle<Object> handle = this->handle(zone->isolate());
2920  if (handle->IsBoolean()) {
2921  res = handle->BooleanValue() ?
2922  new(zone) HConstant(1) : new(zone) HConstant(0);
2923  } else if (handle->IsUndefined()) {
2924  res = new(zone) HConstant(OS::nan_value());
2925  } else if (handle->IsNull()) {
2926  res = new(zone) HConstant(0);
2927  }
2928  return Maybe<HConstant*>(res != NULL, res);
2929 }
2930 
2931 
2932 void HConstant::PrintDataTo(StringStream* stream) {
2933  if (has_int32_value_) {
2934  stream->Add("%d ", int32_value_);
2935  } else if (has_double_value_) {
2936  stream->Add("%f ", FmtElm(double_value_));
2937  } else if (has_external_reference_value_) {
2938  stream->Add("%p ", reinterpret_cast<void*>(
2939  external_reference_value_.address()));
2940  } else {
2941  handle(Isolate::Current())->ShortPrint(stream);
2942  }
2943  if (!is_not_in_new_space_) {
2944  stream->Add("[new space] ");
2945  }
2946 }
2947 
2948 
2949 void HBinaryOperation::PrintDataTo(StringStream* stream) {
2950  left()->PrintNameTo(stream);
2951  stream->Add(" ");
2952  right()->PrintNameTo(stream);
2953  if (CheckFlag(kCanOverflow)) stream->Add(" !");
2954  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
2955 }
2956 
2957 
2958 void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
2959  ASSERT(CheckFlag(kFlexibleRepresentation));
2960  Representation new_rep = RepresentationFromInputs();
2961  UpdateRepresentation(new_rep, h_infer, "inputs");
2962 
2963  if (representation().IsSmi() && HasNonSmiUse()) {
2964  UpdateRepresentation(
2965  Representation::Integer32(), h_infer, "use requirements");
2966  }
2967 
2968  if (observed_output_representation_.IsNone()) {
2969  new_rep = RepresentationFromUses();
2970  UpdateRepresentation(new_rep, h_infer, "uses");
2971  } else {
2972  new_rep = RepresentationFromOutput();
2973  UpdateRepresentation(new_rep, h_infer, "output");
2974  }
2975 }
2976 
2977 
2978 Representation HBinaryOperation::RepresentationFromInputs() {
2979  // Determine the worst case of observed input representations and
2980  // the currently assumed output representation.
2981  Representation rep = representation();
2982  for (int i = 1; i <= 2; ++i) {
2983  rep = rep.generalize(observed_input_representation(i));
2984  }
2985  // If any of the actual input representation is more general than what we
2986  // have so far but not Tagged, use that representation instead.
2987  Representation left_rep = left()->representation();
2988  Representation right_rep = right()->representation();
2989  if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
2990  if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
2991 
2992  return rep;
2993 }
2994 
2995 
2996 bool HBinaryOperation::IgnoreObservedOutputRepresentation(
2997  Representation current_rep) {
2998  return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
2999  (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
3000  // Mul in Integer32 mode would be too precise.
3001  (!this->IsMul() || HMul::cast(this)->MulMinusOne());
3002 }
3003 
3004 
3005 Representation HBinaryOperation::RepresentationFromOutput() {
3006  Representation rep = representation();
3007  // Consider observed output representation, but ignore it if it's Double,
3008  // this instruction is not a division, and all its uses are truncating
3009  // to Integer32.
3010  if (observed_output_representation_.is_more_general_than(rep) &&
3011  !IgnoreObservedOutputRepresentation(rep)) {
3012  return observed_output_representation_;
3013  }
3014  return Representation::None();
3015 }
3016 
3017 
3018 void HBinaryOperation::AssumeRepresentation(Representation r) {
3019  set_observed_input_representation(1, r);
3020  set_observed_input_representation(2, r);
3021  HValue::AssumeRepresentation(r);
3022 }
3023 
3024 
3025 void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
3026  ASSERT(CheckFlag(kFlexibleRepresentation));
3027  Representation new_rep = RepresentationFromInputs();
3028  UpdateRepresentation(new_rep, h_infer, "inputs");
3029  // Do not care about uses.
3030 }
3031 
3032 
3033 Range* HBitwise::InferRange(Zone* zone) {
3034  if (op() == Token::BIT_XOR) {
3035  if (left()->HasRange() && right()->HasRange()) {
3036  // The maximum value has the high bit, and all bits below, set:
3037  // (1 << high) - 1.
3038  // If the range can be negative, the minimum int is a negative number with
3039  // the high bit, and all bits below, unset:
3040  // -(1 << high).
3041  // If it cannot be negative, conservatively choose 0 as minimum int.
3042  int64_t left_upper = left()->range()->upper();
3043  int64_t left_lower = left()->range()->lower();
3044  int64_t right_upper = right()->range()->upper();
3045  int64_t right_lower = right()->range()->lower();
3046 
3047  if (left_upper < 0) left_upper = ~left_upper;
3048  if (left_lower < 0) left_lower = ~left_lower;
3049  if (right_upper < 0) right_upper = ~right_upper;
3050  if (right_lower < 0) right_lower = ~right_lower;
3051 
3052  int high = MostSignificantBit(
3053  static_cast<uint32_t>(
3054  left_upper | left_lower | right_upper | right_lower));
3055 
3056  int64_t limit = 1;
3057  limit <<= high;
3058  int32_t min = (left()->range()->CanBeNegative() ||
3059  right()->range()->CanBeNegative())
3060  ? static_cast<int32_t>(-limit) : 0;
3061  return new(zone) Range(min, static_cast<int32_t>(limit - 1));
3062  }
3063  Range* result = HValue::InferRange(zone);
3064  result->set_can_be_minus_zero(false);
3065  return result;
3066  }
3067  const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
3068  int32_t left_mask = (left()->range() != NULL)
3069  ? left()->range()->Mask()
3070  : kDefaultMask;
3071  int32_t right_mask = (right()->range() != NULL)
3072  ? right()->range()->Mask()
3073  : kDefaultMask;
3074  int32_t result_mask = (op() == Token::BIT_AND)
3075  ? left_mask & right_mask
3076  : left_mask | right_mask;
3077  if (result_mask >= 0) return new(zone) Range(0, result_mask);
3078 
3079  Range* result = HValue::InferRange(zone);
3080  result->set_can_be_minus_zero(false);
3081  return result;
3082 }
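// Worked example for the BIT_XOR case above (illustrative): for left in
// [0, 5] and right in [0, 3], the OR of the sign-folded bounds is 7, whose
// most significant bit gives limit == 8 per the formula in the comment, so
// the result range is [0, 7]; had either input ranged negative, the minimum
// would be -8 instead of 0.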
3083 
3084 
3085 Range* HSar::InferRange(Zone* zone) {
3086  if (right()->IsConstant()) {
3087  HConstant* c = HConstant::cast(right());
3088  if (c->HasInteger32Value()) {
3089  Range* result = (left()->range() != NULL)
3090  ? left()->range()->Copy(zone)
3091  : new(zone) Range();
3092  result->Sar(c->Integer32Value());
3093  return result;
3094  }
3095  }
3096  return HValue::InferRange(zone);
3097 }
3098 
3099 
3100 Range* HShr::InferRange(Zone* zone) {
3101  if (right()->IsConstant()) {
3102  HConstant* c = HConstant::cast(right());
3103  if (c->HasInteger32Value()) {
3104  int shift_count = c->Integer32Value() & 0x1f;
3105  if (left()->range()->CanBeNegative()) {
3106  // Only compute bounds if the result always fits into an int32.
3107  return (shift_count >= 1)
3108  ? new(zone) Range(0,
3109  static_cast<uint32_t>(0xffffffff) >> shift_count)
3110  : new(zone) Range();
3111  } else {
3112  // For positive inputs we can use the >> operator.
3113  Range* result = (left()->range() != NULL)
3114  ? left()->range()->Copy(zone)
3115  : new(zone) Range();
3116  result->Sar(c->Integer32Value());
3117  return result;
3118  }
3119  }
3120  }
3121  return HValue::InferRange(zone);
3122 }
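// Illustrative examples: for a possibly-negative input, "x >>> 3" yields
// [0, 0x1fffffff], while a shift count of 0 yields an unconstrained Range()
// because the unsigned result may not fit in int32; for provably
// non-negative inputs the logical and arithmetic shifts agree, so Sar is
// reused.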
3123 
3124 
3125 Range* HShl::InferRange(Zone* zone) {
3126  if (right()->IsConstant()) {
3127  HConstant* c = HConstant::cast(right());
3128  if (c->HasInteger32Value()) {
3129  Range* result = (left()->range() != NULL)
3130  ? left()->range()->Copy(zone)
3131  : new(zone) Range();
3132  result->Shl(c->Integer32Value());
3133  return result;
3134  }
3135  }
3136  return HValue::InferRange(zone);
3137 }
3138 
3139 
3140 Range* HLoadNamedField::InferRange(Zone* zone) {
3141  if (access().representation().IsInteger8()) {
3142  return new(zone) Range(kMinInt8, kMaxInt8);
3143  }
3144  if (access().representation().IsUInteger8()) {
3145  return new(zone) Range(kMinUInt8, kMaxUInt8);
3146  }
3147  if (access().representation().IsInteger16()) {
3148  return new(zone) Range(kMinInt16, kMaxInt16);
3149  }
3150  if (access().representation().IsUInteger16()) {
3151  return new(zone) Range(kMinUInt16, kMaxUInt16);
3152  }
3153  if (access().IsStringLength()) {
3154  return new(zone) Range(0, String::kMaxLength);
3155  }
3156  return HValue::InferRange(zone);
3157 }
3158 
3159 
3160 Range* HLoadKeyed::InferRange(Zone* zone) {
3161  switch (elements_kind()) {
3162  case EXTERNAL_INT8_ELEMENTS:
3163  return new(zone) Range(kMinInt8, kMaxInt8);
3164  case EXTERNAL_UINT8_ELEMENTS:
3165  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3166  return new(zone) Range(kMinUInt8, kMaxUInt8);
3167  case EXTERNAL_INT16_ELEMENTS:
3168  return new(zone) Range(kMinInt16, kMaxInt16);
3169  case EXTERNAL_UINT16_ELEMENTS:
3170  return new(zone) Range(kMinUInt16, kMaxUInt16);
3171  default:
3172  return HValue::InferRange(zone);
3173  }
3174 }
3175 
3176 
3177 void HCompareGeneric::PrintDataTo(StringStream* stream) {
3178  stream->Add(Token::Name(token()));
3179  stream->Add(" ");
3180  HBinaryOperation::PrintDataTo(stream);
3181 }
3182 
3183 
3184 void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
3185  stream->Add(Token::Name(token()));
3186  stream->Add(" ");
3187  HControlInstruction::PrintDataTo(stream);
3188 }
3189 
3190 
3191 void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
3192  stream->Add(Token::Name(token()));
3193  stream->Add(" ");
3194  left()->PrintNameTo(stream);
3195  stream->Add(" ");
3196  right()->PrintNameTo(stream);
3197  HControlInstruction::PrintDataTo(stream);
3198 }
3199 
3200 
3201 void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
3202  left()->PrintNameTo(stream);
3203  stream->Add(" ");
3204  right()->PrintNameTo(stream);
3205  HControlInstruction::PrintDataTo(stream);
3206 }
3207 
3208 
3209 bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3210  if (known_successor_index() != kNoKnownSuccessorIndex) {
3211  *block = SuccessorAt(known_successor_index());
3212  return true;
3213  }
3214  if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
3215  *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
3216  ? FirstSuccessor() : SecondSuccessor();
3217  return true;
3218  }
3219  *block = NULL;
3220  return false;
3221 }
3222 
3223 
3224 bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
3225  if (constant->HasNumberValue()) return false;
3226  if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
3227  return true;
3228  }
3229  if (constant->IsUndetectable()) return false;
3230  InstanceType type = constant->GetInstanceType();
3231  return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
3232  (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
3233 }
3234 
3235 
3236 bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3237  if (FLAG_fold_constants && value()->IsConstant()) {
3238  *block = ConstantIsObject(HConstant::cast(value()), isolate())
3239  ? FirstSuccessor() : SecondSuccessor();
3240  return true;
3241  }
3242  *block = NULL;
3243  return false;
3244 }
3245 
3246 
3247 bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3248  if (FLAG_fold_constants && value()->IsConstant()) {
3249  *block = HConstant::cast(value())->HasStringValue()
3250  ? FirstSuccessor() : SecondSuccessor();
3251  return true;
3252  }
3253  *block = NULL;
3254  return false;
3255 }
3256 
3257 
3258 bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3259  if (FLAG_fold_constants && value()->IsConstant()) {
3260  *block = HConstant::cast(value())->IsUndetectable()
3261  ? FirstSuccessor() : SecondSuccessor();
3262  return true;
3263  }
3264  *block = NULL;
3265  return false;
3266 }
3267 
3268 
3269 bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3270  if (FLAG_fold_constants && value()->IsConstant()) {
3271  InstanceType type = HConstant::cast(value())->GetInstanceType();
3272  *block = (from_ <= type) && (type <= to_)
3273  ? FirstSuccessor() : SecondSuccessor();
3274  return true;
3275  }
3276  *block = NULL;
3277  return false;
3278 }
3279 
3280 
3281 void HCompareHoleAndBranch::InferRepresentation(
3282  HInferRepresentationPhase* h_infer) {
3283  ChangeRepresentation(value()->representation());
3284 }
3285 
3286 
3287 bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
3288  if (FLAG_fold_constants && value()->IsConstant()) {
3289  HConstant* constant = HConstant::cast(value());
3290  if (constant->HasDoubleValue()) {
3291  *block = IsMinusZero(constant->DoubleValue())
3292  ? FirstSuccessor() : SecondSuccessor();
3293  return true;
3294  }
3295  }
3296  if (value()->representation().IsSmiOrInteger32()) {
3297  // A Smi or Integer32 cannot contain minus zero.
3298  *block = SecondSuccessor();
3299  return true;
3300  }
3301  *block = NULL;
3302  return false;
3303 }
3304 
3305 
3306 void HCompareMinusZeroAndBranch::InferRepresentation(
3307  HInferRepresentationPhase* h_infer) {
3308  ChangeRepresentation(value()->representation());
3309 }
3310 
3311 
3312 
3313 void HGoto::PrintDataTo(StringStream* stream) {
3314  stream->Add("B%d", SuccessorAt(0)->block_id());
3315 }
3316 
3317 
3318 void HCompareNumericAndBranch::InferRepresentation(
3319  HInferRepresentationPhase* h_infer) {
3320  Representation left_rep = left()->representation();
3321  Representation right_rep = right()->representation();
3322  Representation observed_left = observed_input_representation(0);
3323  Representation observed_right = observed_input_representation(1);
3324 
3325  Representation rep = Representation::None();
3326  rep = rep.generalize(observed_left);
3327  rep = rep.generalize(observed_right);
3328  if (rep.IsNone() || rep.IsSmiOrInteger32()) {
3329  if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
3330  if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
3331  } else {
3332  rep = Representation::Double();
3333  }
3334 
3335  if (rep.IsDouble()) {
3336  // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
3337  // and !=) have special handling of undefined, e.g. undefined == undefined
3338  // is 'true'. Relational comparisons have different semantics, first
3339  // calling ToPrimitive() on their arguments. The standard Crankshaft
3340  // tagged-to-double conversion to ensure the HCompareNumericAndBranch's
3341  // inputs are doubles caused 'undefined' to be converted to NaN. That's
3342  // compatible out-of-the-box with ordered relational comparisons (<, >, <=,
3343  // >=). However, for equality comparisons (and for 'in' and 'instanceof'),
3344  // it is not consistent with the spec. For example, it would cause undefined
3345  // == undefined (should be true) to be evaluated as NaN == NaN
3346  // (false). Therefore, any comparisons other than ordered relational
3347  // comparisons must cause a deopt when one of their arguments is undefined.
3348  // See also v8:1434
3349  if (!Token::IsOrderedRelationalCompareOp(token_)) {
3350  SetFlag(kAllowUndefinedAsNaN);
3351  }
3352  }
3353  ChangeRepresentation(rep);
3354 }
3355 
3356 
3357 void HParameter::PrintDataTo(StringStream* stream) {
3358  stream->Add("%u", index());
3359 }
3360 
3361 
3362 void HLoadNamedField::PrintDataTo(StringStream* stream) {
3363  object()->PrintNameTo(stream);
3364  access_.PrintTo(stream);
3365 
3366  if (HasDependency()) {
3367  stream->Add(" ");
3368  dependency()->PrintNameTo(stream);
3369  }
3370 }
3371 
3372 
3373 HCheckMaps* HCheckMaps::New(Zone* zone,
3374  HValue* context,
3375  HValue* value,
3376  Handle<Map> map,
3377  CompilationInfo* info,
3378  HValue* typecheck) {
3379  HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
3380  check_map->Add(map, zone);
3381  if (map->CanOmitMapChecks() &&
3382  value->IsConstant() &&
3383  HConstant::cast(value)->HasMap(map)) {
3384  // TODO(titzer): collect dependent map checks into a list.
3385  check_map->omit_ = true;
3386  if (map->CanTransition()) {
3387  map->AddDependentCompilationInfo(
3388  DependentCode::kPrototypeCheckGroup, info);
3389  }
3390  }
3391  return check_map;
3392 }
3393 
3394 
3395 void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
3396  object()->PrintNameTo(stream);
3397  stream->Add(".");
3398  stream->Add(String::cast(*name())->ToCString().get());
3399 }
3400 
3401 
3402 void HLoadKeyed::PrintDataTo(StringStream* stream) {
3403  if (!is_external()) {
3404  elements()->PrintNameTo(stream);
3405  } else {
3406  ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
3407  elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
3408  elements()->PrintNameTo(stream);
3409  stream->Add(".");
3410  stream->Add(ElementsKindToString(elements_kind()));
3411  }
3412 
3413  stream->Add("[");
3414  key()->PrintNameTo(stream);
3415  if (IsDehoisted()) {
3416  stream->Add(" + %d]", index_offset());
3417  } else {
3418  stream->Add("]");
3419  }
3420 
3421  if (HasDependency()) {
3422  stream->Add(" ");
3423  dependency()->PrintNameTo(stream);
3424  }
3425 
3426  if (RequiresHoleCheck()) {
3427  stream->Add(" check_hole");
3428  }
3429 }
3430 
3431 
3432 bool HLoadKeyed::UsesMustHandleHole() const {
3433  if (IsFastPackedElementsKind(elements_kind())) {
3434  return false;
3435  }
3436 
3437  if (IsExternalArrayElementsKind(elements_kind())) {
3438  return false;
3439  }
3440 
3441  if (hole_mode() == ALLOW_RETURN_HOLE) {
3442  if (IsFastDoubleElementsKind(elements_kind())) {
3443  return AllUsesCanTreatHoleAsNaN();
3444  }
3445  return true;
3446  }
3447 
3448  if (IsFastDoubleElementsKind(elements_kind())) {
3449  return false;
3450  }
3451 
3452  // Holes are only returned as tagged values.
3453  if (!representation().IsTagged()) {
3454  return false;
3455  }
3456 
3457  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
3458  HValue* use = it.value();
3459  if (!use->IsChange()) return false;
3460  }
3461 
3462  return true;
3463 }
3464 
3465 
3466 bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
3467  return IsFastDoubleElementsKind(elements_kind()) &&
3468  CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
3469 }
3470 
3471 
3472 bool HLoadKeyed::RequiresHoleCheck() const {
3473  if (IsFastPackedElementsKind(elements_kind())) {
3474  return false;
3475  }
3476 
3477  if (IsExternalArrayElementsKind(elements_kind())) {
3478  return false;
3479  }
3480 
3481  return !UsesMustHandleHole();
3482 }
3483 
3484 
3485 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
3486  object()->PrintNameTo(stream);
3487  stream->Add("[");
3488  key()->PrintNameTo(stream);
3489  stream->Add("]");
3490 }
3491 
3492 
3493 HValue* HLoadKeyedGeneric::Canonicalize() {
3494  // Recognize generic keyed loads that use property name generated
3495  // by for-in statement as a key and rewrite them into fast property load
3496  // by index.
3497  if (key()->IsLoadKeyed()) {
3498  HLoadKeyed* key_load = HLoadKeyed::cast(key());
3499  if (key_load->elements()->IsForInCacheArray()) {
3500  HForInCacheArray* names_cache =
3501  HForInCacheArray::cast(key_load->elements());
3502 
3503  if (names_cache->enumerable() == object()) {
3504  HForInCacheArray* index_cache =
3505  names_cache->index_cache();
3506  HCheckMapValue* map_check =
3507  HCheckMapValue::New(block()->graph()->zone(),
3508  block()->graph()->GetInvalidContext(),
3509  object(),
3510  names_cache->map());
3511  HInstruction* index = HLoadKeyed::New(
3512  block()->graph()->zone(),
3513  block()->graph()->GetInvalidContext(),
3514  index_cache,
3515  key_load->key(),
3516  key_load->key(),
3517  key_load->elements_kind());
3518  map_check->InsertBefore(this);
3519  index->InsertBefore(this);
3520  return Prepend(new(block()->zone()) HLoadFieldByIndex(
3521  object(), index));
3522  }
3523  }
3524  }
3525 
3526  return this;
3527 }
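// Illustrative example: in "for (var k in o) { use(o[k]); }" the key o[k]
// comes from the for-in name cache, so the generic load is rewritten into a
// map check plus an HLoadFieldByIndex that reads the parallel index cache.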
3528 
3529 
3530 void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
3531  object()->PrintNameTo(stream);
3532  stream->Add(".");
3533  ASSERT(name()->IsString());
3534  stream->Add(String::cast(*name())->ToCString().get());
3535  stream->Add(" = ");
3536  value()->PrintNameTo(stream);
3537 }
3538 
3539 
3540 void HStoreNamedField::PrintDataTo(StringStream* stream) {
3541  object()->PrintNameTo(stream);
3542  access_.PrintTo(stream);
3543  stream->Add(" = ");
3544  value()->PrintNameTo(stream);
3545  if (NeedsWriteBarrier()) {
3546  stream->Add(" (write-barrier)");
3547  }
3548  if (has_transition()) {
3549  stream->Add(" (transition map %p)", *transition_map());
3550  }
3551 }
3552 
3553 
3554 void HStoreKeyed::PrintDataTo(StringStream* stream) {
3555  if (!is_external()) {
3556  elements()->PrintNameTo(stream);
3557  } else {
3558  elements()->PrintNameTo(stream);
3559  stream->Add(".");
3560  stream->Add(ElementsKindToString(elements_kind()));
3561  ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
3562  elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
3563  }
3564 
3565  stream->Add("[");
3566  key()->PrintNameTo(stream);
3567  if (IsDehoisted()) {
3568  stream->Add(" + %d] = ", index_offset());
3569  } else {
3570  stream->Add("] = ");
3571  }
3572 
3573  value()->PrintNameTo(stream);
3574 }
3575 
3576 
3577 void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
3578  object()->PrintNameTo(stream);
3579  stream->Add("[");
3580  key()->PrintNameTo(stream);
3581  stream->Add("] = ");
3582  value()->PrintNameTo(stream);
3583 }
3584 
3585 
3586 void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
3587  object()->PrintNameTo(stream);
3588  ElementsKind from_kind = original_map().handle()->elements_kind();
3589  ElementsKind to_kind = transitioned_map().handle()->elements_kind();
3590  stream->Add(" %p [%s] -> %p [%s]",
3591  *original_map().handle(),
3592  ElementsAccessor::ForKind(from_kind)->name(),
3593  *transitioned_map().handle(),
3594  ElementsAccessor::ForKind(to_kind)->name());
3595  if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
3596 }
3597 
3598 
3599 void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
3600  stream->Add("[%p]", *cell().handle());
3601  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
3602  if (details_.IsReadOnly()) stream->Add(" (read-only)");
3603 }
3604 
3605 
3606 bool HLoadGlobalCell::RequiresHoleCheck() const {
3607  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
3608  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
3609  HValue* use = it.value();
3610  if (!use->IsChange()) return true;
3611  }
3612  return false;
3613 }
3614 
3615 
3616 void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
3617  stream->Add("%o ", *name());
3618 }
3619 
3620 
3621 void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
3622  base_object()->PrintNameTo(stream);
3623  stream->Add(" offset ");
3624  offset()->PrintTo(stream);
3625 }
3626 
3627 
3628 void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
3629  stream->Add("[%p] = ", *cell().handle());
3630  value()->PrintNameTo(stream);
3631  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
3632  if (details_.IsReadOnly()) stream->Add(" (read-only)");
3633 }
3634 
3635 
3636 void HLoadContextSlot::PrintDataTo(StringStream* stream) {
3637  value()->PrintNameTo(stream);
3638  stream->Add("[%d]", slot_index());
3639 }
3640 
3641 
3642 void HStoreContextSlot::PrintDataTo(StringStream* stream) {
3643  context()->PrintNameTo(stream);
3644  stream->Add("[%d] = ", slot_index());
3645  value()->PrintNameTo(stream);
3646 }
3647 
3648 
3649 // Implementation of type inference and type conversions. Calculates
3650 // the inferred type of this instruction based on the input operands.
3651 
3652 HType HValue::CalculateInferredType() {
3653  return type_;
3654 }
3655 
3656 
3657 HType HPhi::CalculateInferredType() {
3658  if (OperandCount() == 0) return HType::Tagged();
3659  HType result = OperandAt(0)->type();
3660  for (int i = 1; i < OperandCount(); ++i) {
3661  HType current = OperandAt(i)->type();
3662  result = result.Combine(current);
3663  }
3664  return result;
3665 }
3666 
3667 
3668 HType HChange::CalculateInferredType() {
3669  if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
3670  return type();
3671 }
3672 
3673 
3674 Representation HUnaryMathOperation::RepresentationFromInputs() {
3675  Representation rep = representation();
3676  // If any of the actual input representation is more general than what we
3677  // have so far but not Tagged, use that representation instead.
3678  Representation input_rep = value()->representation();
3679  if (!input_rep.IsTagged()) {
3680  rep = rep.generalize(input_rep);
3681  }
3682  return rep;
3683 }
3684 
3685 
3686 bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
3687  HValue* dominator) {
3688  ASSERT(side_effect == kNewSpacePromotion);
3689  Zone* zone = block()->zone();
3690  if (!FLAG_use_allocation_folding) return false;
3691 
3692  // Try to fold allocations together with their dominating allocations.
3693  if (!dominator->IsAllocate()) {
3694  if (FLAG_trace_allocation_folding) {
3695  PrintF("#%d (%s) cannot fold into #%d (%s)\n",
3696  id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
3697  }
3698  return false;
3699  }
3700 
3701  // Check whether we are folding within the same block for local folding.
3702  if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
3703  if (FLAG_trace_allocation_folding) {
3704  PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
3705  id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
3706  }
3707  return false;
3708  }
3709 
3710  HAllocate* dominator_allocate = HAllocate::cast(dominator);
3711  HValue* dominator_size = dominator_allocate->size();
3712  HValue* current_size = size();
3713 
3714  // TODO(hpayer): Add support for non-constant allocation in dominator.
3715  if (!current_size->IsInteger32Constant() ||
3716  !dominator_size->IsInteger32Constant()) {
3717  if (FLAG_trace_allocation_folding) {
3718  PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n",
3719  id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
3720  }
3721  return false;
3722  }
3723 
3724  dominator_allocate = GetFoldableDominator(dominator_allocate);
3725  if (dominator_allocate == NULL) {
3726  return false;
3727  }
3728 
3729  ASSERT((IsNewSpaceAllocation() &&
3730  dominator_allocate->IsNewSpaceAllocation()) ||
3731  (IsOldDataSpaceAllocation() &&
3732  dominator_allocate->IsOldDataSpaceAllocation()) ||
3733  (IsOldPointerSpaceAllocation() &&
3734  dominator_allocate->IsOldPointerSpaceAllocation()));
3735 
3736  // First update the size of the dominator allocate instruction.
3737  dominator_size = dominator_allocate->size();
3738  int32_t original_object_size =
3739  HConstant::cast(dominator_size)->GetInteger32Constant();
3740  int32_t dominator_size_constant = original_object_size;
3741  int32_t current_size_constant =
3742  HConstant::cast(current_size)->GetInteger32Constant();
3743  int32_t new_dominator_size = dominator_size_constant + current_size_constant;
3744 
3745  if (MustAllocateDoubleAligned()) {
3746  if (!dominator_allocate->MustAllocateDoubleAligned()) {
3747  dominator_allocate->MakeDoubleAligned();
3748  }
3749  if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
3750  dominator_size_constant += kDoubleSize / 2;
3751  new_dominator_size += kDoubleSize / 2;
3752  }
3753  }
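// Worked example of the padding above (illustrative, assuming a 32-bit
// target where kDoubleSize / 2 == 4 and kDoubleAlignmentMask == 7): a
// dominator of 20 bytes followed by a double-aligned 12-byte allocation
// pads the dominator to 24 bytes, so new_dominator_size becomes
// 20 + 12 + 4 == 36 and the folded object starts on an 8-byte boundary.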
3754 
3755  // Since we clear the first word after the folded memory, we cannot use the
3756  // whole Page::kMaxRegularHeapObjectSize.
3757  if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
3758  if (FLAG_trace_allocation_folding) {
3759  PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
3760  id(), Mnemonic(), dominator_allocate->id(),
3761  dominator_allocate->Mnemonic(), new_dominator_size);
3762  }
3763  return false;
3764  }
3765 
3766  HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
3767  zone,
3768  context(),
3769  new_dominator_size,
3770  Representation::None(),
3771  dominator_allocate);
3772  dominator_allocate->UpdateSize(new_dominator_size_constant);
3773 
3774 #ifdef VERIFY_HEAP
3775  if (FLAG_verify_heap && dominator_allocate->IsNewSpaceAllocation()) {
3776  dominator_allocate->MakePrefillWithFiller();
3777  } else {
3778  // TODO(hpayer): This is a short-term hack to make allocation mementos
3779  // work again in new space.
3780  dominator_allocate->ClearNextMapWord(original_object_size);
3781  }
3782 #else
3783  // TODO(hpayer): This is a short-term hack to make allocation mementos
3784  // work again in new space.
3785  dominator_allocate->ClearNextMapWord(original_object_size);
3786 #endif
3787 
3788  dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
3789 
3790  // After that, replace the dominated allocate instruction.
3791  HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
3792  zone,
3793  context(),
3794  dominator_size_constant,
3795  Representation::None(),
3796  this);
3797 
3798  HInstruction* dominated_allocate_instr =
3799  HInnerAllocatedObject::New(zone,
3800  context(),
3801  dominator_allocate,
3802  inner_offset,
3803  type());
3804  dominated_allocate_instr->InsertBefore(this);
3805  DeleteAndReplaceWith(dominated_allocate_instr);
3806  if (FLAG_trace_allocation_folding) {
3807  PrintF("#%d (%s) folded into #%d (%s)\n",
3808  id(), Mnemonic(), dominator_allocate->id(),
3809  dominator_allocate->Mnemonic());
3810  }
3811  return true;
3812 }
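// Net effect of a successful fold (illustrative sketch): the dominating
// HAllocate grows to cover both objects and the dominated HAllocate is
// replaced by an HInnerAllocatedObject that merely offsets into the
// dominator's reservation, e.g.
//
//   a = HAllocate(32)          a = HAllocate(48)   // size updated
//   ...                   =>   ...
//   b = HAllocate(16)          b = HInnerAllocatedObject(a, 32)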
3813 
3814 
3815 HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
3816  if (!IsFoldable(dominator)) {
3817  // We cannot hoist old space allocations over new space allocations.
3818  if (IsNewSpaceAllocation() || dominator->IsNewSpaceAllocation()) {
3819  if (FLAG_trace_allocation_folding) {
3820  PrintF("#%d (%s) cannot fold into #%d (%s), new space hoisting\n",
3821  id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
3822  }
3823  return NULL;
3824  }
3825 
3826  HAllocate* dominator_dominator = dominator->dominating_allocate_;
3827 
3828  // We can hoist old data space allocations over an old pointer space
3829  // allocation and vice versa. For that we have to check the dominator
3830  // of the dominator allocate instruction.
3831  if (dominator_dominator == NULL) {
3832  dominating_allocate_ = dominator;
3833  if (FLAG_trace_allocation_folding) {
3834  PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n",
3835  id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
3836  }
3837  return NULL;
3838  }
3839 
3840  // We can only fold old space allocations that are in the same basic block,
3841  // since it is not guaranteed that we fill up the whole allocated old
3842  // space memory.
3843  // TODO(hpayer): Remove this limitation and add filler maps for each
3844  // allocation as soon as we have store elimination.
3845  if (block()->block_id() != dominator_dominator->block()->block_id()) {
3846  if (FLAG_trace_allocation_folding) {
3847  PrintF("#%d (%s) cannot fold into #%d (%s), different basic blocks\n",
3848  id(), Mnemonic(), dominator_dominator->id(),
3849  dominator_dominator->Mnemonic());
3850  }
3851  return NULL;
3852  }
3853 
3854  ASSERT((IsOldDataSpaceAllocation() &&
3855  dominator_dominator->IsOldDataSpaceAllocation()) ||
3856  (IsOldPointerSpaceAllocation() &&
3857  dominator_dominator->IsOldPointerSpaceAllocation()));
3858 
3859  int32_t current_size = HConstant::cast(size())->GetInteger32Constant();
3860  HStoreNamedField* dominator_free_space_size =
3861  dominator->filler_free_space_size_;
3862  if (dominator_free_space_size != NULL) {
3863  // We already hoisted one old space allocation, i.e., we already installed
3864  // a filler map. Hence, we just have to update the free space size.
3865  dominator->UpdateFreeSpaceFiller(current_size);
3866  } else {
3867  // This is the first old space allocation that gets hoisted. We have to
3868  // install a filler map since the following allocation may cause a GC.
3869  dominator->CreateFreeSpaceFiller(current_size);
3870  }
3871 
3872  // We can hoist the old space allocation over the actual dominator.
3873  return dominator_dominator;
3874  }
3875  return dominator;
3876 }
3877 
3878 
3879 void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
3880  ASSERT(filler_free_space_size_ != NULL);
3881  Zone* zone = block()->zone();
3882  // We must explicitly force Smi representation here because on x64 we
3883  // would otherwise automatically choose int32, but the actual store
3884  // requires a Smi-tagged value.
3885  HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
3886  zone,
3887  context(),
3888  filler_free_space_size_->value()->GetInteger32Constant() +
3889  free_space_size,
3890  Representation::Smi(),
3891  filler_free_space_size_);
3892  filler_free_space_size_->UpdateValue(new_free_space_size);
3893 }
3894 
3895 
3896 void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
3897  ASSERT(filler_free_space_size_ == NULL);
3898  Zone* zone = block()->zone();
3899  HInstruction* free_space_instr =
3900  HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
3901  dominating_allocate_->size(), type());
3902  free_space_instr->InsertBefore(this);
3903  HConstant* filler_map = HConstant::New(
3904  zone,
3905  context(),
3906  isolate()->factory()->free_space_map());
3907  filler_map->FinalizeUniqueness(); // TODO(titzer): should be initialized already
3908  filler_map->InsertAfter(free_space_instr);
3909  HInstruction* store_map = HStoreNamedField::New(zone, context(),
3910  free_space_instr, HObjectAccess::ForMap(), filler_map);
3911  store_map->SetFlag(HValue::kHasNoObservableSideEffects);
3912  store_map->InsertAfter(filler_map);
3913 
3914  // We must explicitly force Smi representation here because on x64 we
3915  // would otherwise automatically choose int32, but the actual store
3916  // requires a Smi-tagged value.
3917  HConstant* filler_size = HConstant::CreateAndInsertAfter(
3918  zone, context(), free_space_size, Representation::Smi(), store_map);
3919  // Must force Smi representation for x64 (see comment above).
3920  HObjectAccess access =
3921  HObjectAccess::ForMapAndOffset(isolate()->factory()->free_space_map(),
3922  FreeSpace::kSizeOffset,
3923  Representation::Smi());
3924  HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
3925  free_space_instr, access, filler_size);
3926  store_size->SetFlag(HValue::kHasNoObservableSideEffects);
3927  store_size->InsertAfter(filler_size);
3928  filler_free_space_size_ = store_size;
3929 }
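// Layout note (illustrative): the filler built above mimics a FreeSpace
// object, with word 0 of the not-yet-used tail holding the free_space_map
// and the size field holding the Smi-tagged free size, so a GC that runs
// between the folded allocations still sees a well-formed, skippable object.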
3930 
3931 
3932 void HAllocate::ClearNextMapWord(int offset) {
3933  if (MustClearNextMapWord()) {
3934  Zone* zone = block()->zone();
3935  HObjectAccess access =
3936  HObjectAccess::ForObservableJSObjectOffset(offset);
3937  HStoreNamedField* clear_next_map =
3938  HStoreNamedField::New(zone, context(), this, access,
3939  block()->graph()->GetConstant0());
3940  clear_next_map->ClearAllSideEffects();
3941  clear_next_map->InsertAfter(this);
3942  }
3943 }
3944 
3945 
3946 void HAllocate::PrintDataTo(StringStream* stream) {
3947  size()->PrintNameTo(stream);
3948  stream->Add(" (");
3949  if (IsNewSpaceAllocation()) stream->Add("N");
3950  if (IsOldPointerSpaceAllocation()) stream->Add("P");
3951  if (IsOldDataSpaceAllocation()) stream->Add("D");
3952  if (MustAllocateDoubleAligned()) stream->Add("A");
3953  if (MustPrefillWithFiller()) stream->Add("F");
3954  stream->Add(")");
3955 }
3956 
3957 
3958 bool HStoreKeyed::NeedsCanonicalization() {
3959  // If the value is an integer, a Smi, or the result of a keyed load or a
3960  // constant, then it is either a non-hole value or, for a constant, the
3961  // hole is only being stored explicitly: no need for canonicalization.
3962  //
3963  // The exception to that is keyed loads from external float or double arrays:
3964  // these can load an arbitrary representation of NaN.
3965 
3966  if (value()->IsConstant()) {
3967  return false;
3968  }
3969 
3970  if (value()->IsLoadKeyed()) {
3971  return IsExternalFloatOrDoubleElementsKind(
3972  HLoadKeyed::cast(value())->elements_kind());
3973  }
3974 
3975  if (value()->IsChange()) {
3976  if (HChange::cast(value())->from().IsSmiOrInteger32()) {
3977  return false;
3978  }
3979  if (HChange::cast(value())->value()->type().IsSmi()) {
3980  return false;
3981  }
3982  }
3983  return true;
3984 }
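// Background (illustrative): fast double arrays reserve one particular NaN
// bit pattern as "the hole". A NaN loaded from an external float or double
// array may carry any bit pattern, so it must be canonicalized before the
// store, or it could later be misread as a hole.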
3985 
3986 
3987 #define H_CONSTANT_INT(val) \
3988 HConstant::New(zone, context, static_cast<int32_t>(val))
3989 #define H_CONSTANT_DOUBLE(val) \
3990 HConstant::New(zone, context, static_cast<double>(val))
3991 
3992 #define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
3993 HInstruction* HInstr::New( \
3994  Zone* zone, HValue* context, HValue* left, HValue* right) { \
3995  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
3996  HConstant* c_left = HConstant::cast(left); \
3997  HConstant* c_right = HConstant::cast(right); \
3998  if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
3999  double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
4000  if (IsInt32Double(double_res)) { \
4001  return H_CONSTANT_INT(double_res); \
4002  } \
4003  return H_CONSTANT_DOUBLE(double_res); \
4004  } \
4005  } \
4006  return new(zone) HInstr(context, left, right); \
4007 }
4008 
4009 
4010 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
4011 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
4012 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
4013 
4014 #undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
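// Constant-folding example for the instructions defined above (illustrative):
// building HAdd on two constants yields a constant directly, e.g. adding the
// constants 3 and 4 returns HConstant(7) as an Integer32, while 0.1 + 0.2
// folds to HConstant(0.30000000000000004) as a double because the sum does
// not fit an int32.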
4015 
4016 
4017 HInstruction* HStringAdd::New(Zone* zone,
4018  HValue* context,
4019  HValue* left,
4020  HValue* right,
4021  PretenureFlag pretenure_flag,
4022  StringAddFlags flags,
4023  Handle<AllocationSite> allocation_site) {
4024  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4025  HConstant* c_right = HConstant::cast(right);
4026  HConstant* c_left = HConstant::cast(left);
4027  if (c_left->HasStringValue() && c_right->HasStringValue()) {
4028  Handle<String> left_string = c_left->StringValue();
4029  Handle<String> right_string = c_right->StringValue();
4030  // Prevent a possible exception from an invalid string length.
4031  if (left_string->length() + right_string->length() < String::kMaxLength) {
4032  Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
4033  c_left->StringValue(), c_right->StringValue());
4034  ASSERT(!concat.is_null());
4035  return HConstant::New(zone, context, concat);
4036  }
4037  }
4038  }
4039  return new(zone) HStringAdd(
4040  context, left, right, pretenure_flag, flags, allocation_site);
4041 }
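// Folding example (illustrative): with FLAG_fold_constants on, adding the
// constants "foo" and "bar" produces HConstant("foobar") at graph-build
// time; the HStringAdd instruction is only emitted when an operand is
// non-constant or the result would reach String::kMaxLength.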
4042 
4043 
4044 void HStringAdd::PrintDataTo(StringStream* stream) {
4045  if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
4046  stream->Add("_CheckBoth");
4047  } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
4048  stream->Add("_CheckLeft");
4049  } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
4050  stream->Add("_CheckRight");
4051  }
4052  HBinaryOperation::PrintDataTo(stream);
4053  stream->Add(" (");
4054  if (pretenure_flag() == NOT_TENURED) stream->Add("N");
4055  else if (pretenure_flag() == TENURED) stream->Add("D");
4056  stream->Add(")");
4057 }
4058 
4059 
4060 HInstruction* HStringCharFromCode::New(
4061  Zone* zone, HValue* context, HValue* char_code) {
4062  if (FLAG_fold_constants && char_code->IsConstant()) {
4063  HConstant* c_code = HConstant::cast(char_code);
4064  Isolate* isolate = zone->isolate();
4065  if (c_code->HasNumberValue()) {
4066  if (std::isfinite(c_code->DoubleValue())) {
4067  uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
4068  return HConstant::New(zone, context,
4069  LookupSingleCharacterStringFromCode(isolate, code));
4070  }
4071  return HConstant::New(zone, context, isolate->factory()->empty_string());
4072  }
4073  }
4074  return new(zone) HStringCharFromCode(context, char_code);
4075 }
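// Folding example (illustrative): a constant char code 65 folds to the
// one-character string "A" (codes are first masked to 16 bits), while a
// non-finite code such as NaN or Infinity folds to the empty string.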
4076 
4077 
4078 HInstruction* HUnaryMathOperation::New(
4079  Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) {
4080  do {
4081  if (!FLAG_fold_constants) break;
4082  if (!value->IsConstant()) break;
4083  HConstant* constant = HConstant::cast(value);
4084  if (!constant->HasNumberValue()) break;
4085  double d = constant->DoubleValue();
4086  if (std::isnan(d)) { // NaN poisons everything.
4087  return H_CONSTANT_DOUBLE(OS::nan_value());
4088  }
4089  if (std::isinf(d)) { // +Infinity and -Infinity.
4090  switch (op) {
4091  case kMathExp:
4092  return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
4093  case kMathLog:
4094  case kMathSqrt:
4095  return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
4096  case kMathPowHalf:
4097  case kMathAbs:
4098  return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
4099  case kMathRound:
4100  case kMathFloor:
4101  return H_CONSTANT_DOUBLE(d);
4102  case kMathClz32:
4103  return H_CONSTANT_INT(32);
4104  default:
4105  UNREACHABLE();
4106  break;
4107  }
4108  }
4109  switch (op) {
4110  case kMathExp:
4111  return H_CONSTANT_DOUBLE(fast_exp(d));
4112  case kMathLog:
4113  return H_CONSTANT_DOUBLE(std::log(d));
4114  case kMathSqrt:
4115  return H_CONSTANT_DOUBLE(fast_sqrt(d));
4116  case kMathPowHalf:
4117  return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
4118  case kMathAbs:
4119  return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
4120  case kMathRound:
4121  // -0.5 .. -0.0 round to -0.0.
4122  if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
4123  // Doubles are represented as Significand * 2 ^ Exponent. If the
4124  // Exponent is not negative, the double value is already an integer.
4125  if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
4126  return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
4127  case kMathFloor:
4128  return H_CONSTANT_DOUBLE(std::floor(d));
4129  case kMathClz32: {
4130  uint32_t i = DoubleToUint32(d);
4131  return H_CONSTANT_INT(
4132  (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
4133  }
4134  default:
4135  UNREACHABLE();
4136  break;
4137  }
4138  } while (false);
4139  return new(zone) HUnaryMathOperation(context, value, op);
4140 }
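// Rounding corner cases handled above (illustrative): round(-0.25) lies in
// the [-0.5, -0) range and folds to -0.0; round(2.5) folds to
// floor(2.5 + 0.5) == 3; and any double with a non-negative exponent
// (e.g. 2^52) is already integral, avoiding precision loss from d + 0.5.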
4141 
4142 
4143 HInstruction* HPower::New(Zone* zone,
4144  HValue* context,
4145  HValue* left,
4146  HValue* right) {
4147  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4148  HConstant* c_left = HConstant::cast(left);
4149  HConstant* c_right = HConstant::cast(right);
4150  if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
4151  double result = power_helper(c_left->DoubleValue(),
4152  c_right->DoubleValue());
4153  return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result);
4154  }
4155  }
4156  return new(zone) HPower(left, right);
4157 }
4158 
4159 
4160 HInstruction* HMathMinMax::New(
4161  Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) {
4162  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4163  HConstant* c_left = HConstant::cast(left);
4164  HConstant* c_right = HConstant::cast(right);
4165  if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
4166  double d_left = c_left->DoubleValue();
4167  double d_right = c_right->DoubleValue();
4168  if (op == kMathMin) {
4169  if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
4170  if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
4171  if (d_left == d_right) {
4172  // Handle +0 and -0.
4173  return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
4174  : d_right);
4175  }
4176  } else {
4177  if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
4178  if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
4179  if (d_left == d_right) {
4180  // Handle +0 and -0.
4181  return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
4182  : d_left);
4183  }
4184  }
4185  // All comparisons failed, must be NaN.
4186  return H_CONSTANT_DOUBLE(OS::nan_value());
4187  }
4188  }
4189  return new(zone) HMathMinMax(context, left, right, op);
4190 }
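// Signed-zero example (illustrative): min(+0, -0) must be -0 and
// max(+0, -0) must be +0, which is why the d_left == d_right case inspects
// the sign bit instead of returning either operand unconditionally.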
4191 
4192 
4193 HInstruction* HMod::New(Zone* zone,
4194  HValue* context,
4195  HValue* left,
4196  HValue* right) {
4197  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4198  HConstant* c_left = HConstant::cast(left);
4199  HConstant* c_right = HConstant::cast(right);
4200  if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
4201  int32_t dividend = c_left->Integer32Value();
4202  int32_t divisor = c_right->Integer32Value();
4203  if (dividend == kMinInt && divisor == -1) {
4204  return H_CONSTANT_DOUBLE(-0.0);
4205  }
4206  if (divisor != 0) {
4207  int32_t res = dividend % divisor;
4208  if ((res == 0) && (dividend < 0)) {
4209  return H_CONSTANT_DOUBLE(-0.0);
4210  }
4211  return H_CONSTANT_INT(res);
4212  }
4213  }
4214  }
4215  return new(zone) HMod(context, left, right);
4216 }
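// Folding examples (illustrative): 7 % 3 folds to the integer 1, while
// (-8) % 2 folds to the double -0.0 because the JS remainder takes the
// dividend's sign; kMinInt % -1 likewise folds to -0.0 rather than
// overflowing the int32 negation.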
4217 
4218 
4219 HInstruction* HDiv::New(
4220  Zone* zone, HValue* context, HValue* left, HValue* right) {
4221  // If left and right are constant values, try to return a constant value.
4222  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4223  HConstant* c_left = HConstant::cast(left);
4224  HConstant* c_right = HConstant::cast(right);
4225  if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
4226  if (c_right->DoubleValue() != 0) {
4227  double double_res = c_left->DoubleValue() / c_right->DoubleValue();
4228  if (IsInt32Double(double_res)) {
4229  return H_CONSTANT_INT(double_res);
4230  }
4231  return H_CONSTANT_DOUBLE(double_res);
4232  } else {
4233  int sign = Double(c_left->DoubleValue()).Sign() *
4234  Double(c_right->DoubleValue()).Sign(); // Right could be -0.
4235  return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
4236  }
4237  }
4238  }
4239  return new(zone) HDiv(context, left, right);
4240 }
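// Folding examples (illustrative): 6 / 3 folds to the integer 2; 1 / 0
// folds to +Infinity and 1 / -0 to -Infinity, which is why the sign is
// computed from both operands via Double::Sign() rather than assumed
// (note -0.0 compares equal to 0 in C++, so it reaches the else branch).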
4241 
4242 
4243 HInstruction* HBitwise::New(
4244  Zone* zone, HValue* context, Token::Value op, HValue* left, HValue* right) {
4245  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4246  HConstant* c_left = HConstant::cast(left);
4247  HConstant* c_right = HConstant::cast(right);
4248  if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
4249  int32_t result;
4250  int32_t v_left = c_left->NumberValueAsInteger32();
4251  int32_t v_right = c_right->NumberValueAsInteger32();
4252  switch (op) {
4253  case Token::BIT_XOR:
4254  result = v_left ^ v_right;
4255  break;
4256  case Token::BIT_AND:
4257  result = v_left & v_right;
4258  break;
4259  case Token::BIT_OR:
4260  result = v_left | v_right;
4261  break;
4262  default:
4263  result = 0; // Please the compiler.
4264  UNREACHABLE();
4265  }
4266  return H_CONSTANT_INT(result);
4267  }
4268  }
4269  return new(zone) HBitwise(context, op, left, right);
4270 }
4271 
4272 
4273 #define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
4274 HInstruction* HInstr::New( \
4275  Zone* zone, HValue* context, HValue* left, HValue* right) { \
4276  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
4277  HConstant* c_left = HConstant::cast(left); \
4278  HConstant* c_right = HConstant::cast(right); \
4279  if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
4280  return H_CONSTANT_INT(result); \
4281  } \
4282  } \
4283  return new(zone) HInstr(context, left, right); \
4284 }
4285 
4286 
4287 DEFINE_NEW_H_BITWISE_INSTR(HSar,
4288 c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
4289 DEFINE_NEW_H_BITWISE_INSTR(HShl,
4290 c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
4291 
4292 #undef DEFINE_NEW_H_BITWISE_INSTR
4293 
4294 
4295 HInstruction* HShr::New(
4296  Zone* zone, HValue* context, HValue* left, HValue* right) {
4297  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
4298  HConstant* c_left = HConstant::cast(left);
4299  HConstant* c_right = HConstant::cast(right);
4300  if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
4301  int32_t left_val = c_left->NumberValueAsInteger32();
4302  int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
4303  if ((right_val == 0) && (left_val < 0)) {
4304  return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
4305  }
4306  return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
4307  }
4308  }
4309  return new(zone) HShr(context, left, right);
4310 }
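// Unsigned-shift example (illustrative): folding -1 >>> 0 yields
// 4294967295, which does not fit in an int32, so the special case above
// materializes it as a double constant instead of an integer one.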
4311 
4312 
4313 HInstruction* HSeqStringGetChar::New(Zone* zone,
4314  HValue* context,
4315  String::Encoding encoding,
4316  HValue* string,
4317  HValue* index) {
4318  if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
4319  HConstant* c_string = HConstant::cast(string);
4320  HConstant* c_index = HConstant::cast(index);
4321  if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
4322  Handle<String> s = c_string->StringValue();
4323  int32_t i = c_index->Integer32Value();
4324  ASSERT_LE(0, i);
4325  ASSERT_LT(i, s->length());
4326  return H_CONSTANT_INT(s->Get(i));
4327  }
4328  }
4329  return new(zone) HSeqStringGetChar(encoding, string, index);
4330 }
4331 
4332 
4333 #undef H_CONSTANT_INT
4334 #undef H_CONSTANT_DOUBLE
4335 
4336 
4337 void HBitwise::PrintDataTo(StringStream* stream) {
4338  stream->Add(Token::Name(op_));
4339  stream->Add(" ");
4340  HBitwiseBinaryOperation::PrintDataTo(stream);
4341 }
4342 
4343 
4344 void HPhi::SimplifyConstantInputs() {
4345  // Convert constant inputs to integers when all uses are truncating.
4346  // This must happen before representation inference takes place.
4347  if (!CheckUsesForFlag(kTruncatingToInt32)) return;
4348  for (int i = 0; i < OperandCount(); ++i) {
4349  if (!OperandAt(i)->IsConstant()) return;
4350  }
4351  HGraph* graph = block()->graph();
4352  for (int i = 0; i < OperandCount(); ++i) {
4353  HConstant* operand = HConstant::cast(OperandAt(i));
4354  if (operand->HasInteger32Value()) {
4355  continue;
4356  } else if (operand->HasDoubleValue()) {
4357  HConstant* integer_input =
4358  HConstant::New(graph->zone(), graph->GetInvalidContext(),
4359  DoubleToInt32(operand->DoubleValue()));
4360  integer_input->InsertAfter(operand);
4361  SetOperandAt(i, integer_input);
4362  } else if (operand->HasBooleanValue()) {
4363  SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
4364  : graph->GetConstant0());
4365  } else if (operand->ImmortalImmovable()) {
4366  SetOperandAt(i, graph->GetConstant0());
4367  }
4368  }
4369  // Overwrite observed input representations because they are likely Tagged.
4370  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
4371  HValue* use = it.value();
4372  if (use->IsBinaryOperation()) {
4373  HBinaryOperation::cast(use)->set_observed_input_representation(
4374  it.index(), Representation::Smi());
4375  }
4376  }
4377 }
4378 
4379 
4380 void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
4381  ASSERT(CheckFlag(kFlexibleRepresentation));
4382  Representation new_rep = RepresentationFromInputs();
4383  UpdateRepresentation(new_rep, h_infer, "inputs");
4384  new_rep = RepresentationFromUses();
4385  UpdateRepresentation(new_rep, h_infer, "uses");
4386  new_rep = RepresentationFromUseRequirements();
4387  UpdateRepresentation(new_rep, h_infer, "use requirements");
4388 }
4389 
4390 
4391 Representation HPhi::RepresentationFromInputs() {
4392  Representation r = Representation::None();
4393  for (int i = 0; i < OperandCount(); ++i) {
4394  r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
4395  }
4396  return r;
4397 }
4398 
4399 
4400 // Returns a representation if all uses agree on the same representation.
4401 // Integer32 is also returned when some uses are Smi but others are Integer32.
4402 Representation HPhi::RepresentationFromUseRequirements() {
4403  Representation rep = Representation::None();
4404  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
4405  // Ignore the use requirement from code that never runs.
4406  if (it.value()->block()->IsUnreachable()) continue;
4407 
4408  // We check for observed_input_representation elsewhere.
4409  Representation use_rep =
4410  it.value()->RequiredInputRepresentation(it.index());
4411  if (rep.IsNone()) {
4412  rep = use_rep;
4413  continue;
4414  }
4415  if (use_rep.IsNone() || rep.Equals(use_rep)) continue;
4416  if (rep.generalize(use_rep).IsInteger32()) {
4417  rep = Representation::Integer32();
4418  continue;
4419  }
4420  return Representation::None();
4421  }
4422  return rep;
4423 }
4424 
4425 
4426 bool HPhi::HasNonSmiUse() {
4427  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
4428  // We check for observed_input_representation elsewhere.
4429  Representation use_rep =
4430  it.value()->RequiredInputRepresentation(it.index());
4431  if (!use_rep.IsNone() &&
4432  !use_rep.IsSmi() &&
4433  !use_rep.IsTagged()) {
4434  return true;
4435  }
4436  }
4437  return false;
4438 }
4439 
4440 
4441 // Node-specific verification code is only included in debug mode.
4442 #ifdef DEBUG
4443 
4444 void HPhi::Verify() {
4445  ASSERT(OperandCount() == block()->predecessors()->length());
4446  for (int i = 0; i < OperandCount(); ++i) {
4447  HValue* value = OperandAt(i);
4448  HBasicBlock* defining_block = value->block();
4449  HBasicBlock* predecessor_block = block()->predecessors()->at(i);
4450  ASSERT(defining_block == predecessor_block ||
4451  defining_block->Dominates(predecessor_block));
4452  }
4453 }
4454 
4455 
4456 void HSimulate::Verify() {
4457  HInstruction::Verify();
4458  ASSERT(HasAstId());
4459 }
4460 
4461 
4462 void HCheckHeapObject::Verify() {
4463  HInstruction::Verify();
4464  ASSERT(HasNoUses());
4465 }
4466 
4467 
4468 void HCheckValue::Verify() {
4469  HInstruction::Verify();
4470  ASSERT(HasNoUses());
4471 }
4472 
4473 #endif
4474 
4475 
4476 HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
4477  ASSERT(offset >= 0);
4478  ASSERT(offset < FixedArray::kHeaderSize);
4479  if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
4480  return HObjectAccess(kInobject, offset);
4481 }
4482 
4483 
4484 HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
4485  Representation representation) {
4486  ASSERT(offset >= 0);
4487  Portion portion = kInobject;
4488 
4489  if (offset == JSObject::kElementsOffset) {
4490  portion = kElementsPointer;
4491  } else if (offset == JSObject::kMapOffset) {
4492  portion = kMaps;
4493  }
4494  bool existing_inobject_property = true;
4495  if (!map.is_null()) {
4496  existing_inobject_property = (offset <
4497  map->instance_size() - map->unused_property_fields() * kPointerSize);
4498  }
4499  return HObjectAccess(portion, offset, representation, Handle<String>::null(),
4500  false, existing_inobject_property);
4501 }
4502 
4503 
4504 HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
4505  switch (offset) {
4506  case AllocationSite::kTransitionInfoOffset:
4507  return HObjectAccess(kInobject, offset, Representation::Tagged());
4508  case AllocationSite::kNestedSiteOffset:
4509  return HObjectAccess(kInobject, offset, Representation::Tagged());
4510  case AllocationSite::kPretenureDataOffset:
4511  return HObjectAccess(kInobject, offset, Representation::Smi());
4512  case AllocationSite::kPretenureCreateCountOffset:
4513  return HObjectAccess(kInobject, offset, Representation::Smi());
4514  case AllocationSite::kDependentCodeOffset:
4515  return HObjectAccess(kInobject, offset, Representation::Tagged());
4516  case AllocationSite::kWeakNextOffset:
4517  return HObjectAccess(kInobject, offset, Representation::Tagged());
4518  default:
4519  UNREACHABLE();
4520  }
4521  return HObjectAccess(kInobject, offset);
4522 }
4523 
4524 
4525 HObjectAccess HObjectAccess::ForContextSlot(int index) {
4526  ASSERT(index >= 0);
4527  Portion portion = kInobject;
4528  int offset = Context::kHeaderSize + index * kPointerSize;
4529  ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
4530  return HObjectAccess(portion, offset, Representation::Tagged());
4531 }
4532 
4533 
4534 HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
4535  ASSERT(offset >= 0);
4536  Portion portion = kInobject;
4537 
4538  if (offset == JSObject::kElementsOffset) {
4539  portion = kElementsPointer;
4540  } else if (offset == JSArray::kLengthOffset) {
4541  portion = kArrayLengths;
4542  } else if (offset == JSObject::kMapOffset) {
4543  portion = kMaps;
4544  }
4545  return HObjectAccess(portion, offset);
4546 }
4547 
4548 
4549 HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
4550  Representation representation) {
4551  ASSERT(offset >= 0);
4552  return HObjectAccess(kBackingStore, offset, representation,
4553  Handle<String>::null(), false, false);
4554 }
4555 
4556 
4557 HObjectAccess HObjectAccess::ForField(Handle<Map> map,
4558  LookupResult* lookup,
4559  Handle<String> name) {
4560  ASSERT(lookup->IsField() || lookup->IsTransitionToField());
4561  int index;
4562  Representation representation;
4563  if (lookup->IsField()) {
4564  index = lookup->GetLocalFieldIndexFromMap(*map);
4565  representation = lookup->representation();
4566  } else {
4567  Map* transition = lookup->GetTransitionTarget();
4568  int descriptor = transition->LastAdded();
4569  index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
4570  map->inobject_properties();
4571  PropertyDetails details =
4572  transition->instance_descriptors()->GetDetails(descriptor);
4573  representation = details.representation();
4574  }
4575  if (index < 0) {
4576  // Negative property indices are in-object properties, indexed
4577  // from the end of the fixed part of the object.
4578  int offset = (index * kPointerSize) + map->instance_size();
4579  return HObjectAccess(kInobject, offset, representation, name, false, true);
4580  } else {
4581  // Non-negative property indices are in the properties array.
4582  int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
4583  return HObjectAccess(kBackingStore, offset, representation, name,
4584  false, false);
4585  }
4586 }
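// Offset example (illustrative, assuming kPointerSize == 4): an in-object
// field with index -2 on a map whose instance_size is 16 lives at offset
// (-2 * 4) + 16 == 8 inside the object, while index 1 addresses offset
// (1 * 4) + FixedArray::kHeaderSize in the out-of-object properties array.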
4587 
4588 
4589 HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
4590  return HObjectAccess(
4591  kInobject, Cell::kValueOffset, Representation::Tagged(),
4592  Handle<String>(isolate->heap()->cell_value_string()));
4593 }
4594 
4595 
4596 void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
4597  // set the appropriate GVN flags for a given load or store instruction
4598  if (access_type == STORE) {
4599  // track dominating allocations in order to eliminate write barriers
4600  instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
4601  instr->SetFlag(HValue::kTrackSideEffectDominators);
4602  } else {
4603  // try to GVN loads, but don't hoist above map changes
4604  instr->SetFlag(HValue::kUseGVN);
4605  instr->SetDependsOnFlag(::v8::internal::kMaps);
4606  }
4607 
4608  switch (portion()) {
4609  case kArrayLengths:
4610  if (access_type == STORE) {
4611  instr->SetChangesFlag(::v8::internal::kArrayLengths);
4612  } else {
4613  instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
4614  }
4615  break;
4616  case kStringLengths:
4617  if (access_type == STORE) {
4618  instr->SetChangesFlag(::v8::internal::kStringLengths);
4619  } else {
4620  instr->SetDependsOnFlag(::v8::internal::kStringLengths);
4621  }
4622  break;
4623  case kInobject:
4624  if (access_type == STORE) {
4625  instr->SetChangesFlag(::v8::internal::kInobjectFields);
4626  } else {
4627  instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
4628  }
4629  break;
4630  case kDouble:
4631  if (access_type == STORE) {
4632  instr->SetChangesFlag(::v8::internal::kDoubleFields);
4633  } else {
4634  instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
4635  }
4636  break;
4637  case kBackingStore:
4638  if (access_type == STORE) {
4639  instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
4640  } else {
4641  instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
4642  }
4643  break;
4644  case kElementsPointer:
4645  if (access_type == STORE) {
4646  instr->SetChangesFlag(::v8::internal::kElementsPointer);
4647  } else {
4648  instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
4649  }
4650  break;
4651  case kMaps:
4652  if (access_type == STORE) {
4653  instr->SetChangesFlag(::v8::internal::kMaps);
4654  } else {
4655  instr->SetDependsOnFlag(::v8::internal::kMaps);
4656  }
4657  break;
4658  case kExternalMemory:
4659  if (access_type == STORE) {
4660  instr->SetChangesFlag(::v8::internal::kExternalMemory);
4661  } else {
4662  instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
4663  }
4664  break;
4665  }
4666 }
4667 
4668 
4669 void HObjectAccess::PrintTo(StringStream* stream) const {
4670  stream->Add(".");
4671 
4672  switch (portion()) {
4673  case kArrayLengths:
4674  case kStringLengths:
4675  stream->Add("%length");
4676  break;
4677  case kElementsPointer:
4678  stream->Add("%elements");
4679  break;
4680  case kMaps:
4681  stream->Add("%map");
4682  break;
4683  case kDouble: // fall through
4684  case kInobject:
4685  if (!name_.is_null()) {
4686  stream->Add(String::cast(*name_)->ToCString().get());
4687  }
4688  stream->Add("[in-object]");
4689  break;
4690  case kBackingStore:
4691  if (!name_.is_null()) {
4692  stream->Add(String::cast(*name_)->ToCString().get());
4693  }
4694  stream->Add("[backing-store]");
4695  break;
4696  case kExternalMemory:
4697  stream->Add("[external-memory]");
4698  break;
4699  }
4700 
4701  stream->Add("@%d", offset());
4702 }
4703 
4704 } } // namespace v8::internal
bool is_more_general_than(const Representation &other) const
bool HasObservableSideEffects() const
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
#define H_CONSTANT_INT(val)
const int kMinInt
Definition: globals.h:249
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V)
Definition: heap.h:233
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
virtual Representation observed_input_representation(int index) V8_OVERRIDE
const intptr_t kDoubleAlignmentMask
Definition: v8globals.h:53
bool IsExternalArrayElementsKind(ElementsKind kind)
const char * ToCString(const v8::String::Utf8Value &value)
static const int kValueOffset
Definition: objects.h:9547
static int SlotOffset(int index)
Definition: contexts.h:498
uint16_t current_
c_left NumberValueAsInteger32()<< (c_right-> NumberValueAsInteger32()&0x1f)) HInstruction *HShr::New(Zone *zone, HValue *context, HValue *left, HValue *right)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths true
Definition: flags.cc:208
static Representation Smi()
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
#define IMMORTAL_IMMOVABLE_ROOT(name)
virtual bool DataEquals(HValue *other)
static String * cast(Object *obj)
HUseListNode * RemoveUse(HValue *value, int index)
HInstruction * previous() const
static const char * Name(Value tok)
Definition: token.h:198
const int kMaxUInt16
Definition: globals.h:256
const int kMinInt8
Definition: globals.h:251
HBasicBlock * block() const
static Handle< T > cast(Handle< S > that)
Definition: handles.h:75
int CountLeadingZeros(uint64_t value, int width)
double power_helper(double x, double y)
Definition: assembler.cc:1436
static Representation Integer32()
T Max(T a, T b)
Definition: utils.h:227
virtual bool KnownSuccessorBlock(HBasicBlock **block) V8_OVERRIDE
virtual void AssumeRepresentation(Representation r) V8_OVERRIDE
double fast_sqrt(double input)
bool ConstantIsObject(HConstant *constant, Isolate *isolate)
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind)
kSerializedDataOffset Object
Definition: objects-inl.h:5016
Encoding
Definition: v8.h:1663
int int32_t
Definition: unicode.cc:47
virtual void AssumeRepresentation(Representation r)
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
const int kMaxInt
Definition: globals.h:248
bool EqualsInteger32Constant(int32_t value)
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
static bool IsArithmeticCompareOp(Value op)
Definition: token.h:236
virtual Representation observed_input_representation(int index) V8_OVERRIDE
unibrow::Mapping< unibrow::Ecma262Canonicalize > Canonicalize
static const int kSizeOffset
Definition: objects.h:4652
#define ASSERT(condition)
Definition: checks.h:329
const int kMinUInt16
Definition: globals.h:257
virtual void InferRepresentation(HInferRepresentationPhase *h_infer)
#define INTERNALIZED_STRING(name, value)
bool IsEmpty() const
Definition: utils.h:1054
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including on console Map counters to a file Enable debugger compile events enable GDBJIT enable GDBJIT interface for all code objects dump only objects containing this substring stress the GC compactor to flush out pretty print source code print source AST function name where to insert a breakpoint print scopes for builtins trace contexts operations print stuff during garbage collection report code statistics after GC report handles after GC trace cache state transitions print interface inference details prints when objects are turned into dictionaries report heap spill statistics along with trace isolate state changes trace regexp bytecode execution Minimal Log all events to the log file Log API events to the log file Log heap samples on garbage collection for the hp2ps tool log positions Log suspect operations Used with turns on browser compatible mode for profiling v8 log
virtual void PrintDataTo(StringStream *stream)
int isfinite(double x)
static Representation Double()
void set_observed_input_representation(int index, Representation rep)
#define INTERNALIZED_STRING_LIST(V)
Definition: heap.h:276
#define STRING_TYPE_LIST(V)
Definition: objects.h:459
void AddDependantsToWorklist(HInferRepresentationPhase *h_infer)
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
Representation representation() const
virtual void InferRepresentation(HInferRepresentationPhase *h_infer) V8_OVERRIDE
DEFINE_NEW_H_BITWISE_INSTR(HSar, c_left->NumberValueAsInteger32() >>(c_right->NumberValueAsInteger32()&0x1f)) DEFINE_NEW_H_BITWISE_INSTR(HShl
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const
static const int kDependentCodeOffset
Definition: objects.h:8416
virtual int argument_count() const
#define V8_INFINITY
Definition: globals.h:44
int isnan(double x)
void Add(Vector< const char > format, Vector< FmtElm > elms)
virtual void InferRepresentation(HInferRepresentationPhase *h_infer) V8_OVERRIDE
static Handle< Object > TryMigrateInstance(Handle< JSObject > instance)
Definition: objects.cc:3866
static const int kPretenureCreateCountOffset
Definition: objects.h:8414
SmartArrayPointer< char > ToCString(AllowNullsFlag allow_nulls, RobustnessFlag robustness_flag, int offset, int length, int *length_output=0)
Definition: objects.cc:8272
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
double fast_exp(double input)
virtual HType CalculateInferredType()
void AddNewRange(Range *r, Zone *zone)
static const int kNestedSiteOffset
Definition: objects.h:8412
static bool IsInequalityOp(Value op)
Definition: token.h:232
static Value ReverseCompareOp(Value op)
Definition: token.h:258
virtual int OperandCount()=0
#define UNREACHABLE()
Definition: checks.h:52
static const int kPretenureDataOffset
Definition: objects.h:8413
bool Equals(HValue *other)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
void PrintRangeTo(StringStream *stream)
const char * Mnemonic() const
Definition: objects.cc:2251
static GVNFlagSet AllSideEffectsFlagSet()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
Definition: flags.cc:665
bool IsFastPackedElementsKind(ElementsKind kind)
const int kDoubleSize
Definition: globals.h:266
void PrintChangesTo(StringStream *stream)
#define DEFINE_COMPILE(type)
static bool IsValid(intptr_t value)
Definition: objects-inl.h:1278
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
static const int kMinValue
Definition: objects.h:1679
const int kPointerSize
Definition: globals.h:268
static const int kTransitionInfoOffset
Definition: objects.h:8411
void check(i::Vector< const uint8_t > string)
Representation RepresentationFromUses()
bool IsDefinedAfter(HBasicBlock *other) const
void ComputeInitialRange(Zone *zone)
const int kHeapObjectTag
Definition: v8.h:5473
bool Equals(const Representation &other) const
static Value NegateCompareOp(Value op)
Definition: token.h:241
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
#define GVN_TRACKED_FLAG_LIST(V)
double power_double_double(double x, double y)
Definition: assembler.cc:1471
virtual void UpdateRepresentation(Representation new_rep, HInferRepresentationPhase *h_infer, const char *reason) V8_OVERRIDE
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
virtual Handle< Map > GetMonomorphicJSObjectMap()
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:820
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
GVNFlagSet ChangesFlags() const
virtual void PrintTo(StringStream *stream) V8_OVERRIDE
HInstruction * next() const
#define ASSERT_LE(v1, v2)
Definition: checks.h:334
virtual Representation RepresentationFromInputs() V8_OVERRIDE
void set_type(HType new_type)
Definition: v8.h:123
static const int kElementsOffset
Definition: objects.h:2756
#define MAKE_CASE(type)
const uint32_t kStringTag
Definition: objects.h:598
static Representation External()
const int kMinUInt8
Definition: globals.h:253
int isinf(double x)
const uint32_t kInternalizedTag
Definition: objects.h:605
Representation RepresentationFromUseRequirements()
void DeleteAndReplaceWith(HValue *other)
#define GVN_UNTRACKED_FLAG_LIST(V)
T NegAbs(T a)
Definition: utils.h:248
virtual intptr_t Hashcode()
static const int kLengthOffset
Definition: objects.h:10076
const int kMaxUInt8
Definition: globals.h:252
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
#define ASSERT_LT(v1, v2)
Definition: checks.h:333
static ElementsAccessor * ForKind(ElementsKind elements_kind)
Definition: elements.h:178
void PrintNameTo(StringStream *stream)
Representation generalize(Representation other)
Definition: v8.h:2107
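generalize joins two representations: if one fits into the other, the wider one is the result, and incomparable kinds fall back to Tagged. A self-contained sketch of that join over a simplified chain None < Smi < Integer32 < Double < Tagged (the chain and names here are assumptions; V8 has more kinds):

#include <cassert>

// Simplified representation lattice, most specific first.
enum class Rep { kNone, kSmi, kInteger32, kDouble, kTagged };

// True if representation 'a' can be used where 'b' is expected.
bool FitsInto(Rep a, Rep b) {
  return static_cast<int>(a) <= static_cast<int>(b);
}

// Join: widen the narrower side; anything incomparable (not possible
// in this single chain) would fall back to the fully general kTagged.
Rep Generalize(Rep a, Rep b) {
  if (FitsInto(a, b)) return b;
  if (FitsInto(b, a)) return a;
  return Rep::kTagged;
}

int main() {
  assert(Generalize(Rep::kSmi, Rep::kDouble) == Rep::kDouble);
  assert(Generalize(Rep::kInteger32, Rep::kNone) == Rep::kInteger32);
}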
void InsertAfter(HInstruction *previous)
static const int kHeaderSize
Definition: objects.h:3016
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
virtual Opcode opcode() const =0
virtual void DeleteFromGraph()=0
uint32_t DoubleToUint32(double x)
Definition: conversions.h:106
static const int kMapOffset
Definition: objects.h:1890
int32_t DoubleToInt32(double x)
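DoubleToInt32 and DoubleToUint32 implement the ECMAScript ToInt32 and ToUint32 conversions: no input traps; finite values are truncated toward zero and reduced modulo 2^32, while NaN and infinities map to 0. A portable standalone sketch of those semantics (not V8's optimized fast path):

#include <cassert>
#include <cmath>
#include <cstdint>

// ECMAScript ToUint32: NaN and infinities map to 0; finite values are
// truncated toward zero and reduced modulo 2^32 into [0, 2^32).
uint32_t DoubleToUint32(double x) {
  if (!std::isfinite(x)) return 0;
  double t = std::trunc(x);
  double m = std::fmod(t, 4294967296.0);  // keeps the sign of t
  if (m < 0) m += 4294967296.0;
  return static_cast<uint32_t>(m);
}

// ToInt32 is the same reduction reinterpreted as a signed 32-bit value
// (two's-complement narrowing, implementation-defined before C++20).
int32_t DoubleToInt32(double x) {
  return static_cast<int32_t>(DoubleToUint32(x));
}

int main() {
  assert(DoubleToInt32(4294967296.0 + 7.0) == 7);  // wraps modulo 2^32
  assert(DoubleToInt32(-1.0) == -1);
  assert(DoubleToUint32(-1.0) == 4294967295u);
  assert(DoubleToInt32(std::nan("")) == 0);
}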
const uint32_t kIsNotStringMask
Definition: objects.h:597
const int kMinInt16
Definition: globals.h:255
virtual HSourcePosition position() const V8_OVERRIDE
static const int kLengthOffset
Definition: objects.h:3015
static double nan_value()
void ChangeRepresentation(Representation r)
const char * ElementsKindToString(ElementsKind kind)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
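handle() wraps a raw heap pointer in a Handle scoped to the isolate. The point of a handle is one level of indirection: it stores the address of a slot the garbage collector owns, so when the collector moves an object it rewrites the slot and every handle stays valid. A toy standalone sketch of that indirection (illustrative types only, not V8's implementation):

#include <cassert>

struct Object { int value; };

// A toy handle: it points at a slot the collector owns. Moving the
// object rewrites the slot; the handle never holds the raw pointer.
template <typename T>
struct Handle {
  T** location;
  T* operator->() const { return *location; }
};

int main() {
  Object a{42};
  Object b = a;            // the object's new location after a 'move'
  Object* slot = &a;       // slot owned by the collector
  Handle<Object> h{&slot};
  assert(h->value == 42);
  slot = &b;               // the collector relocates the object
  assert(h->value == 42);  // the handle still dereferences correctly
}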
V8 runtime flag definition (DEFINE_* macro)
Definition: flags.cc:317
Handle< String > LookupSingleCharacterStringFromCode(Isolate *isolate, uint32_t index)
Definition: handles.cc:212
void SetOperandAt(int index, HValue *value)
virtual Range * InferRange(Zone *zone)
bool CheckFlag(Flag f) const
#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)
void set_position(HSourcePosition position)
virtual Representation RepresentationFromInputs()
void InsertBefore(HInstruction *next)
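InsertBefore here, and InsertAfter earlier in this index, splice an instruction into its basic block's doubly linked instruction list. A standalone sketch of the splice itself, on a toy node type rather than HInstruction:

#include <cassert>

// Toy doubly linked instruction node.
struct Instr {
  Instr* prev = nullptr;
  Instr* next = nullptr;
};

// Splice 'node' into the list immediately after 'previous',
// patching all four affected links.
void InsertAfter(Instr* node, Instr* previous) {
  node->prev = previous;
  node->next = previous->next;
  if (previous->next != nullptr) previous->next->prev = node;
  previous->next = node;
}

int main() {
  Instr a, b, c;
  InsertAfter(&b, &a);  // list: a <-> b
  InsertAfter(&c, &a);  // list: a <-> c <-> b
  assert(a.next == &c && c.next == &b && b.prev == &c);
}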
#define H_CONSTANT_DOUBLE(val)
static Handle< String > null()
Definition: handles.h:80
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
void SetBlock(HBasicBlock *block)
const int kMaxInt16
Definition: globals.h:254
virtual Representation observed_input_representation(int index)
virtual HValue * OperandAt(int index) const =0
bool CheckUsesForFlag(Flag f) const
double FastI2D(int x)
Definition: conversions.h:81
virtual void PrintDataTo(StringStream *stream) V8_OVERRIDE
static const int kWeakNextOffset
Definition: objects.h:8418
const char * Mnemonic() const
static Representation None()
virtual void InternalSetOperandAt(int index, HValue *value)=0
static const int kMaxLength
Definition: objects.h:8922
void set_tail(HUseListNode *list)
static Representation Tagged()
void PrintTypeTo(StringStream *stream)
int MostSignificantBit(uint32_t x)
Definition: utils.h:89
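MostSignificantBit locates the highest set bit of a 32-bit value by binary search over halves, bytes and nibbles. A standalone sketch returning the bit length (0 for x == 0; whether V8's version returns a length or a zero-based index is an assumption here):

#include <cassert>
#include <cstdint>

// Number of significant bits in x: 0 for 0, 32 for values with
// bit 31 set. Binary search narrows the range five times.
int MostSignificantBit(uint32_t x) {
  int n = 0;
  if (x & 0xffff0000u) { n += 16; x >>= 16; }
  if (x & 0x0000ff00u) { n += 8;  x >>= 8;  }
  if (x & 0x000000f0u) { n += 4;  x >>= 4;  }
  if (x & 0x0000000cu) { n += 2;  x >>= 2;  }
  if (x & 0x00000002u) { n += 1;  x >>= 1;  }
  return n + static_cast<int>(x);  // x is now 0 or 1
}

int main() {
  assert(MostSignificantBit(0) == 0);
  assert(MostSignificantBit(1) == 1);
  assert(MostSignificantBit(3) == 2);
  assert(MostSignificantBit(0x80000000u) == 32);
}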
virtual void UpdateRepresentation(Representation new_rep, HInferRepresentationPhase *h_infer, const char *reason)
T Min(T a, T b)
Definition: utils.h:234
#define PRINT_DO(Type)
Isolate * isolate() const
void RegisterUse(int index, HValue *new_value)
static const int kMaxValue
Definition: objects.h:1681
const int kMaxInt8
Definition: globals.h:250
V8 runtime flag definition (DEFINE_* macro)
Definition: flags.cc:505
static HValue * cast(HValue *value)
void ReplaceAllUsesWith(HValue *other)
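ReplaceAllUsesWith, together with DeleteAndReplaceWith above, is the standard SSA graph rewrite: every instruction consuming the old value gets its operand slot redirected to the replacement, leaving the old node dead. A minimal standalone sketch of the pattern (toy data structures, not V8's use-list nodes):

#include <cassert>
#include <vector>

// Toy SSA node: 'operands' are the values it consumes and
// 'uses' records the nodes that consume it.
struct Node {
  std::vector<Node*> operands;
  std::vector<Node*> uses;
};

// Redirect every consumer of 'from' so it consumes 'to' instead.
void ReplaceAllUsesWith(Node* from, Node* to) {
  for (Node* user : from->uses) {
    for (Node*& op : user->operands) {
      if (op == from) op = to;
    }
    to->uses.push_back(user);
  }
  from->uses.clear();  // 'from' is now dead and can be removed
}

int main() {
  Node a, b, add;
  add.operands = {&a, &a};
  a.uses = {&add};
  ReplaceAllUsesWith(&a, &b);
  assert(add.operands[0] == &b && add.operands[1] == &b);
  assert(a.uses.empty() && b.uses.size() == 1);
}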
bool IsFastDoubleElementsKind(ElementsKind kind)
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:224
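IsOrderedRelationalCompareOp separates the ordered comparisons (<, <=, >, >=) from the equality operators, which follow different rules in JavaScript. A sketch of such a predicate over a toy token enum (the enumerator names are assumptions, not V8's token.h):

#include <cassert>

enum class Token { kEq, kStrictEq, kNotEq, kLt, kGt, kLte, kGte };

// True only for the ordered relational comparisons <, >, <=, >=.
bool IsOrderedRelationalCompareOp(Token op) {
  return op == Token::kLt || op == Token::kGt ||
         op == Token::kLte || op == Token::kGte;
}

int main() {
  assert(IsOrderedRelationalCompareOp(Token::kLt));
  assert(!IsOrderedRelationalCompareOp(Token::kStrictEq));
}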
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE
HUseIterator uses() const