28 #ifndef V8_TYPE_INFO_H_
29 #define V8_TYPE_INFO_H_
84 Type t =
static_cast<Type
>(bit_representation);
98 return TypeInfo(static_cast<Type>(a.type_ & b.type_));
109 if (rep.
bits == minus_zero.
bits)
return false;
111 value == static_cast<int32_t>(value)) {
120 return type_ == other.type_;
124 ASSERT(type_ != kUninitialized);
125 return type_ == kUnknown;
129 ASSERT(type_ != kUninitialized);
130 return ((type_ & kPrimitive) == kPrimitive);
134 ASSERT(type_ != kUninitialized);
135 return ((type_ & kNumber) == kNumber);
139 ASSERT(type_ != kUninitialized);
140 return ((type_ & kSmi) == kSmi);
144 ASSERT(type_ != kUninitialized);
145 return ((type_ & kSymbol) == kSymbol);
149 ASSERT(type_ != kUninitialized);
150 return ((type_ & kSymbol) == kString);
154 ASSERT(type_ != kUninitialized);
155 return ((type_ & kInteger32) == kInteger32);
159 ASSERT(type_ != kUninitialized);
160 return ((type_ & kDouble) == kDouble);
164 ASSERT(type_ != kUninitialized);
165 return ((type_ & kString) == kString);
169 ASSERT(type_ != kUninitialized);
170 return ((type_ & kNonPrimitive) == kNonPrimitive);
174 return type_ == kUninitialized;
179 case kUnknown:
return "Unknown";
180 case kPrimitive:
return "Primitive";
181 case kNumber:
return "Number";
182 case kInteger32:
return "Integer32";
183 case kSmi:
return "Smi";
184 case kSymbol:
return "Symbol";
185 case kDouble:
return "Double";
186 case kString:
return "String";
187 case kNonPrimitive:
return "Object";
188 case kUninitialized:
return "Uninitialized";
191 return "Unreachable code";
204 kNonPrimitive = 0x40,
205 kUninitialized = 0x7f
// NOTE(review): the leading "207 " is extraction residue (the original file's
// line number fused into the text) — not part of the real source line.
// Constructor wrapping a raw Type bit pattern in a TypeInfo value.
// Callers elsewhere in this extraction obtain instances through the named
// factories (Unknown(), Primitive(), Integer32(), NonPrimitive(), ...);
// presumably this constructor is private in the original class — the access
// specifier is not visible here, confirm against the full header.
207 explicit inline TypeInfo(Type t) : type_(t) { }
221 class BinaryOperation;
225 class CompareOperation;
226 class CompilationInfo;
227 class CountOperation;
231 class UnaryOperation;
232 class ForInStatement;
330 #endif // V8_TYPE_INFO_H_
static TypeInfo Combine(TypeInfo a, TypeInfo b)
Handle< JSFunction > GetCallNewTarget(CallNew *expr)
Handle< Map > GetCompareMap(CompareOperation *expr)
static bool IsInt32Double(double value)
void CollectKeyedReceiverTypes(TypeFeedbackId ast_id, SmallMapList *types)
static TypeInfo NonPrimitive()
bool IsForInFastCase(ForInStatement *expr)
static bool CanRetainOtherContext(Map *map, Context *native_context)
static TypeInfo Unknown()
static TypeInfo Primitive()
Handle< Map > LoadMonomorphicReceiverType(Property *expr)
// NOTE(review): stub definition — expands ASSERT(condition) to nothing, so
// every ASSERT(type_ != kUninitialized) guard visible in this extraction
// compiles away entirely (the condition is not even evaluated).
// The real project ASSERT presumably checks the condition in debug builds;
// confirm against the project's checks header before relying on these guards.
#define ASSERT(condition)
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id)
TypeInfo IncrementType(CountOperation *expr)
static TypeInfo TypeFromValue(Handle< Object > value)
void CallReceiverTypes(Call *expr, Handle< String > name, CallKind call_kind, SmallMapList *types)
void LoadReceiverTypes(Property *expr, Handle< String > name, SmallMapList *types)
TypeInfo SwitchType(CaseClause *clause)
static TypeInfo Integer32()
CheckType GetCallCheckType(Call *expr)
bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id)
Handle< JSObject > GetPrototypeForPrimitiveCheck(CheckType check)
Handle< Map > StoreMonomorphicReceiverType(TypeFeedbackId ast_id)
bool LoadIsUninitialized(Property *expr)
bool Equals(const TypeInfo &other)
TypeInfo CompareType(CompareOperation *expr)
static TypeInfo FromInt(int bit_representation)
TypeInfo BinaryType(BinaryOperation *expr)
byte ToBooleanTypes(TypeFeedbackId ast_id)
Handle< Map > GetObjectLiteralStoreMap(ObjectLiteral::Property *prop)
bool LoadIsBuiltin(Property *expr, Builtins::Name id)
TypeFeedbackOracle(Handle< Code > code, Handle< Context > native_context, Isolate *isolate, Zone *zone)
static TypeInfo Uninitialized()
void StoreReceiverTypes(Assignment *expr, Handle< String > name, SmallMapList *types)
bool CallIsMonomorphic(Call *expr)
bool IsSymbolCompare(CompareOperation *expr)
Handle< JSFunction > GetCallTarget(Call *expr)
bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property *prop)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
const int kMaxKeyedPolymorphism
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in the Print usage including flags
bool LoadIsMonomorphicNormal(Property *expr)
bool LoadIsMegamorphicWithTypeInfo(Property *expr)
void check(i::Vector< const char > string)
TypeInfo UnaryType(UnaryOperation *expr)
bool CallNewIsMonomorphic(CallNew *expr)