#ifndef V8_OBJECTS_VISITING_H_
#define V8_OBJECTS_VISITING_H_

namespace v8 {
namespace internal {


// Base class for all static visitors.
class StaticVisitorBase : public AllStatic {
 public:
#define VISITOR_ID_LIST(V) \
  V(ShortcutCandidate)     \
  V(FixedFloat64Array)     \
  V(ConstantPoolArray)     \
  V(DataObjectGeneric)     \
  V(SharedFunctionInfo)

  enum VisitorId {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
    VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
    kVisitorIdCount,
    kMinObjectSizeInWords = 2
  };

  // Visitor ids must fit in one byte.
  STATIC_ASSERT(kVisitorIdCount <= 256);
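  // For illustration, with the surviving entries above the two macros expand
  // the enum to:
  //
  //   enum VisitorId {
  //     kVisitShortcutCandidate,
  //     kVisitFixedFloat64Array,
  //     kVisitConstantPoolArray,
  //     kVisitDataObjectGeneric,
  //     kVisitSharedFunctionInfo,
  //     kVisitorIdCount,
  //     kMinObjectSizeInWords = 2
  //   };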
  // For visitors that allow specialization by size, compute the id of the
  // size-specialized visitor; sizes beyond the largest specialization fall
  // back to the generic visitor.
  static VisitorId GetVisitorIdForSize(VisitorId base,
                                       VisitorId generic,
                                       int object_size) {
    const VisitorId specialization = static_cast<VisitorId>(
        base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
    return Min(specialization, generic);
  }
};
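// Worked example for GetVisitorIdForSize, assuming the usual layout of the
// size-specialized ids (kVisitDataObject2 .. kVisitDataObject9 immediately
// followed by kVisitDataObjectGeneric, with kVisitDataObject aliasing
// kVisitDataObject2): for a 4-word data object
//
//   specialization == kVisitDataObject
//                     + (4 * kPointerSize >> kPointerSizeLog2)
//                     - kMinObjectSizeInWords
//                  == kVisitDataObject + 2  // kVisitDataObject4
//
// and objects of 10 or more words compute past the generic id, so Min()
// clamps them to kVisitDataObjectGeneric.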
template<typename Callback>
class VisitorDispatchTable {
 public:
  Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
    return reinterpret_cast<Callback>(callbacks_[id]);
  }

  Callback GetVisitor(Map* map) {
    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
  }

  void Register(StaticVisitorBase::VisitorId id, Callback callback) {
    callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
  }
  template<typename Visitor,
           StaticVisitorBase::VisitorId base,
           StaticVisitorBase::VisitorId generic,
           int object_size_in_words>
  void RegisterSpecialization() {
    static const int size = object_size_in_words * kPointerSize;
    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
             &Visitor::template VisitSpecialized<size>);
  }

  template<typename Visitor,
           StaticVisitorBase::VisitorId base,
           StaticVisitorBase::VisitorId generic>
  void RegisterSpecializations() {
    STATIC_ASSERT(
        (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
    RegisterSpecialization<Visitor, base, generic, 2>();
    RegisterSpecialization<Visitor, base, generic, 3>();
    RegisterSpecialization<Visitor, base, generic, 4>();
    RegisterSpecialization<Visitor, base, generic, 5>();
    RegisterSpecialization<Visitor, base, generic, 6>();
    RegisterSpecialization<Visitor, base, generic, 7>();
    RegisterSpecialization<Visitor, base, generic, 8>();
    RegisterSpecialization<Visitor, base, generic, 9>();
    Register(generic, &Visitor::Visit);
  }

 private:
  AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
};
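// A sketch of how a visitor might populate and use a dispatch table
// (SomeVisitor and its members are hypothetical):
//
//   VisitorDispatchTable<Callback> table;
//   table.Register(StaticVisitorBase::kVisitSharedFunctionInfo,
//                  &SomeVisitor::VisitSharedFunctionInfo);
//   table.RegisterSpecializations<SomeVisitor::DataObjectVisitor,
//                                 StaticVisitorBase::kVisitDataObject,
//                                 StaticVisitorBase::kVisitDataObjectGeneric>();
//   table.GetVisitor(map)(map, object);  // dispatches on map->visitor_id()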
template<typename StaticVisitor>
class BodyVisitorBase : public AllStatic {
 public:
  INLINE(static void IteratePointers(Heap* heap,
                                     HeapObject* object,
                                     int start_offset,
                                     int end_offset)) {
    Object** start_slot =
        reinterpret_cast<Object**>(object->address() + start_offset);
    Object** end_slot =
        reinterpret_cast<Object**>(object->address() + end_offset);
    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
  }
};
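// Illustration: the offsets are byte offsets from the object's address, so a
// body whose tagged fields occupy the second and third words is swept with
//
//   BodyVisitorBase<SomeStaticVisitor>::IteratePointers(
//       heap, object, kPointerSize, 3 * kPointerSize);
//
// which hands exactly two Object** slots to StaticVisitor::VisitPointers.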
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
 public:
  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
    int object_size = BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticVisitor>::IteratePointers(
        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
    return static_cast<ReturnType>(object_size);
  }

  template<int object_size>
  static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
    ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
    BodyVisitorBase<StaticVisitor>::IteratePointers(
        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
    return static_cast<ReturnType>(object_size);
  }
};
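// Example instantiation, mirroring the typedefs in StaticNewSpaceVisitor
// below: a visitor over JSObject bodies whose Visit reports the object size.
//
//   typedef FlexibleBodyVisitor<SomeStaticVisitor,
//                               JSObject::BodyDescriptor,
//                               int> JSObjectVisitor;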
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
 public:
  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
    BodyVisitorBase<StaticVisitor>::IteratePointers(
        map->GetHeap(), object,
        BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset);
    return static_cast<ReturnType>(BodyDescriptor::kSize);
  }
};
// Base class for visitors used for a linear new space iteration.
// IterateBody returns the size of the visited object. It is intended to be
// used through the curiously recurring template pattern, e.g.
//
//   class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> { ... };
//
// so that calls through StaticVisitor resolve at compile time.
template<typename StaticVisitor>
class StaticNewSpaceVisitor : public StaticVisitorBase {
 public:
  static void Initialize();

  INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
    return table_.GetVisitor(map)(map, obj);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
  }
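  // Dispatch sketch: a linear new space pass can advance from object to
  // object using the size IterateBody reports (call site is illustrative):
  //
  //   int size = SomeVisitor::IterateBody(object->map(), object);
  //   object_address += size;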
 private:
  INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
    return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
  }

  INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    return FixedDoubleArray::SizeFor(length);
  }

  INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
    return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
  }

  INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
    return JSObjectVisitor::Visit(map, object);
  }

  INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
    return SeqOneByteString::cast(object)->
        SeqOneByteStringSize(map->instance_type());
  }

  INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
    return SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
  }

  INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
    return FreeSpace::cast(object)->Size();
  }

  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
  INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
  class DataObjectVisitor {
   public:
    template<int object_size>
    static inline int VisitSpecialized(Map* map, HeapObject* object) {
      return object_size;
    }

    INLINE(static int Visit(Map* map, HeapObject* object)) {
      return map->instance_size();
    }
  };
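  // Data objects carry no tagged fields, so both paths above merely report a
  // size. Registering the size specializations, e.g.
  //
  //   table_.RegisterSpecializations<DataObjectVisitor,
  //                                  kVisitDataObject,
  //                                  kVisitDataObjectGeneric>();
  //
  // lets small data objects report their size as a compile-time constant.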
  typedef FlexibleBodyVisitor<StaticVisitor,
                              StructBodyDescriptor,
                              int> StructVisitor;

  typedef FlexibleBodyVisitor<StaticVisitor,
                              JSObject::BodyDescriptor,
                              int> JSObjectVisitor;

  typedef int (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};
template<typename StaticVisitor>
VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
    StaticNewSpaceVisitor<StaticVisitor>::table_;
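// Because table_ is a static member of the class template, every CRTP
// instantiation owns an independent dispatch table (visitor names here are
// hypothetical):
//
//   StaticNewSpaceVisitor<ScavengeVisitor>::table_
//   StaticNewSpaceVisitor<SomeOtherVisitor>::table_  // a separate table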
// Base class for visitors used to transitively mark the entire heap.
// IterateBody returns nothing. Like StaticNewSpaceVisitor, it is used
// through the curiously recurring template pattern.
template<typename StaticVisitor>
class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static void Initialize();

  INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
  INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
  INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
  INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
  INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
  INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
  INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
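  // Handlers the generic initialization leaves out are registered by the
  // inheriting class itself, e.g. (hypothetical registration):
  //
  //   table_.Register(kVisitSharedFunctionInfo,
  //                   &StaticVisitor::VisitSharedFunctionInfo);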
  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};

template<typename StaticVisitor>
VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
    StaticMarkingVisitor<StaticVisitor>::table_;
} }  // namespace v8::internal

#endif  // V8_OBJECTS_VISITING_H_