#include "v8.h"

#include "circular-queue-inl.h"
#include "cctest.h"

using i::SamplingCircularQueue;


TEST(SamplingCircularQueue) {
  typedef i::AtomicWord Record;
  const int kMaxRecordsInQueue = 4;
  SamplingCircularQueue<Record, kMaxRecordsInQueue> scq;
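
  // The queue starts out empty: Peek has nothing to return.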
  CHECK_EQ(NULL, scq.Peek());

  // Fill up the queue.
  for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
    CHECK_NE(NULL, rec);
    *rec = i;
    scq.FinishEnqueue();
  }
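
  // The queue is now full: further enqueue attempts must fail, while
  // consumption must still be available.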
  CHECK_NE(NULL, scq.Peek());
  for (int i = 0; i < 10; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
    CHECK_EQ(NULL, rec);
    CHECK_NE(NULL, scq.Peek());
  }
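
  // Consume all records: Peek must keep returning the same slot until
  // Remove advances the read position.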
  for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Peek());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
    scq.Remove();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
  }

  // The queue is empty again.
  CHECK_EQ(NULL, scq.Peek());
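
  // Fill the queue only half way to check that the read and write
  // positions wrap around correctly.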
  for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
    CHECK_NE(NULL, rec);
    *rec = i;
    scq.FinishEnqueue();
  }
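
  // Consume all available kMaxRecordsInQueue / 2 records.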
  CHECK_NE(NULL, scq.Peek());
  for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Peek());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
    scq.Remove();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
  }

  // The queue is empty again.
  CHECK_EQ(NULL, scq.Peek());
}
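

// Several producer threads enqueue disjoint ranges of values into a shared
// queue while the main thread consumes them one producer at a time.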
namespace {

typedef i::AtomicWord Record;
typedef SamplingCircularQueue<Record, 12> TestSampleQueue;

class ProducerThread : public i::Thread {
 public:
  ProducerThread(TestSampleQueue* scq,
                 int records_per_chunk,
                 Record value,
                 i::Semaphore* finished)
      : Thread("producer"),
        scq_(scq),
        records_per_chunk_(records_per_chunk),
        value_(value),
        finished_(finished) { }
  // Enqueue records_per_chunk_ records starting at value_, then signal.
  virtual void Run() {
    for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
      Record* rec = reinterpret_cast<Record*>(scq_->StartEnqueue());
      CHECK_NE(NULL, rec);
      *rec = i;
      scq_->FinishEnqueue();
    }

    finished_->Signal();
  }
 private:
  TestSampleQueue* scq_;
  const int records_per_chunk_;
  Record value_;
  i::Semaphore* finished_;
};

}  // namespace

TEST(SamplingCircularQueueMultithreading) {
  // Emulate multiple VM threads working 'one thread at a time.'
  // This test enqueues data from different threads. This corresponds
  // to the case of profiling under Linux, where the signal handler that
  // does sampling is called in the context of different VM threads.

  const int kRecordsPerChunk = 4;
  TestSampleQueue scq;
  i::Semaphore semaphore(0);

  ProducerThread producer1(&scq, kRecordsPerChunk, 1, &semaphore);
  ProducerThread producer2(&scq, kRecordsPerChunk, 10, &semaphore);
  ProducerThread producer3(&scq, kRecordsPerChunk, 20, &semaphore);
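
  // Each producer fills one chunk and then signals the semaphore; the main
  // thread waits on the semaphore before consuming, so the producers and
  // the consumer never race on the same records.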
  CHECK_EQ(NULL, scq.Peek());
  producer1.Start();
  semaphore.Wait();
  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Peek());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
    scq.Remove();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
  }
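
  // Repeat with the second producer, whose values start at 10.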
  CHECK_EQ(NULL, scq.Peek());
  producer2.Start();
  semaphore.Wait();
  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Peek());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
    scq.Remove();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
  }
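
  // And with the third producer, whose values start at 20.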
  CHECK_EQ(NULL, scq.Peek());
  producer3.Start();
  semaphore.Wait();
  for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Peek());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
    scq.Remove();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
  }

  // The queue is empty again.
  CHECK_EQ(NULL, scq.Peek());

  producer1.Join();
  producer2.Join();
  producer3.Join();
}