V8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
test-circular-queue.cc
Go to the documentation of this file.
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 //
3 // Tests of the circular queue.
4 
5 #include "v8.h"
6 #include "circular-queue-inl.h"
7 #include "cctest.h"
8 
10 
11 
12 TEST(SamplingCircularQueue) {
13  typedef SamplingCircularQueue::Cell Record;
14  const int kRecordsPerChunk = 4;
15  SamplingCircularQueue scq(sizeof(Record),
16  kRecordsPerChunk * sizeof(Record),
17  3);
18 
19  // Check that we are using non-reserved values.
20  CHECK_NE(SamplingCircularQueue::kClear, 1);
21  CHECK_NE(SamplingCircularQueue::kEnd, 1);
22  // Fill up the first chunk.
23  CHECK_EQ(NULL, scq.StartDequeue());
24  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
25  Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
26  CHECK_NE(NULL, rec);
27  *rec = i;
28  CHECK_EQ(NULL, scq.StartDequeue());
29  }
30 
31  // Fill up the second chunk. Consumption must still be unavailable.
32  CHECK_EQ(NULL, scq.StartDequeue());
33  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
34  Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
35  CHECK_NE(NULL, rec);
36  *rec = i;
37  CHECK_EQ(NULL, scq.StartDequeue());
38  }
39 
40  Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
41  CHECK_NE(NULL, rec);
42  *rec = 20;
43  // Now as we started filling up the third chunk, consumption
44  // must become possible.
45  CHECK_NE(NULL, scq.StartDequeue());
46 
47  // Consume the first chunk.
48  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
49  Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
50  CHECK_NE(NULL, rec);
51  CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
52  CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
53  scq.FinishDequeue();
54  CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
55  }
56  // Now consumption must not be possible, as consumer now polls
57  // the first chunk for emptinness.
58  CHECK_EQ(NULL, scq.StartDequeue());
59 
60  scq.FlushResidualRecords();
61  // From now, consumer no more polls ahead of the current chunk,
62  // so it's possible to consume the second chunk.
63  CHECK_NE(NULL, scq.StartDequeue());
64  // Consume the second chunk
65  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
66  Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
67  CHECK_NE(NULL, rec);
68  CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
69  CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
70  scq.FinishDequeue();
71  CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
72  }
73  // Consumption must still be possible as the first cell of the
74  // last chunk is not clean.
75  CHECK_NE(NULL, scq.StartDequeue());
76 }
77 
78 
79 namespace {
80 
81 class ProducerThread: public i::Thread {
82  public:
83  typedef SamplingCircularQueue::Cell Record;
84 
85  ProducerThread(SamplingCircularQueue* scq,
86  int records_per_chunk,
87  Record value,
88  i::Semaphore* finished)
89  : Thread("producer"),
90  scq_(scq),
91  records_per_chunk_(records_per_chunk),
92  value_(value),
93  finished_(finished) { }
94 
95  virtual void Run() {
96  for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
97  Record* rec = reinterpret_cast<Record*>(scq_->Enqueue());
98  CHECK_NE(NULL, rec);
99  *rec = i;
100  }
101 
102  finished_->Signal();
103  }
104 
105  private:
106  SamplingCircularQueue* scq_;
107  const int records_per_chunk_;
108  Record value_;
109  i::Semaphore* finished_;
110 };
111 
112 } // namespace
113 
114 TEST(SamplingCircularQueueMultithreading) {
115  // Emulate multiple VM threads working 'one thread at a time.'
116  // This test enqueues data from different threads. This corresponds
117  // to the case of profiling under Linux, where signal handler that
118  // does sampling is called in the context of different VM threads.
119 
120  typedef ProducerThread::Record Record;
121  const int kRecordsPerChunk = 4;
122  SamplingCircularQueue scq(sizeof(Record),
123  kRecordsPerChunk * sizeof(Record),
124  3);
126  // Don't poll ahead, making possible to check data in the buffer
127  // immediately after enqueuing.
128  scq.FlushResidualRecords();
129 
130  // Check that we are using non-reserved values.
131  CHECK_NE(SamplingCircularQueue::kClear, 1);
132  CHECK_NE(SamplingCircularQueue::kEnd, 1);
133  ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
134  ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
135  ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);
136 
137  CHECK_EQ(NULL, scq.StartDequeue());
138  producer1.Start();
139  semaphore->Wait();
140  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
141  Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
142  CHECK_NE(NULL, rec);
143  CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
144  CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
145  scq.FinishDequeue();
146  CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
147  }
148 
149  CHECK_EQ(NULL, scq.StartDequeue());
150  producer2.Start();
151  semaphore->Wait();
152  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
153  Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
154  CHECK_NE(NULL, rec);
155  CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
156  CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
157  scq.FinishDequeue();
158  CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
159  }
160 
161  CHECK_EQ(NULL, scq.StartDequeue());
162  producer3.Start();
163  semaphore->Wait();
164  for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
165  Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
166  CHECK_NE(NULL, rec);
167  CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
168  CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
169  scq.FinishDequeue();
170  CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
171  }
172 
173  CHECK_EQ(NULL, scq.StartDequeue());
174 
175  delete semaphore;
176 }
#define CHECK_EQ(expected, value)
Definition: checks.h:219
virtual void Run()=0
#define CHECK_NE(unexpected, value)
Definition: checks.h:223
static Semaphore * CreateSemaphore(int count)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
virtual void Wait()=0
v8::internal::Semaphore * semaphore
TEST(SamplingCircularQueue)