InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                         NodeTraceWriter* trace_writer)
    : flushing_(false), max_chunks_(max_chunks),
      trace_writer_(trace_writer), id_(id) {
  chunks_.resize(max_chunks);
}
TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  // Create a new chunk if the last chunk is full, or if there is no chunk.
  if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    auto& chunk = chunks_[total_chunks_++];
    if (chunk) {
      chunk->Reset(current_chunk_seq_++);
    } else {
      chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
    }
  }
  auto& chunk = chunks_[total_chunks_ - 1];
  size_t event_index;
  TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  return trace_object;
}
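// The value written to *handle packs (buffer id, chunk sequence number,
// chunk index, event index) into a single uint64_t via MakeHandle();
// GetEventByHandle() below reverses that packing and returns nullptr if the
// chunk has since been flushed or reused.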
TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  if (handle == 0) {
    // A handle value of zero never has a trace event associated with it.
    return nullptr;
  }
  size_t chunk_index, event_index;
  uint32_t buffer_id, chunk_seq;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  if (buffer_id != id_ || chunk_index >= total_chunks_) {
    // The chunk belongs to the other buffer, or has already been flushed.
    return nullptr;
  }
  auto& chunk = chunks_[chunk_index];
  if (chunk->seq() != chunk_seq) {
    // The chunk has been reused since the handle was created.
    return nullptr;
  }
  return chunk->GetEventAt(event_index);
}
void InternalTraceBuffer::Flush(bool blocking) {
  {
    Mutex::ScopedLock scoped_lock(mutex_);
    if (total_chunks_ > 0) {
      flushing_ = true;
      for (size_t i = 0; i < total_chunks_; ++i) {
        auto& chunk = chunks_[i];
        for (size_t j = 0; j < chunk->size(); ++j) {
          trace_writer_->AppendTraceEvent(chunk->GetEventAt(j));
        }
      }
      total_chunks_ = 0;
      flushing_ = false;
    }
  }
  trace_writer_->Flush(blocking);
}
uint64_t InternalTraceBuffer::MakeHandle(
    size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
          chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
}
void InternalTraceBuffer::ExtractHandle(
    uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
    uint32_t* chunk_seq, size_t* event_index) const {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  size_t indices = handle % Capacity();
  *chunk_index = indices / TraceBufferChunk::kChunkSize;
  *event_index = indices % TraceBufferChunk::kChunkSize;
}
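// Illustrative round trip of the handle encoding above (a sketch only: it
// assumes Capacity() == max_chunks_ * TraceBufferChunk::kChunkSize and a
// kChunkSize of 64, neither of which is defined in this file).
// With max_chunks_ = 1024, Capacity() = 65536; for id_ = 1, chunk_seq = 3,
// chunk_index = 2, event_index = 5:
//   MakeHandle     = ((3 * 65536 + 2 * 64 + 5) << 1) + 1 = 393483
//   ExtractHandle:   buffer_id   = 393483 & 0x1   = 1
//                    handle >>= 1                -> 196741
//                    chunk_seq   = 196741 / 65536 = 3
//                    indices     = 196741 % 65536 = 133
//                    chunk_index = 133 / 64       = 2
//                    event_index = 133 % 64       = 5
// The low bit selects which of the two internal buffers owns the event; the
// remaining bits locate the chunk and the event within it.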
NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
    NodeTraceWriter* trace_writer, uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop), trace_writer_(trace_writer),
      buffer1_(max_chunks, 0, trace_writer),
      buffer2_(max_chunks, 1, trace_writer) {
  current_buf_.store(&buffer1_);

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          NonBlockingFlushSignalCb);
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}
NodeTraceBuffer::~NodeTraceBuffer() {
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(exit_mutex_);
  // Wait for the tracing loop to acknowledge the exit request before the
  // libuv handles owned by this object are destroyed.
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}
TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
  // If the current buffer is full, try to swap to the other one; if both are
  // full, drop the event and hand back a null handle.
  if (!TryLoadAvailableBuffer()) {
    // A handle value of zero causes GetEventByHandle() to return nullptr.
    *handle = 0;
    return nullptr;
  }
  return current_buf_.load()->AddTraceEvent(handle);
}
TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
  return current_buf_.load()->GetEventByHandle(handle);
}
bool NodeTraceBuffer::Flush() {
  buffer1_.Flush(true);
  buffer2_.Flush(true);
  return true;
}
// Attempts to set current_buf_ such that it references a buffer that can
// write at least one trace event. Returns false if both buffers are full.
bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  InternalTraceBuffer* prev_buf = current_buf_.load();
  if (prev_buf->IsFull()) {
    // Ask the tracing thread to flush the full buffer asynchronously.
    uv_async_send(&flush_signal_);
    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
      &buffer2_ : &buffer1_;
    if (!other_buf->IsFull()) {
      current_buf_.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}
void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
  if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
    buffer->buffer1_.Flush(false);
  }
  if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
    buffer->buffer2_.Flush(false);
  }
}
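// Shutdown handshake: ~NodeTraceBuffer() sends exit_signal_ and then waits on
// exit_cond_. ExitSignalCb runs on the tracing loop, closes both async
// handles, and the close callback for exit_signal_ sets exited_ and signals
// exit_cond_, so the destructor only returns once the loop is done with the
// handles.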
void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_), nullptr);
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
    [](uv_handle_t* signal) {
      NodeTraceBuffer* buffer =
          reinterpret_cast<NodeTraceBuffer*>(signal->data);
      Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
      buffer->exited_ = true;
      buffer->exit_cond_.Signal(scoped_lock);
  });
}