#include <fcntl.h>
#include <limits.h>
#include <numa.h>
#include <sched.h>
#include <sys/uio.h>
#include <time.h>
#include <unistd.h>

#include <iostream>
#include <limits>
#include <string>
#include <thread>
#include <tuple>
#include <vector>

#include "txn_proto2_impl.h"

using namespace std;
using namespace util;
/** logger subsystem **/
bool txn_logger::g_persist = false;
bool txn_logger::g_call_fsync = true;
bool txn_logger::g_use_compression = false;
bool txn_logger::g_fake_writes = false;
size_t txn_logger::g_nworkers = 0;
txn_logger::epoch_array
  txn_logger::per_thread_sync_epochs_[txn_logger::g_nmax_loggers];
aligned_padded_elem<atomic<uint64_t>>
  txn_logger::system_sync_epoch_(0);
percore<txn_logger::persist_ctx>
  txn_logger::g_persist_ctxs;
percore<txn_logger::persist_stats>
  txn_logger::g_persist_stats;
event_counter
  txn_logger::g_evt_log_buffer_epoch_boundary("log_buffer_epoch_boundary");
event_counter
  txn_logger::g_evt_log_buffer_out_of_space("log_buffer_out_of_space");
event_counter
  txn_logger::g_evt_log_buffer_bytes_before_compress("log_buffer_bytes_before_compress");
event_counter
  txn_logger::g_evt_log_buffer_bytes_after_compress("log_buffer_bytes_after_compress");
event_counter
  txn_logger::g_evt_logger_writev_limit_met("logger_writev_limit_met");
event_counter
  txn_logger::g_evt_logger_max_lag_wait("logger_max_lag_wait");
event_avg_counter
  txn_logger::g_evt_avg_log_buffer_compress_time_us("avg_log_buffer_compress_time_us");
event_avg_counter
  txn_logger::g_evt_avg_log_entry_ntxns("avg_log_entry_ntxns_per_entry");
event_avg_counter
  txn_logger::g_evt_avg_logger_bytes_per_writev("avg_logger_bytes_per_writev");
event_avg_counter
  txn_logger::g_evt_avg_logger_bytes_per_sec("avg_logger_bytes_per_sec");
static event_avg_counter
  evt_avg_log_buffer_iov_len("avg_log_buffer_iov_len");
void
txn_logger::Init(
    size_t nworkers,
    const vector<string> &logfiles,
    const vector<vector<unsigned>> &assignments_given,
    vector<vector<unsigned>> *assignments_used,
    bool call_fsync,
    bool use_compression,
    bool fake_writes)
{
  INVARIANT(!g_persist);
  INVARIANT(g_nworkers == 0);
  INVARIANT(nworkers > 0);
  INVARIANT(!logfiles.empty());
  INVARIANT(logfiles.size() <= g_nmax_loggers);
  INVARIANT(!use_compression || g_perthread_buffers > 1); // need 1 as scratch buf
  vector<int> fds;
  for (auto &fname : logfiles) {
    int fd = open(fname.c_str(), O_CREAT|O_WRONLY|O_TRUNC, 0664);
    if (fd == -1) {
      perror("open");
      ALWAYS_ASSERT(false);
    }
    fds.push_back(fd);
  }
  g_persist = true;
  g_call_fsync = call_fsync;
  g_use_compression = use_compression;
  g_fake_writes = fake_writes;
  g_nworkers = nworkers;
  for (size_t i = 0; i < g_nmax_loggers; i++)
    for (size_t j = 0; j < g_nworkers; j++)
      per_thread_sync_epochs_[i].epochs_[j].store(0, memory_order_release);
  vector<thread> writers;
  vector<vector<unsigned>> assignments(assignments_given);
  if (assignments.empty()) {
    // compute assuming homogeneous disks
    if (g_nworkers <= fds.size()) {
      // each thread gets its own logging worker
      for (size_t i = 0; i < g_nworkers; i++)
        assignments.push_back({(unsigned) i});
    } else {
      // XXX: currently we assume each logger is equally fast; we should
      // adjust the ratios accordingly for non-homogeneous loggers
      const size_t threads_per_logger = g_nworkers / fds.size();
      for (size_t i = 0; i < fds.size(); i++) {
        assignments.emplace_back(
            MakeRange<unsigned>(
              i * threads_per_logger,
              ((i + 1) == fds.size()) ? g_nworkers : (i + 1) * threads_per_logger));
      }
    }
  }
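  // Worked example of the default assignment (illustrative, not in the
  // original source): with g_nworkers = 8 and 3 log files,
  // threads_per_logger = 8/3 = 2, so the computed assignments are
  // {0,1}, {2,3}, and {4,5,6,7} -- the last logger absorbs the remainder.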
  INVARIANT(AssignmentsValid(assignments, fds.size(), g_nworkers));
  for (size_t i = 0; i < assignments.size(); i++) {
    writers.emplace_back(
        &txn_logger::writer,
        i, fds[i], assignments[i]);
    writers.back().detach();
  }
  thread persist_thread(&txn_logger::persister, assignments);
  persist_thread.detach();
  if (assignments_used)
    *assignments_used = assignments;
}
void
txn_logger::persister(
    vector<vector<unsigned>> assignments)
{
  timer loop_timer;
  for (;;) {
    const uint64_t last_loop_usec = loop_timer.lap();
    const uint64_t delay_time_usec = ticker::tick_us;
    if (last_loop_usec < delay_time_usec) {
      const uint64_t sleep_ns = (delay_time_usec - last_loop_usec) * 1000;
      struct timespec t;
      t.tv_sec = sleep_ns / ONE_SECOND_NS;
      t.tv_nsec = sleep_ns % ONE_SECOND_NS;
      nanosleep(&t, nullptr);
    }
    advance_system_sync_epoch(assignments);
  }
}
void
txn_logger::advance_system_sync_epoch(
    const vector<vector<unsigned>> &assignments)
{
  uint64_t min_so_far = numeric_limits<uint64_t>::max();
  const uint64_t best_tick_ex =
    ticker::s_instance.global_current_tick();

  const uint64_t best_tick_inc =
    best_tick_ex ? (best_tick_ex - 1) : 0;
  for (size_t i = 0; i < assignments.size(); i++)
    for (auto j : assignments[i])
      for (size_t k = j; k < NMAXCORES; k += g_nworkers) {
        persist_ctx &ctx = persist_ctx_for(k, INITMODE_NONE);
        // we need to arbitrarily advance threads which are not "doing
        // anything", so they don't drag down the persistence of the system.
        // if we can see that a thread is NOT in a guarded section AND its
        // core->logger queue is empty, then we can advance its sync epoch up
        // to best_tick_inc, b/c it is guaranteed that any action it takes
        // from now on will happen in an epoch > best_tick_inc
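        // Illustrative scenario (not in the original source): if
        // best_tick_ex is 10, an idle core's sync epoch jumps straight to
        // best_tick_inc = 9; anything that core logs afterwards must carry
        // an epoch >= 10, so the jump can never move the frontier backwards.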
        if (!ctx.persist_buffers_.peek()) {
          spinlock &l = ticker::s_instance.lock_for(k);
          if (!l.is_locked()) {
            bool did_lock = false;
            for (size_t c = 0; c < 3; c++) {
              if (l.try_lock()) {
                did_lock = true;
                break;
              }
            }
            if (did_lock) {
              if (!ctx.persist_buffers_.peek()) {
                min_so_far = min(min_so_far, best_tick_inc);
                per_thread_sync_epochs_[i].epochs_[k].store(
                    best_tick_inc, memory_order_release);
                l.unlock();
                continue;
              }
              l.unlock();
            }
          }
        }
        min_so_far = min(
            per_thread_sync_epochs_[i].epochs_[k].load(
              memory_order_acquire),
            min_so_far);
      }
  const uint64_t syssync =
    system_sync_epoch_->load(memory_order_acquire);
  INVARIANT(min_so_far < numeric_limits<uint64_t>::max());
  INVARIANT(syssync <= min_so_far);
  // need to aggregate from [syssync + 1, min_so_far]
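  // Worked example (illustrative): if syssync = 5 and min_so_far = 7, then
  // epochs 6 and 7 have newly become persistent; their per-epoch slots in
  // d_[] are folded into the cumulative counters below and reset for reuse.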
  const uint64_t now_us = timer::cur_usec();
  for (size_t i = 0; i < g_persist_stats.size(); i++) {
    auto &ps = g_persist_stats[i];
    for (uint64_t e = syssync + 1; e <= min_so_far; e++) {
      auto &pes = ps.d_[e % g_max_lag_epochs];
      const uint64_t ntxns_in_epoch = pes.ntxns_.load(memory_order_acquire);
      const uint64_t start_us = pes.earliest_start_us_.load(memory_order_acquire);
      INVARIANT(now_us >= start_us);
      non_atomic_fetch_add(ps.ntxns_persisted_, ntxns_in_epoch);
      non_atomic_fetch_add(
          ps.latency_numer_,
          (now_us - start_us) * ntxns_in_epoch);
      pes.ntxns_.store(0, memory_order_release);
      pes.earliest_start_us_.store(0, memory_order_release);
    }
  }
  system_sync_epoch_->store(min_so_far, memory_order_release);
}
void
txn_logger::writer(
    unsigned id, int fd,
    vector<unsigned> assignment)
{
  if (g_pin_loggers_to_numa_nodes) {
    ALWAYS_ASSERT(!numa_run_on_node(id % numa_num_configured_nodes()));
    ALWAYS_ASSERT(!sched_yield());
  }

  vector<iovec> iovs(
      min(size_t(IOV_MAX), g_nworkers * g_perthread_buffers));
  vector<pbuffer *> pxs;
  timer loop_timer;
  // XXX: sense is not useful for now, unless we want to
  // fsync in the background...
  bool sense = false; // cur is at sense, prev is at !sense
  uint64_t epoch_prefixes[2][NMAXCORES];
  NDB_MEMSET(&epoch_prefixes[0], 0, sizeof(epoch_prefixes[0]));
  NDB_MEMSET(&epoch_prefixes[1], 0, sizeof(epoch_prefixes[1]));
  // NOTE: a core id in the persistence system really represents
  // all cores in the regular system modulo g_nworkers
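  // Worked example (illustrative): with g_nworkers = 2 and NMAXCORES = 8,
  // logging worker 0 drains the queues of cores 0, 2, 4, 6 and worker 1
  // drains cores 1, 3, 5, 7 -- the "k += g_nworkers" strides below.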
  size_t nbufswritten = 0, nbyteswritten = 0;
  for (;;) {
    const uint64_t last_loop_usec = loop_timer.lap();
    const uint64_t delay_time_usec = ticker::tick_us;
    // don't allow an iteration of this loop to take less than an epoch's
    // worth of time, so we can batch IO
    if (last_loop_usec < delay_time_usec && nbufswritten < iovs.size()) {
      const uint64_t sleep_ns = (delay_time_usec - last_loop_usec) * 1000;
      struct timespec t;
      t.tv_sec = sleep_ns / ONE_SECOND_NS;
      t.tv_nsec = sleep_ns % ONE_SECOND_NS;
      nanosleep(&t, nullptr);
    }
    // we need g_persist_stats[k].d_[cur_sync_epoch_ex % g_max_lag_epochs]
    // to remain untouched (until the syncer can catch up), so we
    // cannot read any buffers with epoch >=
    // (cur_sync_epoch_ex + g_max_lag_epochs)
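    // Worked example (illustrative): if system_sync_epoch_ = 9 then
    // cur_sync_epoch_ex = 10; with g_max_lag_epochs = 64, a buffer whose
    // last TID falls in epoch >= 74 would wrap onto a stats slot still in
    // use, so it is left on the queue for a later pass.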
    const uint64_t cur_sync_epoch_ex =
      system_sync_epoch_->load(memory_order_acquire) + 1;
    nbufswritten = nbyteswritten = 0;
    for (auto idx : assignment) {
      INVARIANT(idx >= 0 && idx < g_nworkers);
      for (size_t k = idx; k < NMAXCORES; k += g_nworkers) {
        persist_ctx &ctx = persist_ctx_for(k, INITMODE_NONE);
        ctx.persist_buffers_.peekall(pxs);
        for (auto px : pxs) {
          INVARIANT(!px->io_scheduled_);
          INVARIANT(nbufswritten <= iovs.size());
          INVARIANT(px->header()->nentries_);
          INVARIANT(px->core_id_ == k);
          if (nbufswritten == iovs.size()) {
            ++g_evt_logger_writev_limit_met;
            goto process;
          }
          if (transaction_proto2_static::EpochId(px->header()->last_tid_) >=
              cur_sync_epoch_ex + g_max_lag_epochs) {
            ++g_evt_logger_max_lag_wait;
            break;
          }
          iovs[nbufswritten].iov_base = (void *) &px->buf_start_[0];
#ifdef LOGGER_UNSAFE_REDUCE_BUFFER_SIZE
#define PXLEN(px) (((px)->curoff_ < 4) ? (px)->curoff_ : ((px)->curoff_ / 4))
#else
#define PXLEN(px) ((px)->curoff_)
#endif

          const size_t pxlen = PXLEN(px);
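          // Note (illustrative): under LOGGER_UNSAFE_REDUCE_BUFFER_SIZE,
          // PXLEN deliberately under-reports the buffer length (e.g. a
          // curoff_ of 1024 yields pxlen = 256), so only a quarter of each
          // buffer is written -- a benchmarking knob, unsafe for recovery.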
          iovs[nbufswritten].iov_len = pxlen;
          evt_avg_log_buffer_iov_len.offer(pxlen);
          px->io_scheduled_ = true;
          nbufswritten++;
          nbyteswritten += pxlen;
#ifdef CHECK_INVARIANTS
          auto last_tid_cid = transaction_proto2_static::CoreId(px->header()->last_tid_);
          auto px_cid = px->core_id_;
          if (last_tid_cid != px_cid) {
            cerr << "header: " << *px->header() << endl;
            cerr << g_proto_version_str(last_tid_cid) << endl;
            cerr << "last_tid_cid: " << last_tid_cid << endl;
            cerr << "px_cid: " << px_cid << endl;
          }
#endif
          const uint64_t px_epoch =
            transaction_proto2_static::EpochId(px->header()->last_tid_);
          INVARIANT(
              transaction_proto2_static::CoreId(px->header()->last_tid_) ==
              px->core_id_);
          INVARIANT(epoch_prefixes[sense][k] <= px_epoch);
          INVARIANT(px_epoch > 0);
          epoch_prefixes[sense][k] = px_epoch - 1;
          auto &pes = g_persist_stats[k].d_[px_epoch % g_max_lag_epochs];
          if (!pes.ntxns_.load(memory_order_acquire))
            pes.earliest_start_us_.store(px->earliest_start_us_, memory_order_release);
          non_atomic_fetch_add(pes.ntxns_, px->header()->nentries_);
          g_evt_avg_log_entry_ntxns.offer(px->header()->nentries_);
        }
      }
    }

  process:
    if (!nbufswritten) {
      // XXX: should probably sleep here
      nop_pause();
      continue;
    }
    const bool dosense = sense;
    if (!g_fake_writes) {
#ifdef ENABLE_EVENT_COUNTERS
      timer write_timer;
#endif
      const ssize_t ret = writev(fd, &iovs[0], nbufswritten);
      if (unlikely(ret == -1)) {
        perror("writev");
        ALWAYS_ASSERT(false);
      }

      if (g_call_fsync) {
        const int fret = fdatasync(fd);
        if (unlikely(fret == -1)) {
          perror("fdatasync");
          ALWAYS_ASSERT(false);
        }
      }
#ifdef ENABLE_EVENT_COUNTERS
      {
        g_evt_avg_logger_bytes_per_writev.offer(nbyteswritten);
        const double bytes_per_sec =
          double(nbyteswritten) / (write_timer.lap_ms() / 1000.0);
        g_evt_avg_logger_bytes_per_sec.offer(bytes_per_sec);
      }
#endif
    }
    // update metadata from the previous write
    //
    // return all buffers that have been io_scheduled_ - we can do this as
    // soon as write returns. we take care to return each buffer to the
    // proper per-core pool
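    // Sketch of the hand-off (illustrative): epoch_prefixes[dosense][k]
    // means "every buffer from core k with epoch <= this value has been
    // written", so publishing it into per_thread_sync_epochs_[id] is what
    // lets advance_system_sync_epoch() move the global persistence frontier.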
    epoch_array &ea = per_thread_sync_epochs_[id];
    for (auto idx : assignment) {
      for (size_t k = idx; k < NMAXCORES; k += g_nworkers) {
        const uint64_t x0 = ea.epochs_[k].load(memory_order_acquire);
        const uint64_t x1 = epoch_prefixes[dosense][k];
        if (x1 > x0)
          ea.epochs_[k].store(x1, memory_order_release);
        persist_ctx &ctx = persist_ctx_for(k, INITMODE_NONE);
        pbuffer *px, *px0;
        while ((px = ctx.persist_buffers_.peek()) && px->io_scheduled_) {
#ifdef LOGGER_STRIDE_OVER_BUFFER
          {
            const size_t pxlen = PXLEN(px);
            const size_t stridelen = 1;
            for (size_t p = 0; p < pxlen; p += stridelen)
              if ((&px->buf_start_[0])[p] & 0xF)
                non_atomic_fetch_add(ea.dummy_work_, 1UL);
          }
#endif
          px0 = ctx.persist_buffers_.deq();
          INVARIANT(px == px0);
          INVARIANT(px->header()->nentries_);
          INVARIANT(ctx.init_);
          INVARIANT(px0->core_id_ == k);
          ctx.all_buffers_.enq(px0);
        }
      }
    }
  }
}
tuple<uint64_t, uint64_t, double>
txn_logger::compute_ntxns_persisted_statistics()
{
  uint64_t acc = 0, acc1 = 0, acc2 = 0;
  uint64_t num = 0;
  for (size_t i = 0; i < g_persist_stats.size(); i++) {
    acc += g_persist_stats[i].ntxns_persisted_.load(memory_order_acquire);
    acc1 += g_persist_stats[i].ntxns_pushed_.load(memory_order_acquire);
    acc2 += g_persist_stats[i].ntxns_committed_.load(memory_order_acquire);
    num += g_persist_stats[i].latency_numer_.load(memory_order_acquire);
  }
  INVARIANT(acc <= acc1);
  INVARIANT(acc1 <= acc2);
  if (!acc)
    return make_tuple(0, acc1, 0.0);
  return make_tuple(acc, acc1, double(num) / double(acc));
}
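// Usage sketch (illustrative, not in the original source): the tuple is
// (ntxns persisted, ntxns pushed to the loggers, average persistence
// latency in usec per persisted txn), so a caller might report it as:
//
//   uint64_t npersisted, npushed;
//   double avg_latency_us;
//   tie(npersisted, npushed, avg_latency_us) =
//     txn_logger::compute_ntxns_persisted_statistics();
//   cerr << npersisted << "/" << npushed
//        << " txns persisted, avg latency " << avg_latency_us << " us" << endl;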
void
txn_logger::clear_ntxns_persisted_statistics()
{
  for (size_t i = 0; i < g_persist_stats.size(); i++) {
    auto &ps = g_persist_stats[i];
    ps.ntxns_persisted_.store(0, memory_order_release);
    ps.ntxns_pushed_.store(0, memory_order_release);
    ps.ntxns_committed_.store(0, memory_order_release);
    ps.latency_numer_.store(0, memory_order_release);
    for (size_t e = 0; e < g_max_lag_epochs; e++) {
      auto &pes = ps.d_[e];
      pes.ntxns_.store(0, memory_order_release);
      pes.earliest_start_us_.store(0, memory_order_release);
    }
  }
}
void
txn_logger::wait_for_idle_state()
{
  for (size_t i = 0; i < NMAXCORES; i++) {
    persist_ctx &ctx = persist_ctx_for(i, INITMODE_NONE);
    if (!ctx.init_)
      continue;
    pbuffer *px;
    while (!(px = ctx.all_buffers_.peek()) || px->header()->nentries_)
      nop_pause();
    while (ctx.persist_buffers_.peek())
      nop_pause();
  }
}
void
txn_logger::wait_until_current_point_persisted()
{
  const uint64_t e = ticker::s_instance.global_current_tick();
  cerr << "waiting for system_sync_epoch_="
       << system_sync_epoch_->load(memory_order_acquire)
       << " to catch up to e=" << e << endl;
  while (system_sync_epoch_->load(memory_order_acquire) < e)
    nop_pause();
}
/** garbage collection subsystem **/
static event_counter evt_local_chain_cleanups("local_chain_cleanups");
static event_counter evt_try_delete_unlinks("try_delete_unlinks");
static event_avg_counter evt_avg_time_inbetween_ro_epochs_usec(
    "avg_time_inbetween_ro_epochs_usec");
void
transaction_proto2_static::InitGC()
{
  g_flags->g_gc_init.store(true, memory_order_release);
}
// helper: sleep for roughly one read-only epoch's worth of time
static void
sleep_ro_epoch()
{
  const uint64_t sleep_ns = transaction_proto2_static::ReadOnlyEpochUsec * 1000;
  struct timespec t;
  t.tv_sec = sleep_ns / ONE_SECOND_NS;
  t.tv_nsec = sleep_ns % ONE_SECOND_NS;
  nanosleep(&t, nullptr);
}
void
transaction_proto2_static::PurgeThreadOutstandingGCTasks()
{
#ifdef PROTO2_CAN_DISABLE_GC
  if (!IsGCEnabled())
    return;
#endif
  INVARIANT(!rcu::s_instance.in_rcu_region());
  threadctx &ctx = g_threadctxs.my();
  uint64_t e;
  if (!ctx.queue_.get_latest_epoch(e))
    return;
  // wait until we can clean up e
  for (;;) {
    const uint64_t last_tick_ex = ticker::s_instance.global_last_tick_exclusive();
    const uint64_t ro_tick_ex = to_read_only_tick(last_tick_ex);
    if (unlikely(!ro_tick_ex)) {
      sleep_ro_epoch();
      continue;
    }
    const uint64_t ro_tick_geq = ro_tick_ex - 1;
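    // Worked example (illustrative): if the last exclusive tick maps to
    // read-only tick 4, then ro_tick_geq = 3 is the newest read-only epoch
    // that is sealed; entries queued under epoch e can be reaped only once
    // ro_tick_geq >= e, hence the wait below.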
    if (ro_tick_geq < e) {
      sleep_ro_epoch();
      continue;
    }
    break;
  }
  clean_up_to_including(ctx, e);
  INVARIANT(ctx.queue_.empty());
}
//#ifdef CHECK_INVARIANTS
//// make sure hidden is blocked by version e, when traversing from start
//static bool
//IsBlocked(dbtuple *start, dbtuple *hidden, uint64_t e)
//{
//  for (dbtuple *c = start; c; c = c->get_next()) {
//    if (c == hidden)
//      return true;
//    if (c->is_not_behind(e))
//      return false; // no later version can be visible
//  }
//  ALWAYS_ASSERT(false); // hidden should be found on chain
//  return false;
//}
//#endif
void
transaction_proto2_static::clean_up_to_including(threadctx &ctx, uint64_t ro_tick_geq)
{
  INVARIANT(!rcu::s_instance.in_rcu_region());
  INVARIANT(ctx.last_reaped_epoch_ <= ro_tick_geq);
  INVARIANT(ctx.scratch_.empty());
  if (ctx.last_reaped_epoch_ == ro_tick_geq)
    return;
#ifdef ENABLE_EVENT_COUNTERS
  const uint64_t now = timer::cur_usec();
  if (ctx.last_reaped_timestamp_us_ > 0) {
    const uint64_t diff = now - ctx.last_reaped_timestamp_us_;
    evt_avg_time_inbetween_ro_epochs_usec.offer(diff);
  }
  ctx.last_reaped_timestamp_us_ = now;
#endif
  ctx.last_reaped_epoch_ = ro_tick_geq;
#ifdef CHECK_INVARIANTS
  const uint64_t last_tick_ex = ticker::s_instance.global_last_tick_exclusive();
  INVARIANT(last_tick_ex);
  const uint64_t last_consistent_tid = ComputeReadOnlyTid(last_tick_ex - 1);
  const uint64_t computed_last_tick_ex = ticker::s_instance.compute_global_last_tick_exclusive();
  INVARIANT(last_tick_ex <= computed_last_tick_ex);
  INVARIANT(to_read_only_tick(last_tick_ex) > ro_tick_geq);
#endif
  char rcu_guard[sizeof(scoped_rcu_base<false>)] = {0};
  const size_t max_niters_with_rcu = 128;
#define ENTER_RCU() \
  do { \
    new (&rcu_guard[0]) scoped_rcu_base<false>(); \
  } while (0)
#define EXIT_RCU() \
  do { \
    scoped_rcu_base<false> *px = (scoped_rcu_base<false> *) &rcu_guard[0]; \
    px->~scoped_rcu_base<false>(); \
  } while (0)
  ctx.scratch_.empty_accept_from(ctx.queue_, ro_tick_geq);
  ctx.scratch_.transfer_freelist(ctx.queue_);
  px_queue &q = ctx.scratch_;
  if (q.empty())
    return;
  bool in_rcu = false;
  size_t niters_with_rcu = 0, n = 0;
  for (auto it = q.begin(); it != q.end(); ++it, ++n, ++niters_with_rcu) {
    auto &delent = *it;
    INVARIANT(delent.tuple()->opaque.load(std::memory_order_acquire) == 1);
    if (!delent.key_.get_flags()) {
      // guaranteed to be gc-able now (even w/o RCU)
#ifdef CHECK_INVARIANTS
      if (delent.trigger_tid_ > last_consistent_tid /*|| !IsBlocked(delent.tuple_ahead_, delent.tuple(), last_consistent_tid) */) {
        cerr << "tuple ahead : " << g_proto_version_str(delent.tuple_ahead_->version) << endl;
        cerr << "tuple ahead : " << *delent.tuple_ahead_ << endl;
        cerr << "trigger tid : " << g_proto_version_str(delent.trigger_tid_) << endl;
        cerr << "tuple : " << g_proto_version_str(delent.tuple()->version) << endl;
        cerr << "last_consist_tid: " << g_proto_version_str(last_consistent_tid) << endl;
        cerr << "last_tick_ex : " << last_tick_ex << endl;
        cerr << "ro_tick_geq : " << ro_tick_geq << endl;
        cerr << "rcu_block_tick : " << it.tick() << endl;
      }
      INVARIANT(delent.trigger_tid_ <= last_consistent_tid);
      delent.tuple()->opaque.store(0, std::memory_order_release);
#endif
      dbtuple::release_no_rcu(delent.tuple());
    } else {
      INVARIANT(!delent.tuple_ahead_);
      INVARIANT(delent.btr_);
      // check if an element precedes the (deleted) tuple before doing the delete
      ::lock_guard<dbtuple> lg_tuple(delent.tuple(), false);
#ifdef CHECK_INVARIANTS
      if (!delent.tuple()->is_not_behind(last_consistent_tid)) {
        cerr << "trigger tid : " << g_proto_version_str(delent.trigger_tid_) << endl;
        cerr << "tuple : " << g_proto_version_str(delent.tuple()->version) << endl;
        cerr << "last_consist_tid: " << g_proto_version_str(last_consistent_tid) << endl;
        cerr << "last_tick_ex : " << last_tick_ex << endl;
        cerr << "ro_tick_geq : " << ro_tick_geq << endl;
        cerr << "rcu_block_tick : " << it.tick() << endl;
      }
      INVARIANT(delent.tuple()->version == delent.trigger_tid_);
      INVARIANT(delent.tuple()->is_not_behind(last_consistent_tid));
      INVARIANT(delent.tuple()->is_deleting());
#endif
      if (unlikely(!delent.tuple()->is_latest())) {
        // requeue it up, except this time as a regular delete
        const uint64_t my_ro_tick = to_read_only_tick(
            ticker::s_instance.global_current_tick());
        ctx.queue_.enqueue(
            delete_entry(
              nullptr,
              MakeTid(CoreMask, NumIdMask >> NumIdShift, (my_ro_tick + 1) * ReadOnlyEpochMultiplier - 1),
              delent.tuple(),
              marked_ptr<string>(),
              delent.btr_),
            my_ro_tick);
        ++g_evt_proto_gc_delete_requeue;
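        // Reading the MakeTid call above (illustrative): CoreMask and
        // (NumIdMask >> NumIdShift) saturate the core and num-id fields,
        // and the epoch field is the last tick of read-only epoch
        // my_ro_tick + 1, i.e. the largest TID that epoch can contain --
        // so the requeued entry cannot be reaped before that epoch is sealed.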
        // reclaim string ptrs
        string *spx = delent.key_.get();
        if (unlikely(spx))
          ctx.pool_.emplace_back(spx);
        continue;
      }
#ifdef CHECK_INVARIANTS
      delent.tuple()->opaque.store(0, std::memory_order_release);
#endif
      // if delent.key_ is nullptr, then the key is stored in the tuple
      // record storage location, and the size field contains the length of
      // the key
      //
      // otherwise, delent.key_ is a pointer to a string containing the
      // key
      varkey k;
      string *spx = delent.key_.get();
      if (!spx) {
        k = varkey(delent.tuple()->get_value_start(), delent.tuple()->size);
      } else {
        k = varkey(*spx);
        ctx.pool_.emplace_back(spx);
      }

      if (!in_rcu) {
        ENTER_RCU();
        niters_with_rcu = 0;
        in_rcu = true;
      }
      typename concurrent_btree::value_type removed = 0;
      const bool did_remove = delent.btr_->remove(k, &removed);
      ALWAYS_ASSERT(did_remove);
      INVARIANT(removed == (typename concurrent_btree::value_type) delent.tuple());
      delent.tuple()->clear_latest();
      dbtuple::release(delent.tuple()); // rcu free it
    }
    if (in_rcu && niters_with_rcu >= max_niters_with_rcu) {
      // don't hold an RCU region open for too many iterations
      EXIT_RCU();
      niters_with_rcu = 0;
      in_rcu = false;
    }
  }
  q.clear();
  g_evt_avg_proto_gc_queue_len.offer(n);
  if (in_rcu)
    EXIT_RCU();
  INVARIANT(!rcu::s_instance.in_rcu_region());
}
aligned_padded_elem<transaction_proto2_static::hackstruct>
  transaction_proto2_static::g_hack;
aligned_padded_elem<transaction_proto2_static::flags>
  transaction_proto2_static::g_flags;
percore_lazy<transaction_proto2_static::threadctx>
  transaction_proto2_static::g_threadctxs;
event_counter
  transaction_proto2_static::g_evt_worker_thread_wait_log_buffer(
    "worker_thread_wait_log_buffer");
event_counter
  transaction_proto2_static::g_evt_dbtuple_no_space_for_delkey(
    "dbtuple_no_space_for_delkey");
event_counter
  transaction_proto2_static::g_evt_proto_gc_delete_requeue(
    "proto_gc_delete_requeue");
event_avg_counter
  transaction_proto2_static::g_evt_avg_log_entry_size(
    "avg_log_entry_size");
event_avg_counter
  transaction_proto2_static::g_evt_avg_proto_gc_queue_len(
    "avg_proto_gc_queue_len");