// Test harness type selection: when built with NDB_MASSTREE, the tests run
// against the Masstree-backed mbtree<>; otherwise against the native btree<>.
// NOTE(review): this is an extraction fragment — the closing `};` of each
// traits struct and the `#else`/`#endif` of the conditional were dropped.
14 #include "btree_impl.h"
18 #include "scopedperf.hh"
20 #if defined(NDB_MASSTREE)
21 #include "masstree_btree.h"
// Masstree flavor: RCU responsibility stays with the tree, not the caller.
22 struct testing_concurrent_btree_traits : public masstree_params {
23 static const bool RcuRespCaller = false;
25 typedef mbtree<testing_concurrent_btree_traits> testing_concurrent_btree;
// Only the Masstree build supports reverse range scans (rsearch_range*);
// tests guard those sections with this macro.
26 #define HAVE_REVERSE_RANGE_SCANS
// Native-btree flavor (the #else branch of the conditional above).
28 struct testing_concurrent_btree_traits : public concurrent_btree_traits {
29 static const bool RcuRespCaller = false;
31 typedef btree<testing_concurrent_btree_traits> testing_concurrent_btree;
37 class scoped_rate_timer {
44 scoped_rate_timer(const string ®ion, size_t n) : region(region), n(n)
49 double x = t.lap() / 1000.0; // ms
50 double rate = double(n) / (x / 1000.0);
51 cerr << "timed region `" << region << "' took " << x
52 << " ms (" << rate << " events/sec)" << endl;
// Base class for the multi-threaded (mp_test*) workers: an ndb_thread that
// holds a non-owning pointer to the shared tree under test. Both pointer and
// reference constructors are provided for caller convenience.
// NOTE(review): fragment — access specifiers and closing `};` were dropped.
56 class btree_worker : public ndb_thread {
58 btree_worker(testing_concurrent_btree *btr) : btr(btr) {}
59 btree_worker(testing_concurrent_btree &btr) : btr(&btr) {}
61 testing_concurrent_btree *const btr;
// test1 body (enclosing function signature and several closing braces were
// dropped by the extraction): single-threaded insert/search exercising the
// first leaf split and then the first root split, validating tree invariants
// and key visibility after each structural change.
67 testing_concurrent_btree btr;
68 btr.invariant_checker();
70 // fill up root leaf node
71 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode; i++) {
72 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
73 btr.invariant_checker();
75 typename testing_concurrent_btree::value_type v = 0;
76 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
77 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
79 ALWAYS_ASSERT(btr.size() == testing_concurrent_btree::NKeysPerNode);
// one more key than a node can hold forces the leaf to split
82 btr.insert(u64_varkey(testing_concurrent_btree::NKeysPerNode), (typename testing_concurrent_btree::value_type) (testing_concurrent_btree::NKeysPerNode));
83 btr.invariant_checker();
84 ALWAYS_ASSERT(btr.size() == testing_concurrent_btree::NKeysPerNode + 1);
86 // now make sure we can find everything post split
87 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode + 1; i++) {
88 typename testing_concurrent_btree::value_type v = 0;
89 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
90 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
93 // now fill up the new root node
94 const size_t n = (testing_concurrent_btree::NKeysPerNode + testing_concurrent_btree::NKeysPerNode * (testing_concurrent_btree::NMinKeysPerNode));
95 for (size_t i = testing_concurrent_btree::NKeysPerNode + 1; i < n; i++) {
96 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
97 btr.invariant_checker();
99 typename testing_concurrent_btree::value_type v = 0;
100 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
101 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
103 ALWAYS_ASSERT(btr.size() == n);
105 // cause the root node to split
106 btr.insert(u64_varkey(n), (typename testing_concurrent_btree::value_type) n);
107 btr.invariant_checker();
108 ALWAYS_ASSERT(btr.size() == n + 1);
110 // once again make sure we can find everything
111 for (size_t i = 0; i < n + 1; i++) {
112 typename testing_concurrent_btree::value_type v = 0;
113 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
114 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
// test2 body (fragment): inserts even keys first, then interleaves the odd
// keys, forcing splits in the middle of populated leaves rather than only at
// the right edge; verifies each key immediately after its insert.
121 testing_concurrent_btree btr;
122 const size_t n = 1000;
123 for (size_t i = 0; i < n; i += 2) {
124 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
125 btr.invariant_checker();
127 typename testing_concurrent_btree::value_type v = 0;
128 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
129 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
132 for (size_t i = 1; i < n; i += 2) {
133 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
134 btr.invariant_checker();
136 typename testing_concurrent_btree::value_type v = 0;
137 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
138 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
141 ALWAYS_ASSERT(btr.size() == n);
// test3 body (fragment): exercises removal patterns that trigger different
// merge/redistribution paths — ascending removal, descending removal, and a
// mixed pattern (descending from the middle, then the upper remainder) —
// refilling the tree between rounds and checking invariants each step.
147 testing_concurrent_btree btr;
// round 1: fill two nodes' worth, remove in ascending order
149 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode * 2; i++) {
150 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
151 btr.invariant_checker();
153 typename testing_concurrent_btree::value_type v = 0;
154 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
155 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
157 ALWAYS_ASSERT(btr.size() == testing_concurrent_btree::NKeysPerNode * 2);
159 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode * 2; i++) {
160 btr.remove(u64_varkey(i));
161 btr.invariant_checker();
163 typename testing_concurrent_btree::value_type v = 0;
164 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
166 ALWAYS_ASSERT(btr.size() == 0);
// round 2: refill, remove in descending order (ssize_t so i >= 0 terminates)
168 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode * 2; i++) {
169 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
170 btr.invariant_checker();
172 typename testing_concurrent_btree::value_type v = 0;
173 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
174 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
176 ALWAYS_ASSERT(btr.size() == testing_concurrent_btree::NKeysPerNode * 2);
178 for (ssize_t i = testing_concurrent_btree::NKeysPerNode * 2 - 1; i >= 0; i--) {
179 btr.remove(u64_varkey(i));
180 btr.invariant_checker();
182 typename testing_concurrent_btree::value_type v = 0;
183 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
185 ALWAYS_ASSERT(btr.size() == 0);
// round 3: refill, remove lower half descending then upper half ascending
187 for (size_t i = 0; i < testing_concurrent_btree::NKeysPerNode * 2; i++) {
188 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
189 btr.invariant_checker();
191 typename testing_concurrent_btree::value_type v = 0;
192 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
193 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
195 ALWAYS_ASSERT(btr.size() == testing_concurrent_btree::NKeysPerNode * 2);
197 for (ssize_t i = testing_concurrent_btree::NKeysPerNode; i >= 0; i--) {
198 btr.remove(u64_varkey(i));
199 btr.invariant_checker();
201 typename testing_concurrent_btree::value_type v = 0;
202 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
205 for (size_t i = testing_concurrent_btree::NKeysPerNode + 1; i < testing_concurrent_btree::NKeysPerNode * 2; i++) {
206 btr.remove(u64_varkey(i));
207 btr.invariant_checker();
209 typename testing_concurrent_btree::value_type v = 0;
210 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
212 ALWAYS_ASSERT(btr.size() == 0);
// test4 body (fragment): sequential fill of 10k keys, then random removals
// (duplicates tolerated — remove of an absent key is a no-op), then a full
// sweep to guarantee the tree ends empty.
218 testing_concurrent_btree btr;
219 const size_t nkeys = 10000;
220 for (size_t i = 0; i < nkeys; i++) {
221 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
222 btr.invariant_checker();
223 typename testing_concurrent_btree::value_type v = 0;
224 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
225 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
227 ALWAYS_ASSERT(btr.size() == nkeys);
// random removals: rand() may repeat keys, so this alone need not empty the tree
231 for (size_t i = 0; i < nkeys; i++) {
232 size_t k = rand() % nkeys;
233 btr.remove(u64_varkey(k));
234 btr.invariant_checker();
235 typename testing_concurrent_btree::value_type v = 0;
236 ALWAYS_ASSERT(!btr.search(u64_varkey(k), v));
// deterministic sweep removes whatever the random phase missed
239 for (size_t i = 0; i < nkeys; i++) {
240 btr.remove(u64_varkey(i));
241 btr.invariant_checker();
242 typename testing_concurrent_btree::value_type v = 0;
243 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
245 ALWAYS_ASSERT(btr.size() == 0);
// test5 body (fragment): repeats a randomized insert/remove workload under
// several fixed RNG seeds for reproducibility; tree size is cross-checked
// against a shadow std::set `s` (whose insert lines were dropped by the
// extraction — presumably s.insert(k) accompanies each btr.insert).
251 // insert in random order, delete in random order
252 testing_concurrent_btree btr;
254 unsigned int seeds[] = {
255 54321, 2013883780, 3028985725, 3058602342, 256561598, 2895653051
258 for (size_t iter = 0; iter < ARRAY_NELEMS(seeds); iter++) {
260 const size_t nkeys = 20000;
262 for (size_t i = 0; i < nkeys; i++) {
263 size_t k = rand() % nkeys;
265 btr.insert(u64_varkey(k), (typename testing_concurrent_btree::value_type) k);
266 btr.invariant_checker();
267 typename testing_concurrent_btree::value_type v = 0;
268 ALWAYS_ASSERT(btr.search(u64_varkey(k), v));
269 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) k);
271 ALWAYS_ASSERT(btr.size() == s.size());
// remove 2*nkeys random keys, then a deterministic sweep to empty the tree
273 for (size_t i = 0; i < nkeys * 2; i++) {
274 size_t k = rand() % nkeys;
275 btr.remove(u64_varkey(k));
276 btr.invariant_checker();
277 typename testing_concurrent_btree::value_type v = 0;
278 ALWAYS_ASSERT(!btr.search(u64_varkey(k), v));
282 for (size_t i = 0; i < nkeys; i++) {
283 btr.remove(u64_varkey(i));
284 btr.invariant_checker();
285 typename testing_concurrent_btree::value_type v = 0;
286 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
289 ALWAYS_ASSERT(btr.size() == 0);
// Range-scan callback used by test6: records (key copy, value) pairs into a
// caller-owned vector and asserts that keys arrive in strictly increasing
// order (or strictly decreasing order when constructed with reverse=true).
// NOTE(review): fragment — the `geq`/`leq` declarations and closing braces
// were dropped by the extraction.
294 struct scan_callback {
296 pair< std::string, // we want to make copies of keys
297 typename testing_concurrent_btree::value_type > > kv_vec;
298 scan_callback(kv_vec *data, bool reverse = false)
299 : data(data), reverse_(reverse) {}
301 operator()(const typename testing_concurrent_btree::string_type &k,
302 typename testing_concurrent_btree::value_type v) const
// ordering check: compare against the previously collected key
304 if (!data->empty()) {
306 typename testing_concurrent_btree::string_type(data->back().first) >= k;
308 typename testing_concurrent_btree::string_type(data->back().first) <= k;
309 if ((!reverse_ && geq) || (reverse_ && leq)) {
310 cerr << "data->size(): " << data->size() << endl;
311 cerr << "prev: " << varkey(data->back().first) << endl;
312 cerr << "cur : " << varkey(k) << endl;
313 ALWAYS_ASSERT(false);
316 data->push_back(make_pair(k, v));
// test6 body (fragment): forward range scans over [500,600) and [500,end),
// then (Masstree builds only) reverse scans with and without a lower bound;
// each scan's results are checked element-by-element for key and value.
327 testing_concurrent_btree btr;
328 const size_t nkeys = 1000;
329 for (size_t i = 0; i < nkeys; i++)
330 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
331 btr.invariant_checker();
332 ALWAYS_ASSERT(btr.size() == nkeys);
334 using namespace test6_ns;
// bounded forward scan: keys 500..599 inclusive-exclusive
336 scan_callback::kv_vec data;
337 scan_callback cb(&data);
338 u64_varkey max_key(600);
339 btr.search_range(u64_varkey(500), &max_key, cb);
340 ALWAYS_ASSERT(data.size() == 100);
341 for (size_t i = 0; i < 100; i++) {
342 const varkey lhs(data[i].first), rhs(u64_varkey(500 + i));
343 ALWAYS_ASSERT(lhs == rhs);
344 ALWAYS_ASSERT(data[i].second == (typename testing_concurrent_btree::value_type) (500 + i));
// unbounded forward scan from 500 to the end of the tree
348 btr.search_range(u64_varkey(500), NULL, cb);
349 ALWAYS_ASSERT(data.size() == 500);
350 for (size_t i = 0; i < 500; i++) {
351 ALWAYS_ASSERT(varkey(data[i].first) == u64_varkey(500 + i));
352 ALWAYS_ASSERT(data[i].second == (typename testing_concurrent_btree::value_type) (500 + i));
355 #ifdef HAVE_REVERSE_RANGE_SCANS
// unbounded reverse scan starting at key 499, descending to 0
357 scan_callback cb_rev(&data, true);
358 btr.rsearch_range(u64_varkey(499), NULL, cb_rev);
359 ALWAYS_ASSERT(data.size() == 500);
360 for (ssize_t i = 499; i >= 0; i--) {
361 ALWAYS_ASSERT(varkey(data[499 - i].first) == u64_varkey(i));
362 ALWAYS_ASSERT(data[499 - i].second == (typename testing_concurrent_btree::value_type) (i));
// bounded reverse scan: 999 down to (exclusive) 499
366 u64_varkey min_key(499);
367 btr.rsearch_range(u64_varkey(999), &min_key, cb_rev);
368 ALWAYS_ASSERT(data.size() == 500);
369 for (ssize_t i = 999; i >= 500; i--) {
370 ALWAYS_ASSERT(varkey(data[999 - i].first) == u64_varkey(i));
371 ALWAYS_ASSERT(data[999 - i].second == (typename testing_concurrent_btree::value_type) (i));
// test7 body (fragment): return-value contract of the mutation API —
// remove() is false on an absent key; insert() returns true only for a new
// key but still overwrites on duplicate; insert_if_absent() never overwrites.
379 testing_concurrent_btree btr;
380 ALWAYS_ASSERT(!btr.remove(u64_varkey(0)));
381 ALWAYS_ASSERT(btr.insert(u64_varkey(0), (typename testing_concurrent_btree::value_type) 0));
// duplicate insert: returns false, but the stored value IS replaced (checked below)
382 ALWAYS_ASSERT(!btr.insert(u64_varkey(0), (typename testing_concurrent_btree::value_type) 1));
383 typename testing_concurrent_btree::value_type v;
384 ALWAYS_ASSERT(btr.search(u64_varkey(0), v));
385 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) 1);
// insert_if_absent on a present key: returns false and leaves the value alone
386 ALWAYS_ASSERT(!btr.insert_if_absent(u64_varkey(0), (typename testing_concurrent_btree::value_type) 2));
387 ALWAYS_ASSERT(btr.search(u64_varkey(0), v));
388 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) 1);
389 ALWAYS_ASSERT(btr.remove(u64_varkey(0)));
390 ALWAYS_ASSERT(btr.insert_if_absent(u64_varkey(0), (typename testing_concurrent_btree::value_type) 2));
391 ALWAYS_ASSERT(btr.search(u64_varkey(0), v));
392 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) 2);
// Variable-length keys short enough to stay within a single trie layer
// ("a" .. "aaaaa"): insert, verify via strcmp on the stored pointer-as-value,
// then remove everything. Return type line was dropped by the extraction.
396 test_varlen_single_layer()
398 testing_concurrent_btree btr;
400 const char *k0 = "a";
401 const char *k1 = "aa";
402 const char *k2 = "aaa";
403 const char *k3 = "aaaa";
404 const char *k4 = "aaaaa";
406 const char *keys[] = {k0, k1, k2, k3, k4};
407 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
// value is the key's own char pointer, so search results can be strcmp'd
408 ALWAYS_ASSERT(btr.insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i]));
409 btr.invariant_checker();
412 ALWAYS_ASSERT(btr.size() == ARRAY_NELEMS(keys));
413 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
414 typename testing_concurrent_btree::value_type v = 0;
415 ALWAYS_ASSERT(btr.search(varkey(keys[i]), v));
416 ALWAYS_ASSERT(strcmp((const char *) v, keys[i]) == 0);
419 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
420 ALWAYS_ASSERT(btr.remove(varkey(keys[i])));
421 btr.invariant_checker();
423 ALWAYS_ASSERT(btr.size() == 0);
// Same shape as test_varlen_single_layer but with keys long enough (7+ chars,
// up to 48) to force multiple trie layers in the two-level key encoding.
427 test_varlen_multi_layer()
429 testing_concurrent_btree btr;
431 const char *k0 = "aaaaaaa";
432 const char *k1 = "aaaaaaaa";
433 const char *k2 = "aaaaaaaaa";
434 const char *k3 = "aaaaaaaaaa";
435 const char *k4 = "aaaaaaaaaaa";
436 const char *k5 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
437 const char *keys[] = {k0, k1, k2, k3, k4, k5};
439 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
440 ALWAYS_ASSERT(btr.insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i]));
441 btr.invariant_checker();
444 ALWAYS_ASSERT(btr.size() == ARRAY_NELEMS(keys));
445 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
446 typename testing_concurrent_btree::value_type v = 0;
447 ALWAYS_ASSERT(btr.search(varkey(keys[i]), v));
448 ALWAYS_ASSERT(strcmp((const char *) v, keys[i]) == 0);
451 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
452 ALWAYS_ASSERT(btr.remove(varkey(keys[i])));
453 btr.invariant_checker();
455 ALWAYS_ASSERT(btr.size() == 0);
// Fragment of another two-layer test: two 9/10-char keys that share a prefix
// and therefore land in different trie layers; plus the opening of
// test_ensure_printable (body dropped — presumably just instantiates a tree).
461 const char *k0 = "aaaaaaaaa";
462 const char *k1 = "aaaaaaaaaa";
464 testing_concurrent_btree btr;
465 ALWAYS_ASSERT(btr.insert(varkey(k0), (typename testing_concurrent_btree::value_type) k0));
466 ALWAYS_ASSERT(btr.insert(varkey(k1), (typename testing_concurrent_btree::value_type) k1));
467 ALWAYS_ASSERT(btr.size() == 2);
// compile/link smoke test: marked used so the printable helpers aren't elided
470 static __attribute__((used)) void test_ensure_printable() {
471 testing_concurrent_btree btr;
// Reusable range-scan checker: runs search_range_call (or, reversed,
// rsearch_range_call) over [begin, end) and validates the visited keys
// against an expectation — either an exact/minimum count (tag 0) or an
// exact/at-least key set (tag 1). NOTE(review): heavy fragment — the
// EXPECT_* enum, constructor initializer list pieces, do_test() signature,
// and several braces were dropped by the extraction.
475 class test_range_scan_helper : public testing_concurrent_btree::search_range_callback {
// expectation variant: tag==0 checks sizes, tag==1 checks key sets
479 expect() : tag(), expected_size() {}
480 expect(size_t expected_size)
481 : tag(0), expected_size(expected_size) {}
482 expect(const set<string> &expected_keys)
483 : tag(1), expected_keys(expected_keys) {}
485 size_t expected_size;
486 set<string> expected_keys;
494 test_range_scan_helper(
495 testing_concurrent_btree &btr,
496 const testing_concurrent_btree::key_type &begin,
497 const testing_concurrent_btree::key_type *end,
499 const expect &expectation,
500 ExpectType ex_type = EXPECT_EXACT)
// end is deep-copied (heap) because the caller's key may not outlive do_test
503 end(end ? new testing_concurrent_btree::key_type(*end) : NULL),
505 expectation(expectation),
510 ~test_range_scan_helper()
// per-key callback: record the key and assert strict ordering
517 invoke(const typename testing_concurrent_btree::string_type &k,
518 typename testing_concurrent_btree::value_type v)
520 VERBOSE(cerr << "test_range_scan_helper::invoke(): received key(size="
521 << k.size() << "): " << hexify(k) << endl);
524 ALWAYS_ASSERT(typename testing_concurrent_btree::string_type(keys.back()) < k);
526 ALWAYS_ASSERT(typename testing_concurrent_btree::string_type(keys.back()) > k);
// forward or reverse scan depending on construction
536 btr->search_range_call(begin, end, *this);
538 btr->rsearch_range_call(begin, end, *this);
539 if (expectation.tag == 0) {
542 ALWAYS_ASSERT(keys.size() == expectation.expected_size);
545 ALWAYS_ASSERT(keys.size() >= expectation.expected_size);
551 ALWAYS_ASSERT(keys.size() == expectation.expected_keys.size());
// compare visited order against the sorted (or reverse-sorted) expected set
554 cmp.assign(expectation.expected_keys.begin(), expectation.expected_keys.end());
556 cmp.assign(expectation.expected_keys.rbegin(), expectation.expected_keys.rend());
557 for (size_t i = 0; i < keys.size(); i++) {
558 if (keys[i] != cmp[i]) {
559 cerr << "A: " << hexify(keys[i]) << endl;
560 cerr << "B: " << hexify(cmp[i]) << endl;
561 ALWAYS_ASSERT(false);
566 case EXPECT_ATLEAST: {
567 ALWAYS_ASSERT(keys.size() >= expectation.expected_keys.size());
568 // every key in the expected set must be present
569 set<string> keyset(keys.begin(), keys.end());
570 for (auto it = expectation.expected_keys.begin();
571 it != expectation.expected_keys.end(); ++it)
572 ALWAYS_ASSERT(keyset.count(*it) == 1);
580 testing_concurrent_btree *const btr;
581 testing_concurrent_btree::key_type begin;
582 testing_concurrent_btree::key_type *end;
// Range scan over a key set spanning two trie layers; forward scan from ""
// and (Masstree builds) reverse scan from a key above every inserted key.
591 test_two_layer_range_scan()
593 const char *keys[] = {
599 "b", "c", "d", "e", "f", "g", "h", "i", "j", "k",
600 "l", "m", "n", "o", "p", "q", "r", "s",
603 testing_concurrent_btree btr;
604 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
605 ALWAYS_ASSERT(btr.insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i]));
606 btr.invariant_checker();
// expect exactly the inserted key set, in order
609 test_range_scan_helper::expect ex(set<string>(keys, keys + ARRAY_NELEMS(keys)));
610 test_range_scan_helper tester(btr, varkey(""), NULL, false, ex);
613 #ifdef HAVE_REVERSE_RANGE_SCANS
// start key chosen to sort after every key above
614 test_range_scan_helper tester_rev(btr, varkey("zzzzzzzzzzzzzzzzzzzzzz"), NULL, true, ex);
// Regression-style scan test: lokey/hikey differ only in their 40-byte tail
// (zeros vs 0xFF). With exactly lokey present, a scan over the open interval
// just above lokey must visit nothing, in both directions.
620 test_multi_layer_scan()
622 const uint8_t lokey_cstr[] = {
623 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x45, 0x49, 0x4E, 0x47,
624 0x41, 0x54, 0x49, 0x4F, 0x4E, 0x45, 0x49, 0x4E, 0x47, 0x00, 0x00, 0x00,
625 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
626 0x00, 0x00, 0x00, 0x00
628 const uint8_t hikey_cstr[] = {
629 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x45, 0x49, 0x4E, 0x47,
630 0x41, 0x54, 0x49, 0x4F, 0x4E, 0x45, 0x49, 0x4E, 0x47, 0x00, 0x00, 0x00,
631 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
632 0xFF, 0xFF, 0xFF, 0xFF
634 const string lokey_s((const char *) &lokey_cstr[0], ARRAY_NELEMS(lokey_cstr));
635 const string hikey_s((const char *) &hikey_cstr[0], ARRAY_NELEMS(hikey_cstr));
// lokey_s_next = lokey_s + '\0': the smallest key strictly greater than lokey
637 string lokey_s_next(lokey_s);
638 lokey_s_next.resize(lokey_s_next.size() + 1);
640 const varkey hikey(hikey_s);
642 testing_concurrent_btree btr;
643 ALWAYS_ASSERT(btr.insert(varkey(lokey_s), (typename testing_concurrent_btree::value_type) 0x123));
// forward scan (lokey, hikey): expect zero hits
645 test_range_scan_helper::expect ex(0);
646 test_range_scan_helper tester(btr, varkey(lokey_s_next), &hikey, false, ex);
649 #ifdef HAVE_REVERSE_RANGE_SCANS
650 const varkey lokey(lokey_s);
651 test_range_scan_helper tester_rev(btr, varkey(hikey_s), &lokey, true, ex);
// Null-byte key test body (fragment): keys are runs of 0..10 NUL bytes, so
// they differ only in length — exercising length-based comparison across
// trie-layer boundaries; then u64 keys are mixed in to confirm coexistence.
659 const uint8_t k0[] = {};
660 const uint8_t k1[] = {'\0'};
661 const uint8_t k2[] = {'\0', '\0'};
662 const uint8_t k3[] = {'\0', '\0', '\0'};
663 const uint8_t k4[] = {'\0', '\0', '\0', '\0'};
664 const uint8_t k5[] = {'\0', '\0', '\0', '\0', '\0'};
665 const uint8_t k6[] = {'\0', '\0', '\0', '\0', '\0', '\0'};
666 const uint8_t k7[] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0'};
667 const uint8_t k8[] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'};
668 const uint8_t k9[] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'};
669 const uint8_t k10[] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'};
670 const uint8_t *keys[] = {k0, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10};
672 testing_concurrent_btree btr;
// varkey(ptr, i): key i is the first i NUL bytes — length IS the identity
674 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
675 ALWAYS_ASSERT(btr.insert(varkey(keys[i], i), (typename testing_concurrent_btree::value_type) i));
676 btr.invariant_checker();
679 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
680 typename testing_concurrent_btree::value_type v = 0;
681 ALWAYS_ASSERT(btr.search(varkey(keys[i], i), v));
682 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
// add 8-byte integer keys and re-verify both key families
685 for (size_t i = 1; i <= 20; i++) {
686 ALWAYS_ASSERT(btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i));
687 btr.invariant_checker();
690 for (size_t i = 0; i < ARRAY_NELEMS(keys); i++) {
691 typename testing_concurrent_btree::value_type v = 0;
692 ALWAYS_ASSERT(btr.search(varkey(keys[i], i), v));
693 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
696 for (size_t i = 1; i <= 20; i++) {
697 typename testing_concurrent_btree::value_type v = 0;
698 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
699 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
// Prefix-explosion test body (fragment): ~200 random prefixes, each extended
// by 1..12 extra bytes, producing a key set that forces many trie layers.
// Values are the keys' own data() pointers. Sizes are checked after every
// insert/remove via the running counter `ctr` (declaration dropped).
706 const size_t nprefixes = 200;
708 testing_concurrent_btree btr;
710 fast_random r(9084398309893);
712 set<string> prefixes;
713 for (size_t i = 0; i < nprefixes; i++) {
715 const string k(r.next_string(r.next() % 30));
716 if (prefixes.count(k) == 1)
// derive 12 keys per prefix by zero-padding to successive lengths
722 for (auto &prefix : prefixes) {
723 for (size_t i = 1; i <= 12; i++) {
724 std::string x(prefix);
725 x.resize(x.size() + i);
731 for (auto it = keys.begin(); it != keys.end(); ++it, ++ctr) {
732 ALWAYS_ASSERT(btr.insert(varkey(*it), (typename testing_concurrent_btree::value_type) it->data()));
733 btr.invariant_checker();
734 ALWAYS_ASSERT(btr.size() == ctr);
736 ALWAYS_ASSERT(btr.size() == keys.size());
738 for (auto it = keys.begin(); it != keys.end(); ++it) {
739 typename testing_concurrent_btree::value_type v = 0;
740 ALWAYS_ASSERT(btr.search(varkey(*it), v));
741 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) it->data());
// full scan forward (and reverse, where supported) must see exactly `keys`
744 test_range_scan_helper::expect ex(keys);
745 test_range_scan_helper tester(btr, varkey(*keys.begin()), NULL, false, ex);
748 #ifdef HAVE_REVERSE_RANGE_SCANS
749 test_range_scan_helper tester_rev(btr, varkey(*keys.rbegin()), NULL, true, ex);
// tear down, checking size shrinks by one per removal
753 ctr = keys.size() - 1;
754 for (auto it = keys.begin(); it != keys.end(); ++it, --ctr) {
755 ALWAYS_ASSERT(btr.remove(varkey(*it)));
756 btr.invariant_checker();
757 ALWAYS_ASSERT(btr.size() == ctr);
759 ALWAYS_ASSERT(btr.size() == 0);
// maxkey: a string of `size` 0xFF bytes — sorts after any readable key of
// the same length (used as the reverse-scan start key below).
763 maxkey(unsigned size)
765 return string(size, 255);
// Random readable-string key test body (fragment): up to 10k unique keys of
// length 0..1000; full forward scan from "" and reverse scan from maxkey
// must both visit exactly the inserted key set.
771 testing_concurrent_btree btr;
772 fast_random r(43698);
774 const size_t nkeys = 10000;
775 const unsigned int maxkeylen = 1000;
780 for (size_t i = 0; i < nkeys; i++) {
782 string k = r.next_readable_string(r.next() % (maxkeylen + 1));
// skip duplicates so keyset and tree stay in sync
783 if (keyset.count(k) == 1)
787 btr.insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i].data());
788 btr.invariant_checker();
791 ALWAYS_ASSERT(btr.size() == keyset.size());
793 for (size_t i = 0; i < nkeys; i++) {
794 typename testing_concurrent_btree::value_type v = 0;
795 ALWAYS_ASSERT(btr.search(varkey(keys[i]), v));
796 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) keys[i].data());
799 test_range_scan_helper::expect ex(keyset);
800 test_range_scan_helper tester(btr, varkey(""), NULL, false, ex);
803 #ifdef HAVE_REVERSE_RANGE_SCANS
804 const string mkey = maxkey(maxkeylen);
805 test_range_scan_helper tester_rev(btr, varkey(mkey), NULL, true, ex);
809 for (size_t i = 0; i < nkeys; i++) {
810 btr.remove(varkey(keys[i]));
811 btr.invariant_checker();
813 ALWAYS_ASSERT(btr.size() == 0);
// Mixed workload: bootstrap 100k unique random keys, prepare a disjoint
// second key set, then pair each remove(start key) with an insert(new key)
// so the tree churns at constant size; final size must equal the new set.
817 test_insert_remove_mix()
819 testing_concurrent_btree btr;
820 fast_random r(38953623328597);
822 // bootstrap with keys, then alternate insert/remove
823 const size_t nkeys_start = 100000;
825 vector<string> start_keys_v;
826 set<string> start_keys;
827 for (size_t i = 0; i < nkeys_start; i++) {
829 string k = r.next_readable_string(r.next() % 200);
830 if (start_keys.count(k) == 1)
832 start_keys_v.push_back(k);
833 start_keys.insert(k);
834 ALWAYS_ASSERT(btr.insert(varkey(k), (typename testing_concurrent_btree::value_type) k.data()));
836 btr.invariant_checker();
837 ALWAYS_ASSERT(btr.size() == start_keys.size());
// build a second key set disjoint from the first
839 vector<string> insert_keys_v;
840 set<string> insert_keys;
841 for (size_t i = 0; i < nkeys_start; i++) {
843 string k = r.next_readable_string(r.next() % 200);
844 if (start_keys.count(k) == 1 || insert_keys.count(k) == 1)
846 insert_keys_v.push_back(k);
847 insert_keys.insert(k);
// churn: one remove + one insert per step keeps tree size constant
850 for (size_t i = 0; i < nkeys_start; i++) {
851 ALWAYS_ASSERT(btr.remove(varkey(start_keys_v[i])));
852 ALWAYS_ASSERT(btr.insert(varkey(insert_keys_v[i]), (typename testing_concurrent_btree::value_type) insert_keys_v[i].data()));
854 btr.invariant_checker();
855 ALWAYS_ASSERT(btr.size() == insert_keys.size());
// mp_test1: two threads concurrently insert disjoint key ranges
// ([0, nkeys/2) and [nkeys/2, nkeys)); afterwards every key must be present.
858 namespace mp_test1_ns {
860 static const size_t nkeys = 20000;
862 class ins0_worker : public btree_worker {
864 ins0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
867 for (size_t i = 0; i < nkeys / 2; i++)
868 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
872 class ins1_worker : public btree_worker {
874 ins1_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
877 for (size_t i = nkeys / 2; i < nkeys; i++)
878 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
886 using namespace mp_test1_ns;
888 // test a bunch of concurrent inserts
889 testing_concurrent_btree btr;
894 w0.start(); w1.start();
895 w0.join(); w1.join();
// post-join verification happens single-threaded
897 btr.invariant_checker();
898 for (size_t i = 0; i < nkeys; i++) {
899 typename testing_concurrent_btree::value_type v = 0;
900 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
901 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
903 ALWAYS_ASSERT(btr.size() == nkeys);
// mp_test2: two threads concurrently remove disjoint key ranges from a
// pre-filled tree; afterwards the tree must be empty.
906 namespace mp_test2_ns {
908 static const size_t nkeys = 20000;
910 class rm0_worker : public btree_worker {
912 rm0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
915 for (size_t i = 0; i < nkeys / 2; i++)
916 btr->remove(u64_varkey(i));
920 class rm1_worker : public btree_worker {
922 rm1_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
925 for (size_t i = nkeys / 2; i < nkeys; i++)
926 btr->remove(u64_varkey(i));
934 using namespace mp_test2_ns;
936 // test a bunch of concurrent removes
937 testing_concurrent_btree btr;
939 for (size_t i = 0; i < nkeys; i++)
// NOTE(review): u64_varkey(u64_varkey(i)) double-wraps the key — harmless
// (copy construction) but redundant; plain u64_varkey(i) would suffice.
940 btr.insert(u64_varkey(u64_varkey(i)), (typename testing_concurrent_btree::value_type) i);
941 btr.invariant_checker();
946 w0.start(); w1.start();
947 w0.join(); w1.join();
949 btr.invariant_checker();
950 for (size_t i = 0; i < nkeys; i++) {
951 typename testing_concurrent_btree::value_type v = 0;
952 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
954 ALWAYS_ASSERT(btr.size() == 0);
// mp_test3: concurrent remover (even keys) and inserter (odd keys) racing on
// the same tree; afterwards exactly the odd keys must remain.
957 namespace mp_test3_ns {
959 static const size_t nkeys = 20000;
961 class rm0_worker : public btree_worker {
963 rm0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
966 // remove the even keys
967 for (size_t i = 0; i < nkeys; i += 2)
968 btr->remove(u64_varkey(i));
972 class ins0_worker : public btree_worker {
974 ins0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
977 // insert the odd keys
978 for (size_t i = 1; i < nkeys; i += 2)
979 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
987 using namespace mp_test3_ns;
989 // test a bunch of concurrent inserts and removes
990 testing_concurrent_btree btr;
992 // insert the even keys
993 for (size_t i = 0; i < nkeys; i += 2)
// NOTE(review): redundant u64_varkey(u64_varkey(i)) double-wrap (see mp_test2)
994 btr.insert(u64_varkey(u64_varkey(i)), (typename testing_concurrent_btree::value_type) i);
995 btr.invariant_checker();
1000 w0.start(); w1.start();
1001 w0.join(); w1.join();
1003 btr.invariant_checker();
1005 // should find no even keys
1006 for (size_t i = 0; i < nkeys; i += 2) {
1007 typename testing_concurrent_btree::value_type v = 0;
1008 ALWAYS_ASSERT(!btr.search(u64_varkey(i), v));
1011 // should find all odd keys
1012 for (size_t i = 1; i < nkeys; i += 2) {
1013 typename testing_concurrent_btree::value_type v = 0;
1014 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
1015 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
1018 ALWAYS_ASSERT(btr.size() == nkeys / 2);
// mp_test4: three-way race — a reader asserting even keys stay visible, an
// inserter adding odd keys, and a remover that removes-then-reinserts odd
// keys; afterwards all nkeys keys must be present.
1021 namespace mp_test4_ns {
1023 static const size_t nkeys = 20000;
1025 class search0_worker : public btree_worker {
1027 search0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
1030 // search the even keys
1031 for (size_t i = 0; i < nkeys; i += 2) {
1032 typename testing_concurrent_btree::value_type v = 0;
// even keys are never removed by any worker, so search must succeed
1033 ALWAYS_ASSERT(btr->search(u64_varkey(i), v));
1034 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
1039 class ins0_worker : public btree_worker {
1041 ins0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
1044 // insert the odd keys
1045 for (size_t i = 1; i < nkeys; i += 2)
1046 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
1050 class rm0_worker : public btree_worker {
1052 rm0_worker(testing_concurrent_btree &btr) : btree_worker(btr) {}
1055 // remove and reinsert odd keys
1056 for (size_t i = 1; i < nkeys; i += 2) {
1057 btr->remove(u64_varkey(i));
1058 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
1067 using namespace mp_test4_ns;
1069 // test a bunch of concurrent searches, inserts, and removes
1070 testing_concurrent_btree btr;
1072 // insert the even keys
1073 for (size_t i = 0; i < nkeys; i += 2)
// NOTE(review): redundant u64_varkey(u64_varkey(i)) double-wrap (see mp_test2)
1074 btr.insert(u64_varkey(u64_varkey(i)), (typename testing_concurrent_btree::value_type) i);
1075 btr.invariant_checker();
1077 search0_worker w0(btr);
1078 ins0_worker w1(btr);
1081 w0.start(); w1.start(); w2.start();
1082 w0.join(); w1.join(); w2.join();
1084 btr.invariant_checker();
1086 // should find all keys
1087 for (size_t i = 0; i < nkeys; i++) {
1088 typename testing_concurrent_btree::value_type v = 0;
1089 ALWAYS_ASSERT(btr.search(u64_varkey(i), v));
1090 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) i);
1093 ALWAYS_ASSERT(btr.size() == nkeys);
// mp_test_pinning: each of 4 workers pins itself to a core (RCU
// pin_current_thread) and toggles between removing and inserting its own
// disjoint key range until the shared `running` flag is cleared.
1096 namespace mp_test_pinning_ns {
1097 static const size_t keys_per_thread = 1000;
1098 static const size_t nthreads = 4;
// cooperative stop flag checked inside both loop levels
1099 static atomic<bool> running(true);
1100 class worker : public btree_worker {
1102 worker(unsigned int thread, testing_concurrent_btree &btr)
1103 : btree_worker(btr), thread(thread) {}
// pin to a core; wrap around if threads outnumber online CPUs
1107 rcu::s_instance.pin_current_thread(thread % coreid::num_cpus_online());
1108 for (unsigned mode = 0; running.load(); mode++) {
1109 for (size_t i = thread * keys_per_thread;
1110 running.load() && i < (thread + 1) * keys_per_thread;
// alternate modes: one pass removes the range, the next reinserts it
1114 btr->remove(u64_varkey(i));
1117 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
1123 unsigned int thread;
1130 using namespace mp_test_pinning_ns;
1131 testing_concurrent_btree btr;
1132 vector<unique_ptr<worker>> workers;
1133 for (size_t i = 0; i < nthreads; i++)
1134 workers.emplace_back(new worker(i, btr));
1135 for (auto &p : workers)
// (sleep/duration line dropped by extraction) then signal stop and join
1138 running.store(false);
1139 for (auto &p : workers)
1141 btr.invariant_checker();
// mp_test_inserts_removes: workers either insert or remove their own disjoint
// key range. Three phases per iteration: all-insert, all-remove, then mixed
// insert+remove racing on the same ranges; invariants checked after each.
1144 namespace mp_test_inserts_removes_ns {
1145 static const size_t keys_per_thread = 10000;
1146 static const size_t nthreads = 4;
1147 class worker : public btree_worker {
1149 worker(bool inserts, unsigned int thread,
1150 testing_concurrent_btree &btr)
1151 : btree_worker(btr), inserts(inserts), thread(thread) {}
1155 for (size_t i = thread * keys_per_thread;
1156 i < (thread + 1) * keys_per_thread;
1160 btr->insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
1162 btr->remove(u64_varkey(i));
1167 unsigned int thread;
1172 mp_test_inserts_removes()
1174 using namespace mp_test_inserts_removes_ns;
1175 for (size_t iter = 0; iter < 3; iter++) {
1176 testing_concurrent_btree btr;
1177 vector<unique_ptr<worker>> workers;
// phase 1: concurrent inserts only
1179 for (size_t i = 0; i < nthreads; i++)
1180 workers.emplace_back(new worker(true, i, btr));
1181 for (auto &p : workers)
1183 for (auto &p : workers)
1185 btr.invariant_checker();
// phase 2: concurrent removes only
1188 for (size_t i = 0; i < nthreads; i++)
1189 workers.emplace_back(new worker(false, i, btr));
1190 for (auto &p : workers)
1192 for (auto &p : workers)
1194 btr.invariant_checker();
// phase 3: inserter and remover racing per key range
1197 for (size_t i = 0; i < nthreads; i++) {
1198 workers.emplace_back(new worker(true, i, btr));
1199 workers.emplace_back(new worker(false, i, btr));
1201 for (auto &p : workers)
1203 for (auto &p : workers)
1205 btr.invariant_checker();
// Namespace for a randomized mixed-workload test on a tiny key space
// (max_key = 45) so that threads collide constantly. Each worker records the
// keys it inserted/removed in its `sum` so the driver can cross-check the
// final tree contents. NOTE(review): interior lines (summary struct, run()
// braces, the `if (choice < 0.6)` line) are elided from this listing.
1210 namespace mp_test5_ns {
1212 static const size_t niters = 100000;
1213 static const size_t max_key = 45;
1215 typedef set<typename testing_concurrent_btree::key_slice> key_set;
1222 class worker : public btree_worker {
1224 worker(unsigned int seed, testing_concurrent_btree &btr) : btree_worker(btr), seed(seed) {}
// rand_r keeps the PRNG state thread-local (deterministic per seed).
1227 unsigned int s = seed;
1228 // 60% search, 30% insert, 10% remove
1229 for (size_t i = 0; i < niters; i++) {
1230 double choice = double(rand_r(&s)) / double(RAND_MAX);
1231 typename testing_concurrent_btree::key_slice k = rand_r(&s) % max_key;
// Search path: if the key is present, its value must equal the key.
1233 typename testing_concurrent_btree::value_type v = 0;
1234 if (btr->search(u64_varkey(k), v))
1235 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) k);
// choice in [0.6, 0.9): insert; record the key in this worker's summary.
1236 } else if (choice < 0.9) {
1237 btr->insert(u64_varkey(k), (typename testing_concurrent_btree::value_type) k);
1238 sum.inserts.insert(k);
// choice in [0.9, 1.0]: remove; record it likewise.
1240 btr->remove(u64_varkey(k));
1241 sum.removes.insert(k);
// mp_test5 driver: runs four fixed-seed workers concurrently, then merges
// their per-worker insert/remove summaries and checks a weak consistency
// property: any key that was inserted by someone and never removed by anyone
// must still be findable with value == key.
1254 using namespace mp_test5_ns;
1256 testing_concurrent_btree btr;
// Fixed seeds keep the interleaving workload reproducible run-to-run.
1258 worker w0(2145906155, btr);
1259 worker w1(409088773, btr);
1260 worker w2(4199288861, btr);
1261 worker w3(496889962, btr);
1263 w0.start(); w1.start(); w2.start(); w3.start();
1264 w0.join(); w1.join(); w2.join(); w3.join();
// NOTE(review): C-style casts here — presumably stripping volatile/const
// from the worker-owned summaries after join(); confirm sum's declared type.
1266 summary *s0, *s1, *s2, *s3;
1267 s0 = (summary *) &w0.sum;
1268 s1 = (summary *) &w1.sum;
1269 s2 = (summary *) &w2.sum;
1270 s3 = (summary *) &w3.sum;
// Union all workers' insert and remove key sets.
1275 summary *sums[] = { s0, s1, s2, s3 };
1276 for (size_t i = 0; i < ARRAY_NELEMS(sums); i++) {
1277 inserts.insert(sums[i]->inserts.begin(), sums[i]->inserts.end());
1278 removes.insert(sums[i]->removes.begin(), sums[i]->removes.end());
1281 cerr << "num_inserts: " << inserts.size() << endl;
1282 cerr << "num_removes: " << removes.size() << endl;
// Keys inserted but never removed must survive with value == key;
// removed keys are skipped (their final state is timing-dependent).
1284 for (key_set::iterator it = inserts.begin(); it != inserts.end(); ++it) {
1285 if (removes.count(*it) == 1)
1287 typename testing_concurrent_btree::value_type v = 0;
1288 ALWAYS_ASSERT(btr.search(u64_varkey(*it), v));
1289 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) *it);
1292 btr.invariant_checker();
1293 cerr << "btr size: " << btr.size() << endl;
// Namespace for a large-scale u64-key test: half the threads bulk-insert a
// private key vector, the other half bulk-remove a private key vector that
// the driver pre-inserted. Each worker copies its key vector by value, so
// there is no sharing of key storage across threads.
1296 namespace mp_test6_ns {
1297 static const size_t nthreads = 16;
1298 static const size_t ninsertkeys_perthread = 100000;
1299 static const size_t nremovekeys_perthread = 100000;
1301 typedef vector<typename testing_concurrent_btree::key_slice> key_vec;
1303 class insert_worker : public btree_worker {
1305 insert_worker(const vector<typename testing_concurrent_btree::key_slice> &keys, testing_concurrent_btree &btr)
1306 : btree_worker(btr), keys(keys) {}
// Insert every assigned key with value == key.
1309 for (size_t i = 0; i < keys.size(); i++)
1310 btr->insert(u64_varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i]);
// Private copy of this worker's keys.
1313 vector<typename testing_concurrent_btree::key_slice> keys;
1316 class remove_worker : public btree_worker {
1318 remove_worker(const vector<typename testing_concurrent_btree::key_slice> &keys, testing_concurrent_btree &btr)
1319 : btree_worker(btr), keys(keys) {}
// Remove every assigned key (pre-inserted by the test driver).
1322 for (size_t i = 0; i < keys.size(); i++)
1323 btr->remove(u64_varkey(keys[i]));
1326 vector<typename testing_concurrent_btree::key_slice> keys;
// mp_test6 driver: prepares disjoint insert-keys and remove-keys (remove-keys
// are pre-inserted here, single-threaded), runs 8 inserters and 8 removers
// concurrently, then verifies: size == |insert_keys|, every insert key is
// findable with value == key, and every remove key is gone.
1333 using namespace mp_test6_ns;
1335 testing_concurrent_btree btr;
1336 vector<key_vec> inps;
1337 set<unsigned long> insert_keys, remove_keys;
// Fixed seed => reproducible key sets.
1339 fast_random r(87643982);
// First half of threads: generate random keys to insert concurrently.
1340 for (size_t i = 0; i < nthreads / 2; i++) {
1342 for (size_t j = 0; j < ninsertkeys_perthread; j++) {
1343 unsigned long k = r.next();
1344 insert_keys.insert(k);
1347 inps.push_back(inp);
// Second half: generate keys disjoint from insert_keys (skip duplicates),
// pre-insert them now so removers have something to delete.
1349 for (size_t i = nthreads / 2; i < nthreads; i++) {
1351 for (size_t j = 0; j < nremovekeys_perthread;) {
1352 unsigned long k = r.next();
1353 if (insert_keys.count(k) == 1)
1355 btr.insert(u64_varkey(k), (typename testing_concurrent_btree::value_type) k);
1356 remove_keys.insert(k);
1360 inps.push_back(inp);
// Launch all 16 workers and wait for completion.
1363 vector<btree_worker*> workers;
1364 for (size_t i = 0; i < nthreads / 2; i++)
1365 workers.push_back(new insert_worker(inps[i], btr));
1366 for (size_t i = nthreads / 2; i < nthreads; i++)
1367 workers.push_back(new remove_worker(inps[i], btr));
1368 for (size_t i = 0; i < nthreads; i++)
1369 workers[i]->start();
1370 for (size_t i = 0; i < nthreads; i++)
1373 btr.invariant_checker();
// Final state checks: exactly the inserted keys remain.
1375 ALWAYS_ASSERT(btr.size() == insert_keys.size());
1376 for (set<unsigned long>::iterator it = insert_keys.begin();
1377 it != insert_keys.end(); ++it) {
1378 typename testing_concurrent_btree::value_type v = 0;
1379 ALWAYS_ASSERT(btr.search(u64_varkey(*it), v));
1380 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) *it);
1382 for (set<unsigned long>::iterator it = remove_keys.begin();
1383 it != remove_keys.end(); ++it) {
1384 typename testing_concurrent_btree::value_type v = 0;
1385 ALWAYS_ASSERT(!btr.search(u64_varkey(*it), v));
// Workers were heap-allocated above; free them here.
1388 for (size_t i = 0; i < nthreads; i++)
// Namespace for the scan-under-modification test: lookup workers and scan
// workers run against a stable key set while a mod_worker repeatedly inserts
// and removes a disjoint key set, exercising range scans during structural
// changes. `running` is the shared stop flag for the worker loops.
1392 namespace mp_test7_ns {
1393 static const size_t nkeys = 50;
1394 static volatile bool running = false;
1396 typedef vector<typename testing_concurrent_btree::key_slice> key_vec;
// Range-scan callback: accumulates (key, value) pairs and asserts the scan
// yields keys in strictly increasing order (prints both keys on violation).
1398 struct scan_callback {
1400 pair< std::string, typename testing_concurrent_btree::value_type > > kv_vec;
1401 scan_callback(kv_vec *data) : data(data) {}
1403 operator()(const typename testing_concurrent_btree::string_type &k, typename testing_concurrent_btree::value_type v) const
1405 //ALWAYS_ASSERT(data->empty() || data->back().first < k.str());
1406 std::string k_str(k);
1407 if (!data->empty() && data->back().first >= k_str) {
1408 cerr << "prev: <" << hexify(data->back().first) << ">" << endl;
1409 cerr << "cur : <" << hexify(k_str) << ">" << endl;
1410 ALWAYS_ASSERT(false);
1412 data->push_back(make_pair(std::move(k_str), v));
// Repeatedly searches random keys from the stable set; every lookup must
// succeed with value == key since mod_worker never touches these keys.
1418 class lookup_worker : public btree_worker {
1420 lookup_worker(unsigned long seed, const key_vec &keys, testing_concurrent_btree &btr)
1421 : btree_worker(btr), seed(seed), keys(keys)
1425 fast_random r(seed);
1427 uint64_t k = keys[r.next() % keys.size()];
1428 typename testing_concurrent_btree::value_type v = NULL;
1429 ALWAYS_ASSERT(btr->search(u64_varkey(k), v));
1430 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) k);
// Repeatedly scans [nkeys/2, +inf) and checks the results are strictly
// sorted, duplicate-free, and contain at least the stable keys >= nkeys/2.
1437 class scan_worker : public btree_worker {
1439 scan_worker(const key_vec &keys, testing_concurrent_btree &btr)
1440 : btree_worker(btr), keys(keys)
1445 scan_callback::kv_vec data;
1446 scan_callback cb(&data);
1447 btr->search_range(u64_varkey(nkeys / 2), NULL, cb);
1448 set<typename testing_concurrent_btree::string_type> scan_keys;
1450 for (size_t i = 0; i < data.size(); i++) {
1452 ALWAYS_ASSERT(data[i].first != prev);
1453 ALWAYS_ASSERT(data[i].first > prev);
1455 scan_keys.insert(data[i].first);
1456 prev = data[i].first;
// Every stable key in the scanned range must have been reported.
1458 for (size_t i = 0; i < keys.size(); i++) {
1459 if (keys[i] < (nkeys / 2))
1461 ALWAYS_ASSERT(scan_keys.count(u64_varkey(keys[i]).str()) == 1);
// Alternates insert/remove over its key set for as long as `running` holds,
// creating constant structural churn during the scans above.
1468 class mod_worker : public btree_worker {
1470 mod_worker(const key_vec &keys, testing_concurrent_btree &btr)
1471 : btree_worker(btr), keys(keys)
1476 for (size_t i = 0; running; i = (i + 1) % keys.size(), insert = !insert) {
1478 btr->insert(u64_varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i]);
1480 btr->remove(u64_varkey(keys[i]));
// mp_test7 driver: splits keys [0, nkeys) into a stable lookup set and a
// churn (mod) set, pre-loads the lookup set, then runs 4 lookup workers,
// 2 scan workers, and 1 mod worker concurrently. The split predicate is on
// an elided line; memory fences bracket the `running` flag transitions.
1490 using namespace mp_test7_ns;
1491 fast_random r(904380439);
1492 key_vec lookup_keys;
// Partition each key into mod_keys or lookup_keys (predicate elided here).
1494 for (size_t i = 0; i < nkeys; i++) {
1496 mod_keys.push_back(i);
1498 lookup_keys.push_back(i);
1501 testing_concurrent_btree btr;
1502 for (size_t i = 0; i < lookup_keys.size(); i++)
1503 btr.insert(u64_varkey(lookup_keys[i]), (typename testing_concurrent_btree::value_type) lookup_keys[i]);
1504 btr.invariant_checker();
1506 lookup_worker w0(2398430, lookup_keys, btr);
1507 lookup_worker w1(8532, lookup_keys, btr);
1508 lookup_worker w2(23, lookup_keys, btr);
1509 lookup_worker w3(1328209843, lookup_keys, btr);
1510 scan_worker w4(lookup_keys, btr);
1511 scan_worker w5(lookup_keys, btr);
1512 mod_worker w6(mod_keys, btr);
// Fence before/after toggling the volatile `running` flag (set/cleared on
// elided lines) so the flag write is not reordered around worker start/join.
1515 COMPILER_MEMORY_FENCE;
1516 w0.start(); w1.start(); w2.start(); w3.start(); w4.start(); w5.start(); w6.start();
1518 COMPILER_MEMORY_FENCE;
1520 COMPILER_MEMORY_FENCE;
1521 w0.join(); w1.join(); w2.join(); w3.join(); w4.join(); w5.join(); w6.join();
// Namespace for the variable-length (string) key test: same structure as
// mp_test6_ns but keys are std::strings and the value stored is the key's
// own data() pointer. Insert/remove return values are asserted here, unlike
// the u64 variant.
1524 namespace mp_test8_ns {
1525 static const size_t nthreads = 16;
1526 static const size_t ninsertkeys_perthread = 100000;
1527 static const size_t nremovekeys_perthread = 100000;
1529 typedef vector<string> key_vec;
1531 class insert_worker : public btree_worker {
1533 insert_worker(const vector<string> &keys, testing_concurrent_btree &btr)
1534 : btree_worker(btr), keys(keys) {}
// Every insert must report success (keys are unique by construction).
1537 for (size_t i = 0; i < keys.size(); i++)
1538 ALWAYS_ASSERT(btr->insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i].data()));
// Private copy; value pointers reference this vector's string storage.
1541 vector<string> keys;
1544 class remove_worker : public btree_worker {
1546 remove_worker(const vector<string> &keys, testing_concurrent_btree &btr)
1547 : btree_worker(btr), keys(keys) {}
// Every remove must report success (keys were pre-inserted by the driver).
1550 for (size_t i = 0; i < keys.size(); i++)
1551 ALWAYS_ASSERT(btr->remove(varkey(keys[i])));
1554 vector<string> keys;
// mp_test8 driver: string-key analogue of mp_test6. Random keys up to 200
// bytes; insert set and remove set are kept disjoint, the remove set is
// pre-inserted single-threaded, then 8 inserters and 8 removers race.
// Final checks mirror mp_test6 (size, presence, absence).
1561 using namespace mp_test8_ns;
1563 testing_concurrent_btree btr;
1564 vector<key_vec> inps;
1565 set<string> insert_keys, remove_keys;
1567 fast_random r(83287583);
// Build per-thread insert vectors; skip duplicate random keys.
1568 for (size_t i = 0; i < nthreads / 2; i++) {
1570 for (size_t j = 0; j < ninsertkeys_perthread; j++) {
1572 string k = r.next_string(r.next() % 200);
1573 if (insert_keys.count(k) == 1)
1575 insert_keys.insert(k);
1578 inps.push_back(inp);
// Build per-thread remove vectors, disjoint from both sets, pre-inserted.
1580 for (size_t i = nthreads / 2; i < nthreads; i++) {
1582 for (size_t j = 0; j < nremovekeys_perthread;) {
1583 string k = r.next_string(r.next() % 200);
1584 if (insert_keys.count(k) == 1 || remove_keys.count(k) == 1)
1586 ALWAYS_ASSERT(btr.insert(varkey(k), (typename testing_concurrent_btree::value_type) k.data()));
1587 remove_keys.insert(k);
1591 inps.push_back(inp);
1594 btr.invariant_checker();
// Run all 16 workers to completion.
1596 vector<btree_worker*> workers;
1597 for (size_t i = 0; i < nthreads / 2; i++)
1598 workers.push_back(new insert_worker(inps[i], btr));
1599 for (size_t i = nthreads / 2; i < nthreads; i++)
1600 workers.push_back(new remove_worker(inps[i], btr));
1601 for (size_t i = 0; i < nthreads; i++)
1602 workers[i]->start();
1603 for (size_t i = 0; i < nthreads; i++)
1606 btr.invariant_checker();
// Exactly the inserted keys must remain; removed keys must be absent.
1608 ALWAYS_ASSERT(btr.size() == insert_keys.size());
1609 for (set<string>::iterator it = insert_keys.begin();
1610 it != insert_keys.end(); ++it) {
1611 typename testing_concurrent_btree::value_type v = 0;
1612 ALWAYS_ASSERT(btr.search(varkey(*it), v));
1614 for (set<string>::iterator it = remove_keys.begin();
1615 it != remove_keys.end(); ++it) {
1616 typename testing_concurrent_btree::value_type v = 0;
1617 ALWAYS_ASSERT(!btr.search(varkey(*it), v));
// Free heap-allocated workers.
1620 for (size_t i = 0; i < nthreads; i++)
// Namespace for the long-key test (all keys >= 9 bytes, forcing multi-layer
// trees in the Masstree configuration). Adds a scan_worker that runs full
// forward or reverse range scans concurrently with inserts/removes, checking
// via test_range_scan_helper that at least the stable `ex` keys appear.
1624 namespace mp_test_long_keys_ns {
1625 static const size_t nthreads = 16;
1626 static const size_t ninsertkeys_perthread = 500000;
1627 static const size_t nremovekeys_perthread = 500000;
1629 typedef vector<string> key_vec;
1631 class insert_worker : public btree_worker {
1633 insert_worker(const vector<string> &keys, testing_concurrent_btree &btr)
1634 : btree_worker(btr), keys(keys) {}
// Inserts must all succeed; value is the key's own data() pointer.
1637 for (size_t i = 0; i < keys.size(); i++)
1638 ALWAYS_ASSERT(btr->insert(varkey(keys[i]), (typename testing_concurrent_btree::value_type) keys[i].data()));
1641 vector<string> keys;
1644 class remove_worker : public btree_worker {
1646 remove_worker(const vector<string> &keys, testing_concurrent_btree &btr)
1647 : btree_worker(btr), keys(keys) {}
// Removes must all succeed (keys pre-inserted by the driver).
1650 for (size_t i = 0; i < keys.size(); i++)
1651 ALWAYS_ASSERT(btr->remove(varkey(keys[i])));
1654 vector<string> keys;
// Shared stop flag for the scan workers' loops.
1657 static volatile bool running = false;
1659 class scan_worker : public btree_worker {
// `ex`: stable keys expected in every scan; `reverse` picks scan direction.
1661 scan_worker(const set<string> &ex, testing_concurrent_btree &btr, bool reverse)
1662 : btree_worker(btr), ex(ex), reverse_(reverse) {}
// Upper bound larger than any generated key (max length 200+9 bytes).
1665 const string mkey = maxkey(200+9);
// Forward scan from the empty key; EXPECT_ATLEAST tolerates concurrent
// inserts adding extra keys beyond the stable set.
1668 test_range_scan_helper tester(*btr, varkey(""), NULL, false,
1669 ex, test_range_scan_helper::EXPECT_ATLEAST);
// Reverse scan from mkey downward (third arg = true).
1672 test_range_scan_helper tester(*btr, varkey(mkey), NULL, true,
1673 ex, test_range_scan_helper::EXPECT_ATLEAST);
1679 test_range_scan_helper::expect ex;
// mp_test_long_keys driver: seeds 10K stable long keys, prepares disjoint
// insert/remove key sets (all >= 9 bytes), then races 8 inserters, 8
// removers, and 4 scan workers (alternating forward/reverse). Verifies
// final size and per-key presence/absence afterwards.
// all keys at least 9-bytes long
1688 using namespace mp_test_long_keys_ns;
1690 testing_concurrent_btree btr;
1691 vector<key_vec> inps;
1692 set<string> existing_keys, insert_keys, remove_keys;
1694 fast_random r(189230589352);
// Stable baseline keys — these are what the scan workers expect to see.
1695 for (size_t i = 0; i < 10000; i++) {
1697 string k = r.next_string((r.next() % 200) + 9);
1698 if (existing_keys.count(k) == 1)
1700 existing_keys.insert(k);
1701 ALWAYS_ASSERT(btr.insert(varkey(k), (typename testing_concurrent_btree::value_type) k.data()));
1703 ALWAYS_ASSERT(btr.size() == existing_keys.size());
// Per-thread insert vectors, disjoint from the stable set.
1705 for (size_t i = 0; i < nthreads / 2; i++) {
1707 for (size_t j = 0; j < ninsertkeys_perthread; j++) {
1709 string k = r.next_string((r.next() % 200) + 9);
1710 if (insert_keys.count(k) == 1 || existing_keys.count(k) == 1)
1712 insert_keys.insert(k);
1715 inps.push_back(inp);
// Per-thread remove vectors, disjoint from everything, pre-inserted now.
1718 for (size_t i = nthreads / 2; i < nthreads; i++) {
1720 for (size_t j = 0; j < nremovekeys_perthread;) {
1721 string k = r.next_string((r.next() % 200) + 9);
1722 if (insert_keys.count(k) == 1 || existing_keys.count(k) == 1 || remove_keys.count(k) == 1)
1724 ALWAYS_ASSERT(btr.insert(varkey(k), (typename testing_concurrent_btree::value_type) k.data()));
1725 remove_keys.insert(k);
1729 inps.push_back(inp);
1732 ALWAYS_ASSERT(btr.size() == (insert_keys.size() + existing_keys.size()));
1733 btr.invariant_checker();
// Mutating workers vs. continuously-scanning workers (even i => forward,
// odd i => reverse scans).
1735 vector<btree_worker*> workers, running_workers;
1737 for (size_t i = 0; i < nthreads / 2; i++)
1738 workers.push_back(new insert_worker(inps[i], btr));
1739 for (size_t i = nthreads / 2; i < nthreads; i++)
1740 workers.push_back(new remove_worker(inps[i], btr));
1741 for (size_t i = 0; i < 4; i++)
1742 running_workers.push_back(new scan_worker(existing_keys, btr, (i % 2)));
1743 for (size_t i = 0; i < nthreads; i++)
1744 workers[i]->start();
1745 for (size_t i = 0; i < running_workers.size(); i++)
1746 running_workers[i]->start();
1747 for (size_t i = 0; i < nthreads; i++)
1750 for (size_t i = 0; i < running_workers.size(); i++)
1751 running_workers[i]->join();
1753 btr.invariant_checker();
// Final contents: stable + inserted keys present, removed keys absent.
1755 ALWAYS_ASSERT(btr.size() == (insert_keys.size() + existing_keys.size()));
1756 for (set<string>::iterator it = insert_keys.begin();
1757 it != insert_keys.end(); ++it) {
1758 typename testing_concurrent_btree::value_type v = 0;
1759 ALWAYS_ASSERT(btr.search(varkey(*it), v));
1761 for (set<string>::iterator it = remove_keys.begin();
1762 it != remove_keys.end(); ++it) {
1763 typename testing_concurrent_btree::value_type v = 0;
1764 ALWAYS_ASSERT(!btr.search(varkey(*it), v));
// Free both worker groups.
1767 for (size_t i = 0; i < nthreads; i++)
1769 for (size_t i = 0; i < running_workers.size(); i++)
1770 delete running_workers[i];
// Single-threaded micro-benchmark comparing std::map against the btree:
// 10M sequential inserts plus 10M random lookups on each structure, timed
// with scoped_rate_timer (prints events/sec to stderr). UNUSED-attributed
// declaration keeps the compiler quiet when the call site is commented out.
1773 static void perf_test() UNUSED;
1777 const size_t nrecs = 10000000;
1778 const size_t nlookups = 10000000;
// Baseline: std::map insert + lookup throughput.
1782 map<uint64_t, uint64_t> m;
1784 scoped_rate_timer t("map insert", nrecs);
1785 for (size_t i = 0; i < nrecs; i++)
1789 scoped_rate_timer t("map random lookups", nlookups);
1790 for (size_t i = 0; i < nlookups; i++) {
1791 //uint64_t key = rand() % nrecs;
// Key selection line is elided here (the rand() variant is commented out).
1793 map<uint64_t, uint64_t>::iterator it =
1795 ALWAYS_ASSERT(it != m.end());
// Same workload against the concurrent btree.
1802 testing_concurrent_btree btr;
1804 scoped_rate_timer t("btree insert", nrecs);
1805 for (size_t i = 0; i < nrecs; i++)
// NOTE(review): u64_varkey(u64_varkey(i)) double-wraps the key — presumably
// harmless (copy/conversion) but inconsistent with every other call site.
1806 btr.insert(u64_varkey(u64_varkey(i)), (typename testing_concurrent_btree::value_type) i);
1809 scoped_rate_timer t("btree random lookups", nlookups);
1810 for (size_t i = 0; i < nlookups; i++) {
1811 //uint64_t key = rand() % nrecs;
1813 typename testing_concurrent_btree::value_type v = 0;
1814 ALWAYS_ASSERT(btr.search(u64_varkey(key), v));
// Namespace for the read-only throughput benchmark: 16 fixed seeds (one per
// worker/core), a volatile stop flag, and a worker that performs random
// point lookups for as long as the benchmark runs, counting them in `n`.
1820 namespace read_only_perf_test_ns {
1821 const size_t nkeys = 140000000; // 140M
1822 //const size_t nkeys = 100000; // 100K
// One seed per worker thread => deterministic, distinct access streams.
1824 unsigned long seeds[] = {
1825 9576455804445224191ULL,
1826 3303315688255411629ULL,
1827 3116364238170296072ULL,
1828 641702699332002535ULL,
1829 17755947590284612420ULL,
1830 13349066465957081273ULL,
1831 16389054441777092823ULL,
1832 2687412585397891607ULL,
1833 16665670053534306255ULL,
1834 5166823197462453937ULL,
1835 1252059952779729626ULL,
1836 17962022827457676982ULL,
1837 940911318964853784ULL,
1838 479878990529143738ULL,
1839 250864516707124695ULL,
1840 8507722621803716653ULL,
1843 volatile bool running = false;
1845 class worker : public btree_worker {
// `n` counts completed lookups (incremented on an elided line).
1847 worker(unsigned int seed, testing_concurrent_btree &btr) : btree_worker(btr), n(0), seed(seed) {}
1850 fast_random r(seed);
// Keys were loaded as 0..nkeys-1, so every random lookup must hit.
1852 typename testing_concurrent_btree::key_slice k = r.next() % nkeys;
1853 typename testing_concurrent_btree::value_type v = 0;
1854 ALWAYS_ASSERT(btr->search(u64_varkey(k), v));
1855 ALWAYS_ASSERT(v == (typename testing_concurrent_btree::value_type) k);
// read_only_perf_test: loads nkeys sequential keys, runs one lookup worker
// per seed for a fixed interval (start/stop of `running` and the sleep are
// on elided lines), then reports aggregate and per-core gets/sec from the
// summed per-worker counters.
1865 static void read_only_perf_test() UNUSED;
1867 read_only_perf_test()
1869 using namespace read_only_perf_test_ns;
1871 testing_concurrent_btree btr;
// Bulk load: key i -> value i for all 140M keys (slow; hence "Slow" tests).
1873 for (size_t i = 0; i < nkeys; i++)
1874 btr.insert(u64_varkey(i), (typename testing_concurrent_btree::value_type) i);
1875 cerr << "btree loaded, test starting" << endl;
1877 vector<worker *> workers;
1878 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++)
1879 workers.push_back(new worker(seeds[i], btr));
// Fences around the volatile `running` transitions (toggled on elided lines).
1883 COMPILER_MEMORY_FENCE;
1884 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++)
1885 workers[i]->start();
1887 COMPILER_MEMORY_FENCE;
1889 COMPILER_MEMORY_FENCE;
// Sum per-worker lookup counts after join.
1890 uint64_t total_n = 0;
1891 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++) {
1893 total_n += workers[i]->n;
// t.lap() is in microseconds here (divided by 1e6 to get seconds).
1897 double agg_throughput = double(total_n) / (double(t.lap()) / 1000000.0);
1898 double avg_per_core_throughput = agg_throughput / double(ARRAY_NELEMS(seeds));
1900 cerr << "agg_read_throughput: " << agg_throughput << " gets/sec" << endl;
1901 cerr << "avg_per_core_read_throughput: " << avg_per_core_throughput << " gets/sec/core" << endl;
// Namespace for the write-only throughput benchmark: same 16-seed structure
// as the read benchmark, but each worker performs its share (nkeys/nworkers)
// of random inserts instead of lookups.
1904 namespace write_only_perf_test_ns {
1905 const size_t nkeys = 140000000; // 140M
1906 //const size_t nkeys = 100000; // 100K
1908 unsigned long seeds[] = {
1909 17188055221422272641ULL,
1910 915721317773011804ULL,
1911 11607688859420148202ULL,
1912 16566896965529356730ULL,
1913 3687473034241167633ULL,
1914 1168118474092824592ULL,
1915 912212972587845337ULL,
1916 890657129662032640ULL,
1917 7557640044845923769ULL,
1918 9490577770668659131ULL,
1919 14081403972130650060ULL,
1920 14956552848279294368ULL,
1921 8669268465391111275ULL,
1922 1904251150166743550ULL,
1923 4418832947790992405ULL,
1924 9558684485283258563ULL,
1927 class worker : public btree_worker {
1929 worker(unsigned int seed, testing_concurrent_btree &btr) : btree_worker(btr), seed(seed) {}
1932 fast_random r(seed);
// Each worker inserts nkeys/nworkers random keys; collisions across
// workers simply overwrite with the same value (value == key).
1933 for (size_t i = 0; i < nkeys / ARRAY_NELEMS(seeds); i++) {
1934 typename testing_concurrent_btree::key_slice k = r.next() % nkeys;
1935 btr->insert(u64_varkey(k), (typename testing_concurrent_btree::value_type) k);
// write_only_perf_test: starts one insert worker per seed, joins them
// (the join body is on an elided line), and reports aggregate/per-core
// puts/sec. Unlike the read test, throughput is computed from nkeys rather
// than per-worker counters.
1943 static void write_only_perf_test() UNUSED;
1945 write_only_perf_test()
1947 using namespace write_only_perf_test_ns;
1949 testing_concurrent_btree btr;
1951 vector<worker *> workers;
1952 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++)
1953 workers.push_back(new worker(seeds[i], btr));
1956 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++)
1957 workers[i]->start();
1958 for (size_t i = 0; i < ARRAY_NELEMS(seeds); i++) {
// t.lap() is in microseconds (divided by 1e6 to get seconds).
1963 double agg_throughput = double(nkeys) / (double(t.lap()) / 1000000.0);
1964 double avg_per_core_throughput = agg_throughput / double(ARRAY_NELEMS(seeds));
1966 cerr << "agg_write_throughput: " << agg_throughput << " puts/sec" << endl;
1967 cerr << "avg_per_core_write_throughput: " << avg_per_core_throughput << " puts/sec/core" << endl;
// Fast test-suite entry point: runs the quick single- and multi-threaded
// correctness tests (additional calls fall on elided lines) and prints a
// pass banner.
1971 TestConcurrentBtreeFast()
1979 test_varlen_single_layer();
1980 test_varlen_multi_layer();
1982 test_two_layer_range_scan();
1983 test_multi_layer_scan();
1987 test_insert_remove_mix();
1989 mp_test_inserts_removes();
1990 cout << "testing_concurrent_btree::TestFast passed" << endl;
// Slow test-suite entry point: runs the heavyweight multi-threaded tests
// (other calls fall on elided lines). The perf benchmarks are deliberately
// disabled; uncomment locally to run them.
1994 TestConcurrentBtreeSlow()
2005 mp_test_long_keys();
2007 //read_only_perf_test();
2008 //write_only_perf_test();
2009 cout << "testing_concurrent_btree::TestSlow passed" << endl;