diff --git a/src/btree.c b/src/btree.c index 6ddca4e5d..1e06acaf3 100644 --- a/src/btree.c +++ b/src/btree.c @@ -1135,15 +1135,6 @@ btree_addrs_share_extent(cache *cc, uint64 left_addr, uint64 right_addr) allocator_get_config(al), right_addr, left_addr); } -static inline uint64 -btree_root_to_meta_addr(const btree_config *cfg, - uint64 root_addr, - uint64 meta_page_no) -{ - return root_addr + (meta_page_no + 1) * btree_page_size(cfg); -} - - /*---------------------------------------------------------- * Creating and destroying B-trees. *---------------------------------------------------------- diff --git a/src/btree_private.h b/src/btree_private.h index a400c2ab3..484a97d31 100644 --- a/src/btree_private.h +++ b/src/btree_private.h @@ -305,3 +305,11 @@ btree_get_child_addr(const btree_config *cfg, { return index_entry_child_addr(btree_get_index_entry(cfg, hdr, k)); } + +static inline uint64 +btree_root_to_meta_addr(const btree_config *cfg, + uint64 root_addr, + uint64 meta_page_no) +{ + return root_addr + (meta_page_no + 1) * btree_page_size(cfg); +} diff --git a/src/mini_allocator.c b/src/mini_allocator.c index 41df2c39c..1a0437320 100644 --- a/src/mini_allocator.c +++ b/src/mini_allocator.c @@ -255,7 +255,6 @@ static uint64 allocator_page_number(allocator *al, uint64 page_addr) { allocator_config *allocator_cfg = allocator_get_config(al); - debug_assert(allocator_valid_page_addr(al, page_addr)); return ((page_addr / allocator_cfg->io_cfg->page_size)); } @@ -275,7 +274,6 @@ static uint64 allocator_extent_number(allocator *al, uint64 page_addr) { allocator_config *allocator_cfg = allocator_get_config(al); - debug_assert(allocator_valid_page_addr(al, page_addr)); return ((allocator_extent_base_addr(al, page_addr) / allocator_cfg->io_cfg->extent_size)); } @@ -381,7 +379,7 @@ static uint64 mini_num_entries(page_handle *meta_page) { mini_meta_hdr *hdr = (mini_meta_hdr *)meta_page->data; - return hdr->num_entries; + return (uint64)hdr->num_entries; } /* diff --git 
a/src/routing_filter.h b/src/routing_filter.h index f37db7f0c..0a76d7a5b 100644 --- a/src/routing_filter.h +++ b/src/routing_filter.h @@ -43,12 +43,12 @@ typedef struct routing_config { /* * ----------------------------------------------------------------------------- * Routing Filter: Disk-resident structure, on pages of type PAGE_TYPE_TRUNK. - * Stored in trunk nodes, and is a pointer to a routing filter. + * Stored in trunk nodes, and is a pointer to a routing filter's page. * ----------------------------------------------------------------------------- */ typedef struct ONDISK routing_filter { - uint64 addr; - uint64 meta_head; + uint64 addr; // Address of page holding filter + uint64 meta_head; // Address of metadata page holding mini-allocator info uint32 num_fingerprints; uint32 num_unique; uint32 value_size; diff --git a/src/trunk.c b/src/trunk.c index 8ce98e7bc..fd298374d 100644 --- a/src/trunk.c +++ b/src/trunk.c @@ -54,8 +54,8 @@ static const int64 latency_histo_buckets[LATENCYHISTO_SIZE] = { * structures sized by these limits can fit within 4K byte pages. * * NOTE: The bundle and sub-bundle related limits below are used to size arrays - * of structures in splinter_trunk_hdr{}; i.e. Splinter pages of type - * PAGE_TYPE_TRUNK. So these constants do affect disk-resident structures. + * of structures in trunk_hdr{}; i.e. Splinter pages of type PAGE_TYPE_TRUNK. + * So these constants do affect disk-resident structures. */ #define TRUNK_MAX_PIVOTS (20) #define TRUNK_MAX_BUNDLES (12) @@ -105,6 +105,19 @@ static const int64 latency_histo_buckets[LATENCYHISTO_SIZE] = { * If verbose_logging_enabled is enabled in trunk_config, these functions print * to cfg->log_handle. 
*/ +void +trunk_enable_verbose_logging(trunk_handle *spl, platform_log_handle *log_handle) +{ + spl->cfg.verbose_logging_enabled = TRUE; + spl->cfg.log_handle = log_handle; +} + +void +trunk_disable_verbose_logging(trunk_handle *spl) +{ + spl->cfg.verbose_logging_enabled = FALSE; + spl->cfg.log_handle = NULL; +} static inline bool trunk_verbose_logging_enabled(trunk_handle *spl) @@ -143,15 +156,19 @@ trunk_close_log_stream_if_enabled(trunk_handle *spl, #define trunk_log_stream_if_enabled(spl, _stream, message, ...) \ do { \ if (trunk_verbose_logging_enabled(spl)) { \ - platform_log_stream( \ - (_stream), "[%3lu] " message, platform_get_tid(), ##__VA_ARGS__); \ + platform_log_stream((_stream), \ + "trunk_log():%d [%lu] " message, \ + __LINE__, \ + platform_get_tid(), \ + ##__VA_ARGS__); \ } \ } while (0) #define trunk_default_log_if_enabled(spl, message, ...) \ do { \ if (trunk_verbose_logging_enabled(spl)) { \ - platform_default_log(message, __VA_ARGS__); \ + platform_default_log( \ + "trunk_log():%d " message, __LINE__, __VA_ARGS__); \ } \ } while (0) @@ -355,7 +372,7 @@ trunk_log_node_if_enabled(platform_stream_handle *stream, * Array of bundles * When a collection of branches are flushed into a node, they are * organized into a bundle. This bundle will be compacted into a - * single branch by a call to trunk_compact_bundle. Bundles are + * single branch by a call to trunk_compact_bundle(). Bundles are * implemented as a collection of subbundles, each of which covers a * range of branches. 
* ---------- @@ -2227,7 +2244,7 @@ trunk_leaf_rebundle_all_branches(trunk_handle *spl, routing_filter *filter = trunk_subbundle_filter(spl, node, sb, 0); trunk_pivot_data *pdata = trunk_get_pivot_data(spl, node, 0); *filter = pdata->filter; - debug_assert(filter->addr != 0); + debug_assert((filter->addr != 0), "addr=%lu\n", filter->addr); ZERO_STRUCT(pdata->filter); debug_assert(trunk_subbundle_branch_count(spl, node, sb) != 0); } @@ -8180,6 +8197,62 @@ trunk_print(platform_log_handle *log_handle, trunk_handle *spl) trunk_print_subtree(log_handle, spl, spl->root_addr); } +/* + * Print meta-page's linked list for one routing filter at address 'meta_head'. + */ +void +trunk_print_filter_metapage_list(platform_log_handle *log_handle, + trunk_handle *spl, + uint64 meta_head) +{ + platform_log(log_handle, + "\nFilter Metadata page starting from meta_head=%lu\n{\n", + meta_head); + mini_unkeyed_print(spl->cc, meta_head, PAGE_TYPE_FILTER); + platform_log(log_handle, "\n}\n"); +} + +void +trunk_print_one_pivots_filter_metapages(platform_log_handle *log_handle, + trunk_handle *spl, + trunk_node *node, + uint16 pivot_no) +{ + trunk_pivot_data *pdata = trunk_get_pivot_data(spl, node, pivot_no); + + // Last pivot won't have any filter metadata pages for it. 
+ if (pivot_no == (trunk_num_pivot_keys(spl, node) - 1)) { + return; + } + trunk_print_filter_metapage_list(log_handle, spl, pdata->filter.meta_head); +} + +/* Print filter's metadata pages for given node at address 'node_addr' */ +void +trunk_print_nodes_filter_metapages(platform_log_handle *log_handle, + trunk_handle *spl, + uint64 node_addr) +{ + trunk_node node; + trunk_node_get(spl->cc, node_addr, &node); + + for (uint16 pivot_no = 0; pivot_no < trunk_num_pivot_keys(spl, &node); + pivot_no++) + { + trunk_print_one_pivots_filter_metapages(log_handle, spl, &node, pivot_no); + } + + trunk_node_unget(spl->cc, &node); +} + +void +trunk_print_root_nodes_filter_metapages(platform_log_handle *log_handle, + trunk_handle *spl) +{ + trunk_print_nodes_filter_metapages(log_handle, spl, spl->root_addr); +} + + /* * trunk_print_super_block() * diff --git a/src/trunk.h b/src/trunk.h index 4320d9f75..b0b8c0cac 100644 --- a/src/trunk.h +++ b/src/trunk.h @@ -413,6 +413,17 @@ trunk_print_space_use(platform_log_handle *log_handle, trunk_handle *spl); bool trunk_verify_tree(trunk_handle *spl); +void +trunk_print_root_nodes_filter_metapages(platform_log_handle *log_handle, + trunk_handle *spl); + +void +trunk_enable_verbose_logging(trunk_handle *spl, + platform_log_handle *log_handle); + +void +trunk_disable_verbose_logging(trunk_handle *spl); + static inline uint64 trunk_max_key_size(trunk_handle *spl) { diff --git a/tests/unit/btree_stress_test.c b/tests/unit/btree_stress_test.c index 57abf12c1..5827d4ec1 100644 --- a/tests/unit/btree_stress_test.c +++ b/tests/unit/btree_stress_test.c @@ -26,6 +26,8 @@ #include "btree_private.h" #include "btree_test_common.h" +typedef void (*btree_thread_hdlr)(void *arg); + typedef struct insert_thread_params { cache *cc; btree_config *cfg; @@ -35,6 +37,7 @@ typedef struct insert_thread_params { uint64 root_addr; int start; int end; + platform_thread thread; } insert_thread_params; // Function Prototypes @@ -82,6 +85,24 @@ ungen_key(key test_key); 
static message gen_msg(btree_config *cfg, uint64 i, uint8 *buffer, size_t length); +static void +load_thread_params(insert_thread_params *params, + uint64 nthreads, + platform_heap_id hid, + cache *cc, + btree_config *btree_cfg, + mini_allocator *mini, + uint64 root_addr, + int nkvs); + +static platform_status +do_n_thread_creates(const char *thread_type, + insert_thread_params *params, + uint64 nthreads, + task_system *ts, + platform_heap_id hid, + btree_thread_hdlr thread_hdlr); + /* * Global data declaration macro: */ @@ -182,8 +203,8 @@ CTEST_TEARDOWN(btree_stress) * Test case to exercise random inserts of large volumes of data, across * multiple threads. This test case verifies that registration of threads * to Splinter is working stably. + * ------------------------------------------------------------------------- */ - CTEST2(btree_stress, test_random_inserts_concurrent) { int nkvs = 1000000; @@ -194,36 +215,26 @@ CTEST2(btree_stress, test_random_inserts_concurrent) uint64 root_addr = btree_create( (cache *)&data->cc, &data->dbtree_cfg, &mini, PAGE_TYPE_MEMTABLE); - platform_heap_id hid = platform_get_heap_id(); - insert_thread_params *params = TYPED_ARRAY_ZALLOC(hid, params, nthreads); - platform_thread *threads = TYPED_ARRAY_ZALLOC(hid, threads, nthreads); + platform_heap_id hid = platform_get_heap_id(); + insert_thread_params *params = TYPED_ARRAY_ZALLOC(hid, params, nthreads); - for (uint64 i = 0; i < nthreads; i++) { - params[i].cc = (cache *)&data->cc; - params[i].cfg = &data->dbtree_cfg; - params[i].hid = data->hid; - params[i].scratch = TYPED_MALLOC(data->hid, params[i].scratch); - params[i].mini = &mini; - params[i].root_addr = root_addr; - params[i].start = i * (nkvs / nthreads); - params[i].end = i < nthreads - 1 ? 
(i + 1) * (nkvs / nthreads) : nkvs; -   } + load_thread_params(params, + nthreads, + data->hid, + (cache *)&data->cc, + &data->dbtree_cfg, + &mini, + root_addr, + nkvs); - for (uint64 i = 0; i < nthreads; i++) { - platform_status ret = task_thread_create("insert thread", - insert_thread, - &params[i], - 0, - data->ts, - data->hid, - &threads[i]); - ASSERT_TRUE(SUCCESS(ret)); - // insert_tests((cache *)&cc, &dbtree_cfg, &test_scratch, &mini, - // root_addr, 0, nkvs); - } + + platform_status ret; + ret = do_n_thread_creates( + "insert thread", params, nthreads, data->ts, data->hid, insert_thread); + ASSERT_TRUE(SUCCESS(ret)); for (uint64 thread_no = 0; thread_no < nthreads; thread_no++) { - platform_thread_join(threads[thread_no]); + platform_thread_join(params[thread_no].thread); } int rc = query_tests((cache *)&data->cc,
+ * ------------------------------------------------------------------------- + */ +CTEST2(btree_stress, test_btree_print_diags) +{ + int nkvs = 1000000; + int nthreads = 8; + + mini_allocator mini; + + uint64 root_addr = btree_create( + (cache *)&data->cc, &data->dbtree_cfg, &mini, PAGE_TYPE_MEMTABLE); + + platform_heap_id hid = platform_get_heap_id(); + insert_thread_params *params = TYPED_ARRAY_ZALLOC(hid, params, nthreads); + + load_thread_params(params, + nthreads, + data->hid, + (cache *)&data->cc, + &data->dbtree_cfg, + &mini, + root_addr, + nkvs); + + + platform_status ret; + ret = do_n_thread_creates( + "insert thread", params, nthreads, data->ts, data->hid, insert_thread); + ASSERT_TRUE(SUCCESS(ret)); + + for (uint64 thread_no = 0; thread_no < nthreads; thread_no++) { + platform_thread_join(params[thread_no].thread); + } + + uint64 packed_root_addr = pack_tests( + (cache *)&data->cc, &data->dbtree_cfg, data->hid, root_addr, nkvs); + if (0 < nkvs && !packed_root_addr) { + ASSERT_TRUE(FALSE, "Pack failed.\n"); + } + // Exercise print method to verify that it basically continues to work. 
+ CTEST_LOG_INFO("\n**** btree_print_tree() on BTree root=%lu****\n", + packed_root_addr); btree_print_tree(Platform_default_log_handle, (cache *)&data->cc, &data->dbtree_cfg, packed_root_addr); + uint64 meta_page_addr = + btree_root_to_meta_addr(&data->dbtree_cfg, packed_root_addr, 0); + CTEST_LOG_INFO("\n**** mini_keyed_print() BTree root=%lu" + ", meta page addr=%lu ****\n", + packed_root_addr, + meta_page_addr); + + // Exercise print method of mini-allocator's keyed meta-page + mini_keyed_print( + (cache *)&data->cc, data->data_cfg, meta_page_addr, PAGE_TYPE_BRANCH); + // Release memory allocated in this test case for (uint64 i = 0; i < nthreads; i++) { platform_free(data->hid, params[i].scratch); } platform_free(hid, params); - platform_free(hid, threads); } /* @@ -277,6 +353,54 @@ CTEST2(btree_stress, test_random_inserts_concurrent) * Define minions and helper functions used by this test suite. * ******************************************************************************** */ +/* + * Helper function to load thread-specific parameters to drive the workload + */ +static void +load_thread_params(insert_thread_params *params, + uint64 nthreads, + platform_heap_id hid, + cache *cc, + btree_config *btree_cfg, + mini_allocator *mini, + uint64 root_addr, + int nkvs) +{ + for (uint64 i = 0; i < nthreads; i++) { + params[i].cc = cc; + params[i].cfg = btree_cfg; + params[i].hid = hid; + params[i].scratch = TYPED_MALLOC(hid, params[i].scratch); + params[i].mini = mini; + params[i].root_addr = root_addr; + params[i].start = i * (nkvs / nthreads); + params[i].end = ((i < nthreads - 1) ? (i + 1) * (nkvs / nthreads) : nkvs); + } +} + +/* + * Helper function to create n-threads, each thread executing the specified + * thread_hdlr handler function. 
+ */ +static platform_status +do_n_thread_creates(const char *thread_type, + insert_thread_params *params, + uint64 nthreads, + task_system *ts, + platform_heap_id hid, + btree_thread_hdlr thread_hdlr) +{ + platform_status ret; + for (uint64 i = 0; i < nthreads; i++) { + ret = task_thread_create( + thread_type, thread_hdlr, &params[i], 0, ts, hid, &params[i].thread); + if (!SUCCESS(ret)) { + return ret; + } + } + return ret; +} + static void insert_thread(void *arg) { diff --git a/tests/unit/splinter_test.c b/tests/unit/splinter_test.c index 19320273b..829aec352 100644 --- a/tests/unit/splinter_test.c +++ b/tests/unit/splinter_test.c @@ -650,7 +650,7 @@ CTEST2(splinter, test_splinter_print_diags) "Expected to have inserted non-zero rows, num_inserts=%lu", num_inserts); - CTEST_LOG_INFO("**** Splinter Diagnostics ****\n" + CTEST_LOG_INFO("\n**** Splinter Diagnostics ****\n" "Generated by %s:%d:%s ****\n", __FILE__, __LINE__,
+ * ----------------------------------------------------------------------------- + */ +CTEST2(splinter, test_splinter_verbose_print_diags) +{ + allocator *alp = (allocator *)&data->al; + + trunk_handle *spl = trunk_create(data->splinter_cfg, + alp, + (cache *)data->clock_cache, + data->tasks, + test_generate_allocator_root_id(), + data->hid); + ASSERT_TRUE(spl != NULL); + + trunk_enable_verbose_logging(spl, Platform_default_log_handle); + + // Insert slightly over a 1M rows + data->test_exec_cfg.num_inserts = (Kilo * Kilo); + + CTEST_LOG_INFO("\n**** Splinter Diagnostics: Trunk Verbose Logging ****\n"); + + uint64 num_inserts = splinter_do_inserts(data, spl, FALSE, NULL); + ASSERT_NOT_EQUAL(0, + num_inserts, + "Expected to have inserted non-zero rows, num_inserts=%lu", + num_inserts); + + trunk_disable_verbose_logging(spl); + trunk_destroy(spl); }