diff --git a/src/blockstore_init.cpp b/src/blockstore_init.cpp index a76ee0db..dc6a2be8 100644 --- a/src/blockstore_init.cpp +++ b/src/blockstore_init.cpp @@ -549,20 +549,20 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u .oid = je->small_write.oid, .version = je->small_write.version, }; - void *bmp = (void*)je + sizeof(journal_entry_small_write); + void *bmp = NULL; + void *bmp_from = (void*)je + sizeof(journal_entry_small_write); if (bs->clean_entry_bitmap_size <= sizeof(void*)) { - memcpy(&bmp, bmp, bs->clean_entry_bitmap_size); + memcpy(&bmp, bmp_from, bs->clean_entry_bitmap_size); } - else if (!bs->journal.inmemory) + else { - // FIXME Using large blockstore objects and not keeping journal in memory - // will result in a lot of small allocations for entry bitmaps. This can - // only be fixed by using a patched map with dynamic entry size, but not - // the btree_map, because it doesn't keep iterators valid all the time. - void *bmp_cp = malloc_or_die(bs->clean_entry_bitmap_size); - memcpy(bmp_cp, bmp, bs->clean_entry_bitmap_size); - bmp = bmp_cp; + // FIXME Using large blockstore objects will result in a lot of small + // allocations for entry bitmaps. This can only be fixed by using + // a patched map with dynamic entry size, but not the btree_map, + // because it doesn't keep iterators valid all the time.
+ bmp = malloc_or_die(bs->clean_entry_bitmap_size); + memcpy(bmp, bmp_from, bs->clean_entry_bitmap_size); } bs->dirty_db.emplace(ov, (dirty_entry){ .state = (BS_ST_SMALL_WRITE | BS_ST_SYNCED), @@ -629,20 +629,20 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u .oid = je->big_write.oid, .version = je->big_write.version, }; - void *bmp = (void*)je + sizeof(journal_entry_big_write); + void *bmp = NULL; + void *bmp_from = (void*)je + sizeof(journal_entry_big_write); if (bs->clean_entry_bitmap_size <= sizeof(void*)) { - memcpy(&bmp, bmp, bs->clean_entry_bitmap_size); + memcpy(&bmp, bmp_from, bs->clean_entry_bitmap_size); } - else if (!bs->journal.inmemory) + else { - // FIXME Using large blockstore objects and not keeping journal in memory - // will result in a lot of small allocations for entry bitmaps. This can - // only be fixed by using a patched map with dynamic entry size, but not - // the btree_map, because it doesn't keep iterators valid all the time. - void *bmp_cp = malloc_or_die(bs->clean_entry_bitmap_size); - memcpy(bmp_cp, bmp, bs->clean_entry_bitmap_size); - bmp = bmp_cp; + // FIXME Using large blockstore objects will result in a lot of small + // allocations for entry bitmaps. This can only be fixed by using + // a patched map with dynamic entry size, but not the btree_map, + // because it doesn't keep iterators valid all the time. + bmp = malloc_or_die(bs->clean_entry_bitmap_size); + memcpy(bmp, bmp_from, bs->clean_entry_bitmap_size); } auto dirty_it = bs->dirty_db.emplace(ov, (dirty_entry){ .state = (BS_ST_BIG_WRITE | BS_ST_SYNCED), diff --git a/src/blockstore_write.cpp b/src/blockstore_write.cpp index 67a6c4b9..fc670ed1 100644 --- a/src/blockstore_write.cpp +++ b/src/blockstore_write.cpp @@ -30,10 +30,13 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op) wait_big = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE ? 
!IS_SYNCED(dirty_it->second.state) : ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG); - if (clean_entry_bitmap_size > sizeof(void*)) - memcpy(bmp, dirty_it->second.bitmap, clean_entry_bitmap_size); - else - bmp = dirty_it->second.bitmap; + if (!is_del && !deleted) + { + if (clean_entry_bitmap_size > sizeof(void*)) + memcpy(bmp, dirty_it->second.bitmap, clean_entry_bitmap_size); + else + bmp = dirty_it->second.bitmap; + } } } if (!found) @@ -42,8 +45,11 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op) if (clean_it != clean_db.end()) { version = clean_it->second.version + 1; - void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size); - memcpy((clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, clean_entry_bitmap_size); + if (!is_del) + { + void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size); + memcpy((clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, clean_entry_bitmap_size); + } } else {