diff --git a/src/blockstore_sync.cpp b/src/blockstore_sync.cpp
index 1bb0b51d..2ee01a44 100644
--- a/src/blockstore_sync.cpp
+++ b/src/blockstore_sync.cpp
@@ -80,7 +80,8 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
     // 2nd step: Data device is synced, prepare & write journal entries
     // Check space in the journal and journal memory buffers
     blockstore_journal_check_t space_check(this);
-    if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(), sizeof(journal_entry_big_write), JOURNAL_STABILIZE_RESERVATION))
+    if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
+        sizeof(journal_entry_big_write) + clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
     {
         return 0;
     }
@@ -95,7 +96,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
     int s = 0, cur_sector = -1;
     while (it != PRIV(op)->sync_big_writes.end())
     {
-        if (!journal.entry_fits(sizeof(journal_entry_big_write)) &&
+        if (!journal.entry_fits(sizeof(journal_entry_big_write) + clean_entry_bitmap_size) &&
             journal.sector_info[journal.cur_sector].dirty)
         {
             if (cur_sector == -1)
@@ -103,24 +104,27 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_prog
                 prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], [this, op](ring_data_t *data) { handle_sync_event(data, op); });
             cur_sector = journal.cur_sector;
         }
+        auto & dirty_entry = dirty_db.at(*it);
         journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
-            journal, (dirty_db[*it].state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
-            sizeof(journal_entry_big_write)
+            journal, (dirty_entry.state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
+            sizeof(journal_entry_big_write) + clean_entry_bitmap_size
         );
-        dirty_db[*it].journal_sector = journal.sector_info[journal.cur_sector].offset;
+        dirty_entry.journal_sector = journal.sector_info[journal.cur_sector].offset;
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
         printf(
             "journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
-            dirty_db[*it].journal_sector, it->oid.inode, it->oid.stripe, it->version,
+            dirty_entry.journal_sector, it->oid.inode, it->oid.stripe, it->version,
             journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
         );
 #endif
         je->oid = it->oid;
         je->version = it->version;
-        je->offset = dirty_db[*it].offset;
-        je->len = dirty_db[*it].len;
-        je->location = dirty_db[*it].location;
+        je->offset = dirty_entry.offset;
+        je->len = dirty_entry.len;
+        je->location = dirty_entry.location;
+        memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*)
+            ? dirty_entry.bitmap : &dirty_entry.bitmap), clean_entry_bitmap_size);
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
         it++;
diff --git a/src/blockstore_write.cpp b/src/blockstore_write.cpp
index 32ac783d..67a6c4b9 100644
--- a/src/blockstore_write.cpp
+++ b/src/blockstore_write.cpp
@@ -241,7 +241,8 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
     if ((dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE)
     {
         blockstore_journal_check_t space_check(this);
-        if (!space_check.check_available(op, unsynced_big_write_count + 1, sizeof(journal_entry_big_write), JOURNAL_STABILIZE_RESERVATION))
+        if (!space_check.check_available(op, unsynced_big_write_count + 1,
+            sizeof(journal_entry_big_write) + clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
         {
             return 0;
         }
@@ -307,8 +308,11 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         // Small (journaled) write
         // First check if the journal has sufficient space
         blockstore_journal_check_t space_check(this);
-        if (unsynced_big_write_count && !space_check.check_available(op, unsynced_big_write_count, sizeof(journal_entry_big_write), 0)
-            || !space_check.check_available(op, 1, sizeof(journal_entry_small_write), op->len + JOURNAL_STABILIZE_RESERVATION))
+        if (unsynced_big_write_count &&
+            !space_check.check_available(op, unsynced_big_write_count,
+                sizeof(journal_entry_big_write) + clean_entry_bitmap_size, 0)
+            || !space_check.check_available(op, 1,
+                sizeof(journal_entry_small_write) + clean_entry_bitmap_size, op->len + JOURNAL_STABILIZE_RESERVATION))
         {
             return 0;
         }
@@ -316,8 +320,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         // There is sufficient space. Get SQE(s)
         struct io_uring_sqe *sqe1 = NULL;
         if (immediate_commit != IMMEDIATE_NONE ||
-            (journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_small_write) &&
-            journal.sector_info[journal.cur_sector].dirty)
+            !journal.entry_fits(sizeof(journal_entry_small_write) + clean_entry_bitmap_size))
         {
             // Write current journal sector only if it's dirty and full, or in the immediate_commit mode
             BS_SUBMIT_GET_SQE_DECL(sqe1);