Remove duplicate journal writing code (and fix it at the same time)

blocking-uring-test
Vitaliy Filippov 2019-11-11 00:28:14 +03:00
parent ff57dd420e
commit 8edb9e9d6f
7 changed files with 151 additions and 168 deletions

Makefile

@@ -1,10 +1,10 @@
all: allocator.o blockstore.o blockstore_init.o blockstore_open.o blockstore_read.o \
all: allocator.o blockstore.o blockstore_init.o blockstore_open.o blockstore_journal.o blockstore_read.o \
blockstore_write.o blockstore_sync.o blockstore_stable.o crc32c.o ringloop.o test
clean:
rm -f *.o
crc32c.o: crc32c.c
g++ -c -o $@ $<
%.o: %.cpp blockstore.h
g++ -c -o $@ $<
g++ -Wall -Wno-sign-compare -Wno-parentheses -c -o $@ $<
test: test.cpp
g++ -o test -luring test.cpp

blockstore.h

@@ -84,6 +84,10 @@
return 0;\
}
class blockstore;
class blockstore_operation;
// 16 bytes per object/stripe id
// stripe includes replica number in 4 least significant bits
struct __attribute__((__packed__)) object_id
@@ -202,6 +206,7 @@ struct blockstore_operation
// FIXME: Move internal fields somewhere
friend class blockstore;
friend class blockstore_journal_check_t;
private:
// Wait status
int wait_for;
@@ -220,8 +225,6 @@
int sync_state, prev_sync_count;
};
class blockstore;
#include "blockstore_init.h"
class blockstore
@@ -255,6 +258,7 @@ class blockstore
friend class blockstore_init_meta;
friend class blockstore_init_journal;
friend class blockstore_journal_check_t;
void calc_lengths(spp::sparse_hash_map<std::string, std::string> & config);
void open_data(spp::sparse_hash_map<std::string, std::string> & config);

blockstore_journal.cpp (new file, +48)

@@ -0,0 +1,48 @@
#include "blockstore.h"
blockstore_journal_check_t::blockstore_journal_check_t(blockstore *bs)
{
this->bs = bs;
sectors_required = 0;
next_pos = bs->journal.next_free;
next_sector = bs->journal.cur_sector;
next_in_pos = bs->journal.in_sector_pos;
}
// Check if we can write <required> entries of <size> bytes and <data_after> data bytes after them to the journal
int blockstore_journal_check_t::check_available(blockstore_operation *op, int required, int size, int data_after)
{
while (1)
{
int fits = (512 - next_in_pos) / size;
if (fits > 0)
{
required -= fits;
next_in_pos += fits * size;
sectors_required++;
}
if (required <= 0)
break;
next_pos = (next_pos+512) < bs->journal.len ? next_pos+512 : 512;
next_sector = ((next_sector + 1) % bs->journal.sector_count);
next_in_pos = 0;
if (bs->journal.sector_info[next_sector].usage_count > 0)
{
// No memory buffer available. Wait for it.
op->wait_for = WAIT_JOURNAL_BUFFER;
return 0;
}
}
if (data_after > 0)
{
next_pos = (bs->journal.len - next_pos < data_after ? 512 : next_pos) + data_after;
}
if (next_pos >= bs->journal.used_start)
{
// No space in the journal. Wait for it.
op->wait_for = WAIT_JOURNAL;
op->wait_detail = next_pos;
return 0;
}
return 1;
}
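To make the loop above concrete, a worked example with assumed numbers (the 512-byte sector size is from the code; the entry size and fill level are illustrative, not from the commit):

// Suppose next_in_pos = 448 and the caller asks for required = 3 entries
// of size = 32, with data_after = 0.
// Pass 1: fits = (512 - 448) / 32 = 2 -> required = 1, sectors_required = 1
//         (the partially filled current sector still needs to be written).
// required > 0, so advance: next_pos += 512 (wrapping to 512 rather than 0;
// the first 512-byte block is apparently reserved), next_in_pos = 0, and if
// that sector's memory buffer is still in flight (usage_count > 0) the op
// is parked with WAIT_JOURNAL_BUFFER.
// Pass 2: fits = 512 / 32 = 16 -> required = -15, sectors_required = 2, done.
// Finally, if next_pos has run into journal.used_start, the op is parked
// with WAIT_JOURNAL instead.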

blockstore_journal.h

@@ -119,6 +119,39 @@ struct journal_t
uint8_t *sector_buf;
journal_sector_info_t *sector_info;
uint64_t sector_count;
uint64_t cur_sector = 0;
uint64_t in_sector_pos = 0;
int cur_sector = 0;
int in_sector_pos = 0;
};
struct blockstore_journal_check_t
{
blockstore *bs;
uint64_t next_pos, next_sector, next_in_pos;
int sectors_required;
blockstore_journal_check_t(blockstore *bs);
int check_available(blockstore_operation *op, int required, int size, int data_after);
};
inline journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size)
{
if (512 - journal.in_sector_pos < size)
{
// Move to the next journal sector
// Also select next sector buffer in memory
journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
journal.sector_info[journal.cur_sector].offset = journal.next_free;
journal.in_sector_pos = 0;
journal.next_free = (journal.next_free+512) < journal.len ? journal.next_free + 512 : 512;
memset(journal.sector_buf + 512*journal.cur_sector, 0, 512);
}
journal_entry *je = (struct journal_entry*)(
journal.sector_buf + 512*journal.cur_sector + journal.in_sector_pos
);
journal.in_sector_pos += size;
je->magic = JOURNAL_MAGIC;
je->type = type;
je->size = size;
je->crc32_prev = journal.crc32_last;
return je;
}
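prefill_single_journal_entry() fills only the common header fields (magic, type, size, crc32_prev) and advances the in-sector cursor; the caller writes the payload, computes the final CRC and chains it. A minimal usage sketch, mirroring the dequeue_stable call site later in this commit:

journal_entry_stable *je = (journal_entry_stable*)
	prefill_single_journal_entry(journal, JE_STABLE, sizeof(struct journal_entry_stable));
je->oid = op->oid;
je->version = op->version;
je->crc32 = je_crc32((journal_entry*)je); // CRC of the finished entry
journal.crc32_last = je->crc32;           // becomes the next entry's crc32_prev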

blockstore_stable.cpp

@@ -36,52 +36,18 @@ int blockstore::dequeue_stable(blockstore_operation *op)
op->callback(op);
return 1;
}
// FIXME: Try to deduplicate journal entry submission code...
// Check journal space
uint64_t next_pos = journal.next_free;
if (512 - journal.in_sector_pos < sizeof(struct journal_entry_stable))
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_stable), 0))
{
next_pos = (next_pos+512) < journal.len ? next_pos+512 : 512;
// Also check if we have an unused memory buffer for the journal sector
if (journal.sector_info[((journal.cur_sector + 1) % journal.sector_count)].usage_count > 0)
{
// No memory buffer available. Wait for it.
op->wait_for = WAIT_JOURNAL_BUFFER;
return 0;
}
}
if (next_pos >= journal.used_start)
{
// No space in the journal. Wait for it.
op->wait_for = WAIT_JOURNAL;
op->wait_detail = next_pos;
return 0;
}
// There is sufficient space. Get SQE
BS_SUBMIT_GET_SQE(sqe, data);
// Got SQE. Prepare journal sector write
if (512 - journal.in_sector_pos < sizeof(struct journal_entry_stable))
{
// Move to the next journal sector
// Also select next sector buffer in memory
journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
journal.sector_info[journal.cur_sector].offset = journal.next_free;
journal.in_sector_pos = 0;
journal.next_free = (journal.next_free+512) < journal.len ? journal.next_free + 512 : 512;
memset(journal.sector_buf + 512*journal.cur_sector, 0, 512);
}
journal_entry_stable *je = (journal_entry_stable*)(
journal.sector_buf + 512*journal.cur_sector + journal.in_sector_pos
);
*je = {
.crc32 = 0,
.magic = JOURNAL_MAGIC,
.type = JE_STABLE,
.size = sizeof(struct journal_entry_stable),
.crc32_prev = journal.crc32_last,
.oid = op->oid,
.version = op->version,
};
journal_entry_stable *je = (journal_entry_stable*)prefill_single_journal_entry(journal, JE_STABLE, sizeof(struct journal_entry_stable));
je->oid = op->oid;
je->version = op->version;
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
data->iov = (struct iovec){ journal.sector_buf + 512*journal.cur_sector, 512 };
@@ -113,25 +79,30 @@ void blockstore::handle_stable_event(ring_data_t *data, blockstore_operation *op)
});
if (dirty_it->second.state == ST_J_SYNCED)
{
dirty_it->second.state = ST_J_STABLE;
// Copy data from the journal to the data device
// -> increase version on the metadata device
// -> advance clean_db entry's version and clear previous journal entries
// This makes 1 4K small write look like:
// 1) Copy data from the journal to the data device
// 2) Increase version on the metadata device
// 3) Advance clean_db entry's version, clear previous journal entries
// This makes 1 4K small write+sync look like:
// 512b+4K (journal) + sync + 512b (journal) + sync + 512b (metadata) + 4K (data) + sync.
// WA = 2.375. It's not the best; SSD FTL-like redirect-write could probably
// be lower even with defragmentation. But it's fixed, and it's still better
// than in Ceph. :)
dirty_it->second.state = ST_J_STABLE;
// Acknowledge op
op->retval = 0;
op->callback(op);
}
else if (dirty_it->second.state == ST_D_META_SYNCED)
{
dirty_it->second.state = ST_D_STABLE;
// Copy metadata from the journal to the metadata device
// -> move dirty_db entry to clean_db and clear previous journal entries
// This makes 1 128K big write look like:
// 1) Copy metadata from the journal to the metadata device
// 2) Move dirty_db entry to clean_db and clear previous journal entries
// This makes 1 128K big write+sync look like:
// 128K (data) + sync + 512b (journal) + sync + 512b (journal) + sync + 512b (metadata) + sync.
// WA = 1.012. Very good :)
}
dirty_it->second.state = ST_D_STABLE;
// Acknowledge op
op->retval = 0;
op->callback(op);
}
}
}
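The write amplification figures quoted in the comments above check out. A standalone sketch verifying the arithmetic (the file name is hypothetical, not part of the commit):

// wa_check.cpp: verify the WA numbers quoted in the stabilize comments
#include <cstdio>
int main()
{
	// Small write: 512b journal entry + 4K data in the journal, then another
	// 512b journal entry, a 512b metadata block, and the 4K copy to the data device.
	double wa_small = (512 + 4096 + 512 + 512 + 4096) / 4096.0;
	// Big write: 128K data, two 512b journal entries, one 512b metadata block.
	double wa_big = (131072.0 + 512 + 512 + 512) / 131072.0;
	printf("small: %.3f, big: %.4f\n", wa_small, wa_big); // 2.375, 1.0117
	return 0;
}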

blockstore_sync.cpp

@@ -59,87 +59,49 @@ int blockstore::continue_sync(blockstore_operation *op)
{
// 2nd step: Data device is synced, prepare & write journal entries
// Check space in the journal and journal memory buffers
int required = op->sync_big_writes.size(), sectors_required = 1;
uint64_t next_pos = journal.next_free, next_sector = journal.cur_sector;
while (1)
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, op->sync_big_writes.size(), sizeof(journal_entry_big_write), 0))
{
int fits = (512 - journal.in_sector_pos) / sizeof(journal_entry_big_write);
required -= fits;
if (required <= 0)
break;
next_pos = (next_pos+512) < journal.len ? next_pos+512 : 512;
sectors_required++;
next_sector = ((next_sector + 1) % journal.sector_count);
if (journal.sector_info[next_sector].usage_count > 0)
{
// No memory buffer available. Wait for it.
op->wait_for = WAIT_JOURNAL_BUFFER;
return 0;
}
}
if (next_pos >= journal.used_start)
{
// No space in the journal. Wait for it.
op->wait_for = WAIT_JOURNAL;
op->wait_detail = next_pos;
return 0;
}
// Get SQEs. Don't bother with merging, submit each journal sector as a separate request
struct io_uring_sqe *sqe[sectors_required+1];
for (int i = 0; i < sectors_required+1; i++)
struct io_uring_sqe *sqe[space_check.sectors_required+1];
for (int i = 0; i < space_check.sectors_required+1; i++)
{
BS_SUBMIT_GET_SQE_DECL(sqe[i]);
}
// Prepare and submit journal entries
op->min_used_journal_sector = 1 + journal.cur_sector;
sectors_required = 0;
required = op->sync_big_writes.size();
auto it = op->sync_big_writes.begin();
while (1)
int s = 0, cur_sector = -1;
while (it != op->sync_big_writes.end())
{
int fits = (512 - journal.in_sector_pos) / sizeof(journal_entry_big_write);
while (fits > 0 && required > 0)
{
journal_entry_big_write *je = (journal_entry_big_write*)(
journal.sector_buf + 512*journal.cur_sector + journal.in_sector_pos
);
*je = {
.crc32 = 0,
.magic = JOURNAL_MAGIC,
.type = JE_BIG_WRITE,
.size = sizeof(journal_entry_big_write),
.crc32_prev = journal.crc32_last,
.oid = it->oid,
.version = it->version,
.block = dirty_db[*it].location,
};
journal_entry_big_write *je = (journal_entry_big_write*)
prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
je->oid = it->oid;
je->version = it->version;
je->block = dirty_db[*it].location;
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
journal.in_sector_pos += sizeof(journal_entry_big_write);
required--;
it++;
}
if (required <= 0)
break;
if (cur_sector != journal.cur_sector)
{
cur_sector = journal.cur_sector;
journal.sector_info[journal.cur_sector].usage_count++;
struct ring_data_t *data = ((ring_data_t*)sqe[sectors_required]->user_data);
struct ring_data_t *data = ((ring_data_t*)sqe[s]->user_data);
data->iov = (struct iovec){ journal.sector_buf + 512*journal.cur_sector, 512 };
data->op = op;
io_uring_prep_writev(
sqe[sectors_required], journal.fd, &data->iov, 1, journal.offset + journal.sector_info[journal.cur_sector].offset
sqe[s], journal.fd, &data->iov, 1, journal.offset + journal.sector_info[journal.cur_sector].offset
);
journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
journal.sector_info[journal.cur_sector].offset = journal.next_free;
journal.in_sector_pos = 0;
journal.next_free = (journal.next_free + 512) < journal.len ? journal.next_free + 512 : 512;
memset(journal.sector_buf + 512*journal.cur_sector, 0, 512);
sectors_required++;
s++;
}
}
// ... And a journal fsync
io_uring_prep_fsync(sqe[sectors_required], journal.fd, 0);
struct ring_data_t *data = ((ring_data_t*)sqe[sectors_required]->user_data);
io_uring_prep_fsync(sqe[s], journal.fd, 0);
struct ring_data_t *data = ((ring_data_t*)sqe[s]->user_data);
data->op = op;
op->pending_ops = 1 + sectors_required;
op->pending_ops = 1 + s;
op->max_used_journal_sector = 1 + journal.cur_sector;
op->sync_state = SYNC_JOURNAL_SYNC_SENT;
}
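The batched path above needs space_check.sectors_required journal sector writes plus one fsync. A worked example with an assumed entry size (sizeof(journal_entry_big_write) is not shown in this diff):

// Assume op->sync_big_writes.size() == 10, a 64-byte journal_entry_big_write
// (illustrative value), and in_sector_pos == 256 in the current sector:
//   check_available: fits = (512-256)/64 = 4 -> 6 entries left, sectors_required = 1;
//                    next sector: fits = 512/64 = 8 -> done, sectors_required = 2.
//   submission loop: s increments once per distinct journal sector touched,
//   so s == 2 and op->pending_ops == 1 + s == 3 (two sector writes plus one
//   fsync), matching the sqe[space_check.sectors_required+1] array.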

blockstore_write.cpp

@@ -72,59 +72,25 @@ int blockstore::dequeue_write(blockstore_operation *op)
// Small (journaled) write
// First check if the journal has sufficient space
// FIXME Always two SQEs for now. Although it's possible to send 1 sometimes
uint64_t next_pos = journal.next_free;
if (512 - journal.in_sector_pos < sizeof(struct journal_entry_small_write))
//two_sqes = (512 - journal.in_sector_pos < sizeof(struct journal_entry_small_write)
// ? (journal.len - next_pos < op->len)
// : (journal.sector_info[journal.cur_sector].offset + 512 != journal.next_free ||
// journal.len - next_pos < op->len);
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_small_write), op->len))
{
//if (journal.len - next_pos < op->len)
// two_sqes = true;
next_pos = (next_pos+512) < journal.len ? next_pos+512 : 512;
// Also check if we have an unused memory buffer for the journal sector
if (journal.sector_info[((journal.cur_sector + 1) % journal.sector_count)].usage_count > 0)
{
// No memory buffer available. Wait for it.
op->wait_for = WAIT_JOURNAL_BUFFER;
return 0;
}
}
//else if (journal.sector_info[journal.cur_sector].offset + 512 != journal.next_free ||
// journal.len - next_pos < op->len)
// two_sqes = true;
next_pos = (journal.len - next_pos < op->len ? 512 : next_pos) + op->len;
if (next_pos >= journal.used_start)
{
// No space in the journal. Wait for it.
op->wait_for = WAIT_JOURNAL;
op->wait_detail = next_pos;
return 0;
}
// There is sufficient space. Get SQE(s)
BS_SUBMIT_GET_SQE(sqe1, data1);
BS_SUBMIT_GET_SQE(sqe2, data2);
// Got SQEs. Prepare journal sector write
if (512 - journal.in_sector_pos < sizeof(struct journal_entry_small_write))
{
// Move to the next journal sector
// Also select next sector buffer in memory
journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
journal.sector_info[journal.cur_sector].offset = journal.next_free;
journal.in_sector_pos = 0;
journal.next_free = (journal.next_free+512) < journal.len ? journal.next_free + 512 : 512;
memset(journal.sector_buf + 512*journal.cur_sector, 0, 512);
}
journal_entry_small_write *je = (struct journal_entry_small_write*)(
journal.sector_buf + 512*journal.cur_sector + journal.in_sector_pos
);
*je = {
.crc32 = 0,
.magic = JOURNAL_MAGIC,
.type = JE_SMALL_WRITE,
.size = sizeof(struct journal_entry_small_write),
.crc32_prev = journal.crc32_last,
.oid = op->oid,
.version = op->version,
.offset = op->offset,
.len = op->len,
};
journal_entry_small_write *je = (journal_entry_small_write*)
prefill_single_journal_entry(journal, JE_SMALL_WRITE, sizeof(struct journal_entry_small_write));
je->oid = op->oid;
je->version = op->version;
je->offset = op->offset;
je->len = op->len;
je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32;
data1->iov = (struct iovec){ journal.sector_buf + 512*journal.cur_sector, 512 };
@@ -133,8 +99,9 @@ int blockstore::dequeue_write(blockstore_operation *op)
sqe1, journal.fd, &data1->iov, 1, journal.offset + journal.sector_info[journal.cur_sector].offset
);
journal.sector_info[journal.cur_sector].usage_count++;
op->min_used_journal_sector = op->max_used_journal_sector = 1 + journal.cur_sector;
// Prepare journal data write
journal.next_free = (journal.next_free + op->len) < journal.len ? journal.next_free + op->len : 512;
journal.next_free = (journal.next_free + op->len) < journal.len ? journal.next_free : 512;
data2->iov = (struct iovec){ op->buf, op->len };
data2->op = op;
io_uring_prep_writev(
@@ -142,10 +109,8 @@ int blockstore::dequeue_write(blockstore_operation *op)
);
dirty_it->second.location = journal.next_free;
dirty_it->second.state = ST_J_SUBMITTED;
// Move journal.next_free
journal.next_free += op->len;
op->pending_ops = 2;
op->min_used_journal_sector = op->max_used_journal_sector = 1 + journal.cur_sector;
}
return 1;
}
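The "(and fix it at the same time)" half of the commit title is this last hunk: previously journal.next_free was advanced past the data before the data writev and the dirty_db location were prepared, so both pointed past where the data actually went. A reading of the change, assuming the writev elided by the hunk boundary targets journal.offset + journal.next_free:

// Old: advance first, then point the data write and dirty_db past the data.
//   journal.next_free = (journal.next_free + op->len) < journal.len ? journal.next_free + op->len : 512;
// New: only wrap when op->len does not fit before the end of the journal,
// write the data at next_free, record where it went, then advance:
//   journal.next_free = (journal.next_free + op->len) < journal.len ? journal.next_free : 512;
//   data2->iov = (struct iovec){ op->buf, op->len };
//   ...
//   dirty_it->second.location = journal.next_free;
//   journal.next_free += op->len;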