Rename object_db to clean_db

blocking-uring-test
Vitaliy Filippov 2019-11-10 15:17:21 +03:00
parent 69581e6d0c
commit ff57dd420e
5 changed files with 11 additions and 11 deletions

View File

@@ -229,7 +229,7 @@ class blockstore
struct ring_consumer_t ring_consumer; struct ring_consumer_t ring_consumer;
// Another option is https://github.com/algorithm-ninja/cpp-btree // Another option is https://github.com/algorithm-ninja/cpp-btree
spp::sparse_hash_map<object_id, clean_entry, oid_hash> object_db; spp::sparse_hash_map<object_id, clean_entry, oid_hash> clean_db;
std::map<obj_ver_id, dirty_entry> dirty_db; std::map<obj_ver_id, dirty_entry> dirty_db;
std::list<blockstore_operation*> submit_queue; std::list<blockstore_operation*> submit_queue;
std::deque<obj_ver_id> unsynced_big_writes, unsynced_small_writes; std::deque<obj_ver_id> unsynced_big_writes, unsynced_small_writes;

View File

@@ -75,7 +75,7 @@ void blockstore_init_meta::handle_entries(struct clean_disk_entry* entries, int
if (entries[i].oid.inode > 0) if (entries[i].oid.inode > 0)
{ {
allocator_set(bs->data_alloc, done_cnt+i, true); allocator_set(bs->data_alloc, done_cnt+i, true);
bs->object_db[entries[i].oid] = (struct clean_entry){ bs->clean_db[entries[i].oid] = (struct clean_entry){
entries[i].version, entries[i].version,
(uint32_t)(entries[i].flags ? ST_CURRENT : ST_D_META_SYNCED), (uint32_t)(entries[i].flags ? ST_CURRENT : ST_D_META_SYNCED),
done_cnt+i done_cnt+i

View File

@@ -71,13 +71,13 @@ int blockstore::fulfill_read(blockstore_operation *read_op, uint32_t item_start,
int blockstore::dequeue_read(blockstore_operation *read_op) int blockstore::dequeue_read(blockstore_operation *read_op)
{ {
auto clean_it = object_db.find(read_op->oid); auto clean_it = clean_db.find(read_op->oid);
auto dirty_it = dirty_db.upper_bound((obj_ver_id){ auto dirty_it = dirty_db.upper_bound((obj_ver_id){
.oid = read_op->oid, .oid = read_op->oid,
.version = UINT64_MAX, .version = UINT64_MAX,
}); });
dirty_it--; dirty_it--;
bool clean_found = clean_it != object_db.end(); bool clean_found = clean_it != clean_db.end();
bool dirty_found = (dirty_it != dirty_db.end() && dirty_it->first.oid == read_op->oid); bool dirty_found = (dirty_it != dirty_db.end() && dirty_it->first.oid == read_op->oid);
if (!clean_found && !dirty_found) if (!clean_found && !dirty_found)
{ {
@@ -107,7 +107,7 @@ int blockstore::dequeue_read(blockstore_operation *read_op)
dirty_it--; dirty_it--;
} }
} }
if (clean_it != object_db.end()) if (clean_it != clean_db.end())
{ {
if (!fulfill_read(read_op, 0, block_size, ST_CURRENT, 0, clean_it->second.location)) if (!fulfill_read(read_op, 0, block_size, ST_CURRENT, 0, clean_it->second.location))
{ {

View File

@@ -8,8 +8,8 @@ int blockstore::dequeue_stable(blockstore_operation *op)
}); });
if (dirty_it == dirty_db.end()) if (dirty_it == dirty_db.end())
{ {
auto clean_it = object_db.find(op->oid); auto clean_it = clean_db.find(op->oid);
if (clean_it == object_db.end() || clean_it->second.version < op->version) if (clean_it == clean_db.end() || clean_it->second.version < op->version)
{ {
// No such object version // No such object version
op->retval = EINVAL; op->retval = EINVAL;
@@ -116,7 +116,7 @@ void blockstore::handle_stable_event(ring_data_t *data, blockstore_operation *op
dirty_it->second.state = ST_J_STABLE; dirty_it->second.state = ST_J_STABLE;
// Copy data from the journal to the data device // Copy data from the journal to the data device
// -> increase version on the metadata device // -> increase version on the metadata device
// -> advance object_db entry's version and clear previous journal entries // -> advance clean_db entry's version and clear previous journal entries
// This makes 1 4K small write look like: // This makes 1 4K small write look like:
// 512b+4K (journal) + sync + 512b (journal) + sync + 512b (metadata) + 4K (data) + sync. // 512b+4K (journal) + sync + 512b (journal) + sync + 512b (metadata) + 4K (data) + sync.
// WA = 2.375. It's not the best, SSD FTL-like redirect-write with defragmentation // WA = 2.375. It's not the best, SSD FTL-like redirect-write with defragmentation
@@ -127,7 +127,7 @@ void blockstore::handle_stable_event(ring_data_t *data, blockstore_operation *op
{ {
dirty_it->second.state = ST_D_STABLE; dirty_it->second.state = ST_D_STABLE;
// Copy metadata from the journal to the metadata device // Copy metadata from the journal to the metadata device
// -> move dirty_db entry to object_db and clear previous journal entries // -> move dirty_db entry to clean_db and clear previous journal entries
// This makes 1 128K big write look like: // This makes 1 128K big write look like:
// 128K (data) + sync + 512b (journal) + sync + 512b (journal) + sync + 512b (metadata) + sync. // 128K (data) + sync + 512b (journal) + sync + 512b (journal) + sync + 512b (metadata) + sync.
// WA = 1.012. Very good :) // WA = 1.012. Very good :)

View File

@@ -14,8 +14,8 @@ void blockstore::enqueue_write(blockstore_operation *op)
} }
else else
{ {
auto clean_it = object_db.find(op->oid); auto clean_it = clean_db.find(op->oid);
if (clean_it != object_db.end()) if (clean_it != clean_db.end())
{ {
op->version = clean_it->second.version + 1; op->version = clean_it->second.version + 1;
} }