Used journal sector tracking

blocking-uring-test
Vitaliy Filippov 2019-11-14 21:15:59 +03:00
parent f1e236c6e8
commit 0627dd0f5e
6 changed files with 18 additions and 1 deletion

View File

@@ -129,7 +129,7 @@ struct __attribute__((__packed__)) clean_entry
uint64_t location;
};
// 48 = 24 + 24 bytes per dirty entry in memory (obj_ver_id => dirty_entry)
// 56 = 24 + 32 bytes per dirty entry in memory (obj_ver_id => dirty_entry)
struct __attribute__((__packed__)) obj_ver_id
{
object_id oid;
@@ -148,6 +148,7 @@ struct __attribute__((__packed__)) dirty_entry
uint64_t location; // location in either journal or data
uint32_t offset; // offset within stripe
uint32_t len; // data length
uint64_t journal_sector; // journal sector used for this entry
};
class oid_hash
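
For context, after this change each in-memory dirty entry carries the offset of the journal sector that describes it, which is what makes per-sector reference counting possible. A minimal compilable sketch of the structures as they look after the patch; only the fields visible in the hunk above are confirmed, the state/flags fields and the object_id layout are assumptions chosen to match the "56 = 24 + 32" comment:

#include <cstdint>

// Assumed layout: 16 bytes total
struct __attribute__((__packed__)) object_id
{
    uint64_t inode;
    uint64_t stripe;
};

// 24 bytes per key: 16-byte object_id + 8-byte version
struct __attribute__((__packed__)) obj_ver_id
{
    object_id oid;
    uint64_t version;
};

// 32 bytes per value after this commit (24 before), hence "56 = 24 + 32"
struct __attribute__((__packed__)) dirty_entry
{
    uint32_t state;           // assumed: present before this commit
    uint32_t flags;           // assumed: present before this commit
    uint64_t location;        // location in either journal or data
    uint32_t offset;          // offset within stripe
    uint32_t len;             // data length
    uint64_t journal_sector;  // journal sector used for this entry (new in this commit)
};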

View File

@@ -289,6 +289,8 @@ resume_0:
flusher->syncs.erase(cur_sync);
}
}
// FIXME: Adjust clean_db and dirty_db
// FIXME: ...and clear part of the journal
wait_state = 0;
flusher->active_flushers--;
goto resume_0;
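
The two FIXMEs mark where the flusher will eventually have to release journal space. A rough sketch of what that could look like, assuming the per-sector counts added in this commit; the function and its name are hypothetical, not part of the patch:

#include <cstdint>
#include <map>

// Hypothetical illustration of the flusher FIXMEs above: once an entry has been
// flushed to its final location, drop its reference to the journal sector and
// forget the sector when no dirty entry points at it anymore.
void release_journal_sector(std::map<uint64_t, uint64_t> & used_sectors, uint64_t journal_sector)
{
    auto it = used_sectors.find(journal_sector);
    if (it == used_sectors.end())
        return;                 // nothing to release (e.g. journal_sector == 0)
    if (--(it->second) == 0)
    {
        used_sectors.erase(it); // sector is no longer referenced...
        // ...and the journal start pointer could be advanced past it (not shown here)
    }
}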

View File

@@ -276,6 +276,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
break;
}
}
bs->journal.used_sectors[total_pos]++;
pos += je->size;
crc32_last = je->crc32;
if (je->type == JE_SMALL_WRITE)
@@ -292,6 +293,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
{
// data is right next
location = done_pos + total_pos;
// FIXME: OOPS. Please don't modify total_pos here
total_pos += je->small_write.len;
}
bs->dirty_db.emplace((obj_ver_id){
@@ -303,6 +305,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
.location = location,
.offset = je->small_write.offset,
.len = je->small_write.len,
.journal_sector = total_pos,
});
}
else if (je->type == JE_BIG_WRITE)
@@ -317,6 +320,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
.location = je->big_write.location,
.offset = 0,
.len = bs->block_size,
.journal_sector = total_pos,
});
}
else if (je->type == JE_STABLE)
@@ -351,6 +355,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t len)
.location = 0,
.offset = 0,
.len = 0,
.journal_sector = total_pos,
});
}
}
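
The "OOPS" FIXME points at a real inconsistency visible in these hunks: used_sectors is incremented at the pre-advance total_pos, but for small writes total_pos is advanced past the inline data before being stored into journal_sector, so the dirty entry ends up pointing past the position whose counter was bumped. A hedged sketch of the intended ordering; the helper and its parameters are illustrative, not code from the commit:

#include <cstdint>
#include <map>

// Hypothetical illustration of the FIXME above: capture the position of the journal
// entry itself before the scan position moves past a small write's inline data.
uint64_t record_small_write(std::map<uint64_t, uint64_t> & used_sectors,
    uint64_t & total_pos, uint32_t entry_size, uint32_t data_len)
{
    uint64_t entry_pos = total_pos;      // position that holds the journal entry itself
    used_sectors[entry_pos]++;           // reference counted against that position
    total_pos += entry_size + data_len;  // only now advance past the entry and its data
    return entry_pos;                    // this is what .journal_sector should be set to
}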

View File

@@ -121,6 +121,10 @@ struct journal_t
uint64_t sector_count;
int cur_sector = 0;
int in_sector_pos = 0;
// Used sector map
// May use ~ 80 MB per 1 GB of used journal space in the worst case
std::map<uint64_t, uint64_t> used_sectors;
};
struct blockstore_journal_check_t
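
The "~ 80 MB per 1 GB" figure follows from counting one map node per journal sector in the worst case. A quick back-of-the-envelope check, assuming 512-byte journal sectors and roughly 40 bytes per std::map node (both assumptions, not stated in the patch):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t journal_bytes = 1ull << 30;  // 1 GB of used journal space
    const uint64_t sector_size = 512;           // assumed journal sector size
    const uint64_t node_bytes = 40;             // assumed per-node cost of std::map<uint64_t, uint64_t>
    const uint64_t sectors = journal_bytes / sector_size;
    printf("worst case: %" PRIu64 " sectors -> ~%" PRIu64 " MB for used_sectors\n",
        sectors, (sectors * node_bytes) >> 20);
    // prints: worst case: 2097152 sectors -> ~80 MB for used_sectors
    return 0;
}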

View File

@@ -78,6 +78,8 @@ int blockstore::continue_sync(blockstore_operation *op)
{
journal_entry_big_write *je = (journal_entry_big_write*)
prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
dirty_db[*it].journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
je->oid = it->oid;
je->version = it->version;
je->location = dirty_db[*it].location;
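
The same two-line pattern (record the offset of the journal sector currently being filled in the dirty entry, then bump that sector's use count) appears here for big writes and again below for small writes in dequeue_write(). A hypothetical helper showing the pattern in isolation; it is not part of the patch:

#include <cstdint>
#include <map>

// Hypothetical helper for the pattern used in both hunks: tie a dirty entry to the
// journal sector being written and increase that sector's reference count by one.
static inline void journal_ref(std::map<uint64_t, uint64_t> & used_sectors,
    uint64_t & entry_journal_sector, uint64_t cur_sector_offset)
{
    entry_journal_sector = cur_sector_offset;  // dirty entry's journal_sector = sector offset
    used_sectors[cur_sector_offset]++;         // journal.used_sectors[sector offset]++
}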

View File

@@ -34,6 +34,7 @@ void blockstore::enqueue_write(blockstore_operation *op)
.location = 0,
.offset = op->offset,
.len = op->len,
.journal_sector = 0,
});
// Remember write as unsynced here, so external consumers could get
// the list of dirty objects to sync just before issuing a SYNC request
@@ -122,6 +123,8 @@ int blockstore::dequeue_write(blockstore_operation *op)
// Got SQEs. Prepare journal sector write
journal_entry_small_write *je = (journal_entry_small_write*)
prefill_single_journal_entry(journal, JE_SMALL_WRITE, sizeof(struct journal_entry_small_write));
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
je->oid = op->oid;
je->version = op->version;
je->offset = op->offset;
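
Taken together, these two hunks give a small write's journal_sector a simple lifecycle: zero at enqueue time (no sector assigned yet), then set and reference-counted when the journal entry is actually prefilled. A hypothetical walk-through with illustrative values, not code from the commit:

#include <cstdint>
#include <map>

int main()
{
    std::map<uint64_t, uint64_t> used_sectors;
    uint64_t journal_sector = 0;        // enqueue_write(): entry created, no sector assigned yet
    uint64_t cur_sector_offset = 512;   // dequeue_write(): sector being filled (value is illustrative)
    journal_sector = cur_sector_offset; // the entry is now tied to that sector...
    used_sectors[cur_sector_offset]++;  // ...and the sector knows one more entry depends on it
    // The flusher FIXMEs earlier in the commit are where this count would later be decremented.
    return 0;
}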