Fix old_clean_loc treatment (cannot persist sparse_hash_map iterator)

blocking-uring-test
Vitaliy Filippov 2019-11-27 02:16:43 +03:00
parent 2831d40edb
commit 9568354d78
3 changed files with 13 additions and 13 deletions
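
Why the iterator cannot be persisted: journal_flusher_co is a hand-rolled coroutine (note the wait_state / goto resume_0 pattern in the diff below), so its members live across suspension points, and other operations may insert into bs->clean_db while it is suspended. spp::sparse_hash_map is an open-addressing table that may rehash on insert, and rehashing invalidates outstanding iterators. A minimal sketch of the hazard, with a uint64_t key and a clean_entry_t stand-in for the real object_id/clean_entry types:

#include <cstdint>
#include <sparsepp/spp.h>

struct clean_entry_t { uint64_t version, location; };  // stand-in type

int main()
{
    spp::sparse_hash_map<uint64_t, clean_entry_t> clean_db;
    clean_db[1] = { 1, 1 << 17 };
    auto it = clean_db.find(1);        // iterator taken before a "yield"
    for (uint64_t i = 2; i < 1000; i++)
        clean_db[i] = { 1, i << 17 };  // other flushes insert entries; any
                                       // insert may rehash and invalidate `it`
    // Dereferencing `it` here would be undefined behavior. Safe pattern:
    // copy the value out while the iterator is still valid, as the fix does.
    auto fresh = clean_db.find(1);
    uint64_t old_clean_loc = fresh != clean_db.end()
        ? fresh->second.location : UINT64_MAX;
    return old_clean_loc == UINT64_MAX;
}

Hence the fix: the flusher now stores old_clean_loc, a plain uint64_t, which no rehash can invalidate.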

Makefile

@@ -4,9 +4,9 @@ all: $(BLOCKSTORE_OBJS) test test_blockstore libfio_blockstore.so
 clean:
 	rm -f *.o
 crc32c.o: crc32c.c
-	g++ -fPIC -c -o $@ $<
+	g++ -g -O3 -fPIC -c -o $@ $<
 %.o: %.cpp allocator.h blockstore_flush.h blockstore.h blockstore_init.h blockstore_journal.h crc32c.h ringloop.h xor.h timerfd_interval.h
-	g++ -g -Wall -Wno-sign-compare -Wno-parentheses -fPIC -c -o $@ $<
+	g++ -g -O3 -Wall -Wno-sign-compare -Wno-parentheses -fPIC -c -o $@ $<
 test: test.cpp
 	g++ -g -O3 -o test -luring test.cpp
 test_blockstore: $(BLOCKSTORE_OBJS) test_blockstore.cpp
@@ -14,4 +14,4 @@ test_blockstore: $(BLOCKSTORE_OBJS) test_blockstore.cpp
 test_allocator: test_allocator.cpp allocator.o
 	g++ -g -o test_allocator test_allocator.cpp allocator.o
 libfio_blockstore.so: fio_engine.cpp $(BLOCKSTORE_OBJS)
-	g++ -g -Wno-pointer-arith -fPIC -shared -luring -o libfio_blockstore.so fio_engine.cpp $(BLOCKSTORE_OBJS)
+	g++ -g -O3 -Wno-pointer-arith -fPIC -shared -luring -o libfio_blockstore.so fio_engine.cpp $(BLOCKSTORE_OBJS)

blockstore_flush.cpp

@@ -226,20 +226,21 @@ resume_0:
         wait_state = 0;
         goto resume_0;
     }
+    // Find it in clean_db
+    {
+        auto clean_it = bs->clean_db.find(cur.oid);
+        old_clean_loc = (clean_it != bs->clean_db.end() ? clean_it->second.location : UINT64_MAX);
+    }
     if (clean_loc == UINT64_MAX)
     {
-        // Find it in clean_db
-        clean_it = bs->clean_db.find(cur.oid);
-        if (clean_it == bs->clean_db.end())
+        if (old_clean_loc == UINT64_MAX)
         {
             // Object not present at all. This is a bug.
             throw std::runtime_error("BUG: Object we are trying to flush is not allocated on the data device");
         }
         else
-            clean_loc = clean_it->second.location;
+            clean_loc = old_clean_loc;
     }
-    else
-        clean_it = bs->clean_db.end();
     // Also we need to submit the metadata read. We do a read-modify-write for every operation.
     // But we must check if the same sector is already in memory.
     // Another option is to keep all raw metadata in memory all the time. Maybe I'll do it sometime...
@@ -383,9 +384,9 @@ resume_0:
         }
     }
     // Update clean_db and dirty_db, free old data locations
-    if (clean_it != bs->clean_db.end() && clean_it->second.location != clean_loc)
+    if (old_clean_loc != clean_loc)
     {
-        bs->data_alloc->set(clean_it->second.location >> bs->block_order, false);
+        bs->data_alloc->set(old_clean_loc >> bs->block_order, false);
     }
     bs->clean_db[cur.oid] = {
         .version = cur.version,

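The shape of the fix, as a stand-alone sketch: capture the object's previous location into a plain uint64_t up front (UINT64_MAX meaning "not present in clean_db"), then compare and free by value when updating the allocator, so no hash-map iterator has to survive the interval in between. allocator_t and block_order = 17 (128 KiB blocks) are hypothetical stand-ins for the real data_alloc and bs->block_order:

#include <cstdint>
#include <vector>

struct allocator_t
{
    std::vector<bool> used;
    // Stub only: the stand-in bounds-checks; a bitmap of used data blocks.
    void set(uint64_t block, bool v) { if (block < used.size()) used[block] = v; }
};

int main()
{
    const uint64_t block_order = 17;
    allocator_t data_alloc{std::vector<bool>(1024, false)};
    uint64_t old_clean_loc = uint64_t(5) << block_order;  // previous clean location
    uint64_t clean_loc = uint64_t(7) << block_order;      // freshly written location
    data_alloc.set(old_clean_loc >> block_order, true);
    data_alloc.set(clean_loc >> block_order, true);
    // Same value-level check as the diff: the object moved, free the old block.
    if (old_clean_loc != clean_loc)
        data_alloc.set(old_clean_loc >> block_order, false);
    return data_alloc.used[5];  // 0: old block was freed
}

The UINT64_MAX sentinel also turns the earlier "object missing" check into a simple value comparison, as in the new throw branch above.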
blockstore_flush.h

@@ -31,10 +31,9 @@ class journal_flusher_co
     bool skip_copy;
     obj_ver_id cur;
     std::map<obj_ver_id, dirty_entry>::iterator dirty_it, dirty_start, dirty_end;
-    spp::sparse_hash_map<object_id, clean_entry, oid_hash>::iterator clean_it;
     std::vector<copy_buffer_t> v;
     std::vector<copy_buffer_t>::iterator it;
-    uint64_t offset, len, submit_len, clean_loc, meta_sector, meta_pos;
+    uint64_t offset, len, submit_len, clean_loc, old_clean_loc, meta_sector, meta_pos;
     std::map<uint64_t, meta_sector_t>::iterator meta_it;
     std::map<object_id, uint64_t>::iterator repeat_it;
     std::map<uint64_t, uint64_t>::iterator journal_used_it;
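
Note that the std::map iterator members (dirty_it, meta_it, repeat_it, journal_used_it) stay, and that is safe: std::map is node-based, and the C++ standard guarantees its iterators remain valid across inserts and across erases of other elements. Only the sparse_hash_map iterator lacked that guarantee. A minimal self-contained demonstration, with names echoing the class members:

#include <cassert>
#include <cstdint>
#include <map>

int main()
{
    std::map<uint64_t, uint64_t> dirty_db;
    dirty_db[10] = 100;
    auto dirty_it = dirty_db.find(10);   // safe to keep across "yields"
    for (uint64_t i = 1000; i < 2000; i++)
        dirty_db[i] = i;                 // inserts never move existing nodes
    assert(dirty_it->second == 100);     // still valid, by standard guarantee
    return 0;
}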