Use fdatasync (just for testing over an FS)

blocking-uring-test
Vitaliy Filippov 2019-11-27 02:33:33 +03:00
parent 06634054c9
commit ce5cd13bc8
4 changed files with 9 additions and 9 deletions
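Background for the change: with fsync_flags == 0, an io_uring fsync behaves like fsync(2), which also flushes inode metadata such as timestamps on every call; IORING_FSYNC_DATASYNC gives fdatasync(2) semantics, flushing only the data (plus whatever metadata is needed to read it back), which is usually cheaper when the store runs over a file on a filesystem. my_uring_prep_fsync is this repo's own SQE helper; as a minimal standalone sketch, the same call with stock liburing looks roughly like this (file name and queue depth are arbitrary, error handling shortened):

// Sketch: a datasync-flavoured fsync through io_uring with stock liburing.
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    io_uring_queue_init(8, &ring, 0);
    int fd = open("testfile", O_RDWR | O_CREAT, 0644);
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    // 0 here would mean full fsync(2); IORING_FSYNC_DATASYNC = fdatasync(2) semantics
    io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
    io_uring_submit(&ring);
    struct io_uring_cqe *cqe;
    io_uring_wait_cqe(&ring, &cqe);
    printf("fsync result: %d\n", cqe->res); // 0 on success, -errno on failure
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}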

@@ -353,14 +353,14 @@ resume_0:
     await_sqe(9);
     data->callback = simple_callback;
     data->iov = { 0 };
-    my_uring_prep_fsync(sqe, bs->data_fd, 0);
+    my_uring_prep_fsync(sqe, bs->data_fd, IORING_FSYNC_DATASYNC);
     wait_count++;
     if (bs->meta_fd != bs->data_fd)
     {
         await_sqe(10);
         data->callback = simple_callback;
         data->iov = { 0 };
-        my_uring_prep_fsync(sqe, bs->meta_fd, 0);
+        my_uring_prep_fsync(sqe, bs->meta_fd, IORING_FSYNC_DATASYNC);
         wait_count++;
     }
     wait_state = 11;

@@ -206,7 +206,7 @@ resume_1:
     my_uring_prep_writev(sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset);
     wait_count++;
     GET_SQE();
-    my_uring_prep_fsync(sqe, bs->journal.fd, 0);
+    my_uring_prep_fsync(sqe, bs->journal.fd, IORING_FSYNC_DATASYNC);
     data->iov = { 0 };
     data->callback = simple_callback;
     wait_count++;
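A side note on the pattern above: the journal writev and the fsync are queued as independent SQEs, and io_uring does not order independent SQEs, so the code has to serialize them itself (here via wait_count and the completion callbacks). Purely for reference, stock liburing can also enforce write-before-fsync ordering inside one submission with IOSQE_IO_LINK; a hypothetical self-contained sketch, not how this repo does it:

// Hypothetical sketch: chain a write and a datasync so the fsync starts
// only after the write completes (IOSQE_IO_LINK links consecutive SQEs).
#include <liburing.h>
#include <fcntl.h>
#include <string.h>

int main(void)
{
    struct io_uring ring;
    io_uring_queue_init(8, &ring, 0);
    int fd = open("journal.test", O_RDWR | O_CREAT, 0644);
    char buf[4096];
    memset(buf, 0, sizeof(buf));
    struct iovec iov = { buf, sizeof(buf) };
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_writev(sqe, fd, &iov, 1, 0);
    sqe->flags |= IOSQE_IO_LINK;        // fsync below waits for this write
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
    io_uring_submit(&ring);             // both complete, in order
    struct io_uring_cqe *cqe;
    for (int i = 0; i < 2; i++)
    {
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);
    }
    io_uring_queue_exit(&ring);
    return 0;
}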

@@ -7,9 +7,9 @@
 //
 // This makes 1 4K small write+sync look like:
 // 512b+4K (journal) + sync + 512b (journal) + sync + 4K (data) [+ sync?] + 512b (metadata) + sync.
-// WA = 2.375. It's not the best, SSD FTL-like redirect-write with defragmentation
-// could probably be lower even with defragmentation. But it's fixed and it's still
-// better than in Ceph. :) except for HDD-only clusters, because each write results in 3 seeks.
+// WA = 2.375. It's not the best, SSD FTL-like redirect-write could probably be lower
+// even with defragmentation. But it's fixed and it's still better than in Ceph. :)
+// except for HDD-only clusters, because each write results in 3 seeks.
 // Stabilize big write:
 // 1) Copy metadata from the journal to the metadata device
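For the record, the 2.375 figure is just the byte counts listed in the comment divided by the user write size:

    (512 + 4096) + 512 + 4096 + 512 = 9728 bytes written per 4K user write
    9728 / 4096 = 2.375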

@@ -41,7 +41,7 @@ int blockstore::continue_sync(blockstore_operation *op)
     // No big writes, just fsync the journal
     // FIXME: Add no-fsync mode
     BS_SUBMIT_GET_SQE(sqe, data);
-    my_uring_prep_fsync(sqe, journal.fd, 0);
+    my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
     data->iov = { 0 };
     data->callback = cb;
     op->min_used_journal_sector = op->max_used_journal_sector = 0;
@@ -53,7 +53,7 @@ int blockstore::continue_sync(blockstore_operation *op)
     // 1st step: fsync data
     // FIXME: Add no-fsync mode
     BS_SUBMIT_GET_SQE(sqe, data);
-    my_uring_prep_fsync(sqe, data_fd, 0);
+    my_uring_prep_fsync(sqe, data_fd, IORING_FSYNC_DATASYNC);
     data->iov = { 0 };
     data->callback = cb;
     op->min_used_journal_sector = op->max_used_journal_sector = 0;
@@ -100,7 +100,7 @@ int blockstore::continue_sync(blockstore_operation *op)
     }
     op->max_used_journal_sector = 1 + journal.cur_sector;
     // ... And a journal fsync
-    my_uring_prep_fsync(sqe[s], journal.fd, 0);
+    my_uring_prep_fsync(sqe[s], journal.fd, IORING_FSYNC_DATASYNC);
     struct ring_data_t *data = ((ring_data_t*)sqe[s]->user_data);
     data->iov = { 0 };
     data->callback = cb;