// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.0 (see README.md for details)
#include "blockstore_impl.h"
// Stabilize small write:
// 1) Copy data from the journal to the data device
// 2) Increase version on the metadata device and sync it
// 3) Advance clean_db entry's version, clear previous journal entries
//
// This makes 1 4K small write+sync look like:
// 512b+4K (journal) + sync + 512b (journal) + sync + 4K (data) [+ sync?] + 512b (metadata) + sync.
// WA = 2.375. It's not the best; an SSD FTL-like redirect-write scheme could probably
// achieve lower WA, even with defragmentation. But this value is fixed, and it's still
// better than in Ceph :) except on HDD-only clusters, where each write results in 3 seeks.
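//
// The 2.375 figure follows directly from the byte counts above: one 4K write costs
// (512 + 4096) journal data write + 512 journal stabilize entry + 4096 data copy
// + 512 metadata block = 9728 bytes written for 4096 bytes of user data,
// and 9728 / 4096 = 2.375.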
// Stabilize big write:
// 1) Copy metadata from the journal to the metadata device
// 2) Move dirty_db entry to clean_db and clear previous journal entries
//
// This makes 1 128K big write+sync look like:
// 128K (data) + sync + 512b (journal) + sync + 512b (journal) + sync + 512b (metadata) + sync.
// WA = 1.012. Very good :)
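//
// Again from the byte counts above: 131072 (data) + 512 + 512 (journal entries)
// + 512 (metadata) = 132608 bytes written per 131072-byte user write,
// and 132608 / 131072 = 1.0117, i.e. ~1.012 after rounding.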
// Stabilize delete:
// 1) Remove metadata entry and sync it
// 2) Remove dirty_db entry and clear previous journal entries
//
// We have 2 problems here:
// - In a cluster environment, we must keep "tombstones" of deleted objects until
//   all replicas (not just a quorum) agree about their deletion. That is, "stabilize"
//   is not possible for deletes in degraded placement groups
// - With simple "fixed" metadata tables, we can't just clear the metadata entry of the
//   latest object version. We must clear all previous entries, too.
// FIXME Fix both problems - probably by switching from "fixed" metadata tables to "dynamic" ones.
// AND we must do it in batches, for the sake of a reduced fsync call count
// AND we must know what we stabilize. The basic workflow is:
// 1) the primary OSD receives a sync request
// 2) it submits syncs to its own blockstore and to its peers
// 3) after everyone acks the sync, it acks the sync to the client
// 4) after a while, it takes its list of synced objects and sends stabilize requests
//    to its peers and to its own blockstore, thus freeing the old versions
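//
// A minimal caller-side sketch (hypothetical usage, not part of this file),
// assuming the BS_OP_STABLE opcode and enqueue_op() from the public blockstore
// interface: the caller packs an array of obj_ver_id into op->buf and sets
// op->len to the number of entries.
//
//     obj_ver_id vers[] = { { oid, version } };   // versions to stabilize
//     blockstore_op_t *op = new blockstore_op_t();
//     op->opcode = BS_OP_STABLE;
//     op->buf = vers;                             // array of obj_ver_id
//     op->len = sizeof(vers) / sizeof(vers[0]);   // number of entries
//     op->callback = [](blockstore_op_t *op) { /* op->retval == 0 on success */ };
//     bs->enqueue_op(op);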
int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
{
    if (PRIV(op)->op_state)
    {
        return continue_stable(op);
    }
    obj_ver_id* v;
    int i, todo = 0;
    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
    {
        auto dirty_it = dirty_db.find(*v);
        if (dirty_it == dirty_db.end())
        {
            auto clean_it = clean_db.find(v->oid);
            if (clean_it == clean_db.end() || clean_it->second.version < v->version)
            {
                // No such object version
                op->retval = -ENOENT;
                FINISH_OP(op);
                return 1;
            }
            else
            {
                // Already stable
            }
        }
        else if (IS_IN_FLIGHT(dirty_it->second.state))
        {
            // Object write is still in progress. Wait until the write request completes
            return 0;
        }
        else if (!IS_SYNCED(dirty_it->second.state))
        {
            // Object not synced yet. Caller must sync it first
            op->retval = -EBUSY;
            FINISH_OP(op);
            return 1;
        }
        else if (!IS_STABLE(dirty_it->second.state))
        {
            todo++;
        }
    }
    if (!todo)
    {
        // Already stable
        op->retval = 0;
        FINISH_OP(op);
        return 1;
    }
    // Check journal space
    blockstore_journal_check_t space_check(this);
    if (!space_check.check_available(op, todo, sizeof(journal_entry_stable), 0))
    {
        return 0;
    }
    // There is sufficient space. Get SQEs
    struct io_uring_sqe *sqe[space_check.sectors_required];
    for (i = 0; i < space_check.sectors_required; i++)
    {
        BS_SUBMIT_GET_SQE_DECL(sqe[i]);
    }
    // Prepare and submit journal entries
    auto cb = [this, op](ring_data_t *data) { handle_stable_event(data, op); };
    int s = 0, cur_sector = -1;
    if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_stable) &&
        journal.sector_info[journal.cur_sector].dirty)
    {
        if (cur_sector == -1)
            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
        cur_sector = journal.cur_sector;
        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
    }
    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
    {
        // FIXME: Only stabilize versions that aren't stable yet
        journal_entry_stable *je = (journal_entry_stable*)
            prefill_single_journal_entry(journal, JE_STABLE, sizeof(journal_entry_stable));
        journal.sector_info[journal.cur_sector].dirty = false;
        je->oid = v->oid;
        je->version = v->version;
        je->crc32 = je_crc32((journal_entry*)je);
        journal.crc32_last = je->crc32;
        if (cur_sector != journal.cur_sector)
        {
            // Write the previous sector. We should only write a sector after filling it,
            // because otherwise we'd write a lot more sectors in "no_same_sector_overwrite" mode
            if (cur_sector != -1)
                prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
            else
                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
            cur_sector = journal.cur_sector;
        }
    }
    if (cur_sector != -1)
        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
    PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
    PRIV(op)->pending_ops = s;
    PRIV(op)->op_state = 1;
    return 1;
}
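// State machine sketch for continue_stable() below, as inferred from the code:
// handle_stable_event() increments op_state each time all pending I/Os complete.
// 1 = journal sector writes in flight; 2 = writes done, release used journal sectors;
// 3 = sectors released, submit the journal fsync (skipped if disable_journal_fsync);
// 4 = fsync in flight; 5 = journal entries durable, mark versions stable and finish.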
int blockstore_impl_t::continue_stable(blockstore_op_t *op)
{
    if (PRIV(op)->op_state == 2)
        goto resume_2;
    else if (PRIV(op)->op_state == 3)
        goto resume_3;
    else if (PRIV(op)->op_state == 5)
        goto resume_5;
    else
        return 1;
resume_2:
    // Release used journal sectors
    release_journal_sectors(op);
resume_3:
    if (!disable_journal_fsync)
    {
        io_uring_sqe *sqe = get_sqe();
        if (!sqe)
        {
            return 0;
        }
        ring_data_t *data = ((ring_data_t*)sqe->user_data);
        my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
        data->iov = { 0 };
        data->callback = [this, op](ring_data_t *data) { handle_stable_event(data, op); };
        PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
        PRIV(op)->pending_ops = 1;
        PRIV(op)->op_state = 4;
        return 1;
    }
resume_5:
    // Mark dirty_db entries as stable, acknowledge op completion
    obj_ver_id* v;
    int i;
    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
    {
        // Mark all dirty_db entries up to op->version as stable
        mark_stable(*v);
    }
    // Acknowledge op
    op->retval = 0;
    FINISH_OP(op);
    return 1;
}
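// Mark the given version stable in dirty_db together with all older synced
// versions of the same object, enqueue the object for flushing, and drop its
// unstable_writes entry once no newer unstable version remains.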
void blockstore_impl_t::mark_stable(const obj_ver_id & v)
{
    auto dirty_it = dirty_db.find(v);
    if (dirty_it != dirty_db.end())
    {
        // Walk backwards, marking older synced versions of the same object stable
        while (1)
        {
            if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_SYNCED)
            {
                dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_STABLE;
            }
            else if (IS_STABLE(dirty_it->second.state))
            {
                break;
            }
            if (dirty_it == dirty_db.begin())
            {
                break;
            }
            dirty_it--;
            if (dirty_it->first.oid != v.oid)
            {
                break;
            }
        }
        flusher->enqueue_flush(v);
    }
    auto unstab_it = unstable_writes.find(v.oid);
    if (unstab_it != unstable_writes.end() &&
        unstab_it->second <= v.version)
    {
        unstable_writes.erase(unstab_it);
    }
}
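// Completion callback for the journal sector writes and the journal fsync:
// verifies the I/O result, then advances the op_state machine once all
// pending I/Os of the current stage have completed.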
void blockstore_impl_t::handle_stable_event(ring_data_t *data, blockstore_op_t *op)
{
    live = true;
    if (data->res != data->iov.iov_len)
    {
        throw std::runtime_error(
            "write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
            "). In-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
        );
    }
    PRIV(op)->pending_ops--;
    if (PRIV(op)->pending_ops == 0)
    {
        PRIV(op)->op_state++;
        if (!continue_stable(op))
        {
            submit_queue.push_front(op);
        }
    }
}