// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

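/* Complete a synced write request: end all user bios attached to the
 * written entries, free any padded pages and advance the write buffer's
 * sync pointer. Returns the new sync position in the ring buffer.
 */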
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
				    c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

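/* Complete a request that was previously parked out of order on the
 * completion list.
 */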
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

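/* Writes must complete in order with respect to the write buffer's sync
 * pointer. If this request is not the next one expected, park it on
 * pblk->compl_list; otherwise complete it, then drain any queued requests
 * that have become contiguous with the sync pointer.
 */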
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
			       int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

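/* Prepare failed write buffer entries for resubmission: clear stale lbas
 * that have since been overwritten in the L2P table, mark the entries as
 * submittable again and drop the per-line references they held.
 */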
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

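/* Queue a copy of the failed completion context so that the write thread
 * picks these entries up before consuming new data from the write buffer.
 */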
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

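/* Recovery work for a failed write: log the error, invalidate the rest of
 * the affected chunk, queue the entries for resubmission and release the
 * resources held by the failed request.
 */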
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
				    c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
	pblk_write_kick(pblk);
}

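/* Called from the write completion path on error. Recovery itself can
 * sleep, so it is offloaded to a workqueue; only the atomic allocation of
 * the recovery context happens here.
 */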
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

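/* Completion callback for user data writes. */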
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
	}

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

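/* Completion callback for emeta writes. Once all emeta sectors for the
 * line have synced, schedule the line close work.
 */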
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
				GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

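/* Set up a user data write: allocate the LUN bitmap for this request and
 * map the buffer entries to physical addresses, scheduling an erase on
 * the next line if one is still pending.
 */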
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				  valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
					valid, erase_ppa);

	return ret;
}

static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

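/* Build and submit one emeta write request for the given line, consuming
 * up to min_write_pgs sectors of the emeta buffer per call.
 */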
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_rqd;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list */
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is not optimal, but
	 * move the optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

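/* Return the line whose emeta should be written alongside this data
 * request, or NULL if no emeta I/O is pending or the metadata PPA would
 * conflict with the data I/O.
 */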
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

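/* Submit one write I/O set: the data write for the current line, an
 * optional erase for the next line and an optional emeta write for a
 * previous line.
 */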
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd, NULL);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
				    c_ctx->nr_padded);
}

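/* Pull sectors out of the write buffer (resubmitted entries first) and
 * submit them to the device. Sets *secs_left when data was consumed so
 * the write thread keeps iterating.
 */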
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					 struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
				secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}

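/* Write thread: submit buffered data until the thread is stopped. Once a
 * submission fails, the thread stops issuing new writes and only sleeps.
 */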
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}