pblk-map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"
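
/* Map one min_write_pgs-sized chunk of write buffer entries to physical
 * addresses on the current data line. Entries past @valid_secs are
 * padded with ADDR_EMPTY and invalidated on the line.
 */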
static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
                              struct ppa_addr *ppa_list,
                              unsigned long *lun_bitmap,
                              void *meta_list,
                              unsigned int valid_secs)
{
        struct pblk_line *line = pblk_line_get_data(pblk);
        struct pblk_emeta *emeta;
        struct pblk_w_ctx *w_ctx;
        __le64 *lba_list;
        u64 paddr;
        int nr_secs = pblk->min_write_pgs;
        int i;

        if (!line)
                return -ENOSPC;

        if (pblk_line_is_full(line)) {
                struct pblk_line *prev_line = line;

                /* If we cannot allocate a new line, make sure to store
                 * metadata on the current line and then fail.
                 */
                line = pblk_line_replace_data(pblk);
                pblk_line_close_meta(pblk, prev_line);

                if (!line) {
                        pblk_pipeline_stop(pblk);
                        return -ENOSPC;
                }
        }

        emeta = line->emeta;
        lba_list = emeta_to_lbas(pblk, emeta->buf);

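        /* Reserve the next nr_secs sectors on the line; paddr is the first */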
        paddr = pblk_alloc_page(pblk, line, nr_secs);

        for (i = 0; i < nr_secs; i++, paddr++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                /* ppa to be sent to the device */
                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                /* Write context for target bio completion on write buffer.
                 * Note that the write buffer is protected by the sync
                 * backpointer, and a single writer thread has access to each
                 * specific entry at a time. Thus, it is safe to modify the
                 * context for the entry we are setting up for submission
                 * without taking any lock or memory barrier.
                 */
                if (i < valid_secs) {
                        kref_get(&line->ref);
                        atomic_inc(&line->sec_to_update);
                        w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
                        w_ctx->ppa = ppa_list[i];
                        meta->lba = cpu_to_le64(w_ctx->lba);
                        lba_list[paddr] = cpu_to_le64(w_ctx->lba);
                        if (lba_list[paddr] != addr_empty)
                                line->nr_valid_lbas++;
                        else
                                atomic64_inc(&pblk->pad_wa);
                } else {
                        lba_list[paddr] = addr_empty;
                        meta->lba = addr_empty;
                        __pblk_map_invalidate(pblk, line, paddr);
                }
        }

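        /* Take the write semaphore on the target LUN; this is a no-op if
         * the request already holds it, as tracked by lun_bitmap.
         */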
        pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
        return 0;
}
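
/* Map a write request chunk by chunk (min_write_pgs sectors at a time),
 * starting at sector offset @off within the request.
 */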
int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                unsigned long *lun_bitmap, unsigned int valid_secs,
                unsigned int off)
{
        void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
        void *meta_buffer;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i;
        int ret;

        for (i = off; i < rqd->nr_ppas; i += min) {
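                /* The last chunk may hold fewer than min valid sectors;
                 * the remainder is padded by pblk_map_page_data().
                 */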
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                meta_buffer = pblk_get_meta(pblk, meta_list, i);

                ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
                                         lun_bitmap, meta_buffer, map_secs);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Map a write request and, as a side effect, pick at most one block to
 * erase on the line currently being prepared (the erase line), returned
 * through @erase_ppa. Only if erase_ppa is set does the caller acquire
 * the erase semaphore.
 */
int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                      unsigned int sentry, unsigned long *lun_bitmap,
                      unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
        void *meta_buffer;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        struct pblk_line *e_line, *d_line;
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i, erase_lun;
        int ret;

        for (i = 0; i < rqd->nr_ppas; i += min) {
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                meta_buffer = pblk_get_meta(pblk, meta_list, i);

                ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
                                         lun_bitmap, meta_buffer, map_secs);
                if (ret)
                        return ret;

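                /* The LUN this chunk mapped to is the candidate position
                 * for picking a block to erase on the erase line.
                 */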
                erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);

                /* line can change after page map. We might also be writing the
                 * last line.
                 */
                e_line = pblk_line_get_erase(pblk);
                if (!e_line)
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                           valid_secs, i + min);

                spin_lock(&e_line->lock);
                if (!test_bit(erase_lun, e_line->erase_bitmap)) {
                        set_bit(erase_lun, e_line->erase_bitmap);
                        atomic_dec(&e_line->left_eblks);

                        *erase_ppa = ppa_list[i];
                        erase_ppa->a.blk = e_line->id;
                        erase_ppa->a.reserved = 0;

                        spin_unlock(&e_line->lock);

                        /* Avoid evaluating e_line->left_eblks */
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                           valid_secs, i + min);
                }
                spin_unlock(&e_line->lock);
        }

        d_line = pblk_line_get_data(pblk);

        /* line can change after page map. We might also be writing the
         * last line.
         */
        e_line = pblk_line_get_erase(pblk);
        if (!e_line)
                return -ENOSPC;

        /* Erase blocks that are bad in this line but might not be in next */
        if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
                        bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
                int bit = -1;

retry:
                bit = find_next_bit(d_line->blk_bitmap,
                                    lm->blk_per_line, bit + 1);
                if (bit >= lm->blk_per_line)
                        return 0;

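                /* Skip positions already scheduled for erase on the
                 * erase line; otherwise claim this one.
                 */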
                spin_lock(&e_line->lock);
                if (test_bit(bit, e_line->erase_bitmap)) {
                        spin_unlock(&e_line->lock);
                        goto retry;
                }
                spin_unlock(&e_line->lock);

                set_bit(bit, e_line->erase_bitmap);
                atomic_dec(&e_line->left_eblks);
                *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
                erase_ppa->a.blk = e_line->id;
        }

        return 0;
}