raid1-10.c

// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
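
/*
 * For example, with a typical 4 KiB PAGE_SIZE this rounds up to
 * 64 KiB / 4 KiB = 16 pages per resync request.
 */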

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/*
 * When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)

/*
 * When we successfully write to a known bad block, we need to remove the
 * bad-block marking, which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
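
/*
 * Note: IO_BLOCKED and IO_MADE_GOOD are the integer values 1 and 2, so
 * BIO_SPECIAL() is true for them and for a NULL pointer, i.e. for any
 * 'bios' slot that does not hold a real bio.
 */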

/*
 * When there are this many requests queued to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;	/* the r1bio/r10bio that owns these pages */
	struct page	*pages[RESYNC_PAGES];
};

static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}
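
/*
 * Note: rbio_pool_free() matches the mempool_free_t signature, so it can
 * be used as the free callback of a mempool that keeps NR_RAID_BIOS raid
 * bios available under memory pressure.
 */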

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}
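
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * pair resync_alloc_pages() with resync_free_pages(). The function name
 * is hypothetical.
 */
static int __maybe_unused example_resync_pages_cycle(gfp_t gfp)
{
	struct resync_pages rp;
	int ret;

	/* one page per slot; on -ENOMEM the partial allocation is unwound */
	ret = resync_alloc_pages(&rp, gfp);
	if (ret)
		return ret;

	/* ... submit resync I/O against rp.pages[] here ... */

	/* drop the references taken by alloc_page() */
	resync_free_pages(&rp);
	return 0;
}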

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/* generally called after bio_reset() for resetting bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
				      int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/*
		 * won't fail because the vec table is big
		 * enough to hold all these pages
		 */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
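
/*
 * Illustrative sketch, not part of the original file: the wiring implied
 * by the helpers above. bio_reset() wipes ->bi_private along with the
 * bvec table, so the resync_pages pointer is saved first and restored
 * afterwards. The function name and the full-size 'size' argument are
 * hypothetical; this assumes the one-argument bio_reset() of this era.
 */
static void __maybe_unused example_reuse_resync_bio(struct bio *bio)
{
	/* save the per-bio page set before bio_reset() clears it */
	struct resync_pages *rp = get_resync_pages(bio);

	bio_reset(bio);
	bio->bi_private = rp;

	/* rebuild the bvec table from the same pages */
	md_bio_reset_resync_pages(bio, rp, RESYNC_BLOCK_SIZE);
}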