badblocks.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Bad block management
 *
 * - Heavily based on MD badblocks code from Neil Brown
 *
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/badblocks.h>
#include <linux/seqlock.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/slab.h>
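
/*
 * Worked example of the 64-bit entry encoding described below, using the
 * BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK helpers from <linux/badblocks.h>
 * (illustrative only, not part of this file):
 *
 *	u64 e = BB_MAKE(0x1000, 8, 1);	// start 0x1000, 8 sectors, acked
 *
 *	BB_OFFSET(e);	// 0x1000 - bits 9..62 hold the start sector
 *	BB_LEN(e);	// 8      - bits 0..8 store length-1, giving 1..512
 *	BB_ACK(e);	// 1      - bit 63 is the 'acknowledged' flag
 */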

/**
 * badblocks_check() - check a given range for bad sectors
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		sector (start) at which to check for badblocks
 * @sectors:	number of sectors to check for badblocks
 * @first_bad:	pointer to store location of the first badblock
 * @bad_sectors: pointer to store number of badblocks after @first_bad
 *
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64 bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 * A 'shift' can be set so that larger blocks are tracked and
 * consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so badblocks_check
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint
 * (or "before the sector after the target range"),
 * then see if it ends after the given start.
 *
 * Return:
 *  0: there are no known bad blocks in the range
 *  1: there are known bad blocks which are all acknowledged
 * -1: there are bad blocks which have not yet been acknowledged in metadata,
 * plus the start/length of the first bad section we overlap.
 */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
		    sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not.
			 */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_check);
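
/*
 * Illustrative caller sketch (not part of this file): a driver with an
 * already-initialized table might fail reads that overlap an unacknowledged
 * bad range.  'sector' and 'nr_sectors' are hypothetical request fields.
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (badblocks_check(bb, sector, nr_sectors,
 *			    &first_bad, &bad_sectors) < 0)
 *		return -EIO;	// unacknowledged bad blocks in the range
 */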

static void badblocks_update_acked(struct badblocks *bb)
{
	u64 *p = bb->page;
	int i;
	bool unacked = false;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count ; i++) {
		if (!BB_ACK(p[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

/**
 * badblocks_set() - Add a range of bad blocks to the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to mark as bad
 * @sectors:	number of sectors to mark as bad
 * @acknowledged: whether to mark the bad sectors as acknowledged
 *
 * This might extend the table, or might contract it if two adjacent ranges
 * can be merged.  We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 *
 * Return:
 *  0: success
 *  1: failed to set badblocks (out of space)
 */
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
		  int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 0;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 1;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;

		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);

		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range
		 */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);

		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);

		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);

			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi'
		 */
		if (bb->count >= MAX_BADBLOCKS) {
			/* No room for more */
			rv = 1;
			break;
		} else {
			int this_sectors = sectors;

			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	else
		badblocks_update_acked(bb);
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_set);
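
/*
 * Illustrative caller sketch (not part of this file): record a media error
 * reported at 'sector' spanning 'nr_sectors' (hypothetical names), not yet
 * acknowledged in on-disk metadata.
 *
 *	if (badblocks_set(bb, sector, nr_sectors, 0))
 *		pr_warn("bad-block table full, error at %llu not recorded\n",
 *			(unsigned long long)sector);
 */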

/**
 * badblocks_clear() - Remove a range of bad blocks from the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to clear
 * @sectors:	number of sectors to clear
 *
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 *
 * Return:
 *  0: success
 *  -ENOSPC: failed to clear badblocks (table full, range could not be split)
 */
int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
		    (BB_OFFSET(p[lo]) < target)) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
		       (BB_OFFSET(p[lo]) < target)) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);

				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	badblocks_update_acked(bb);
	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_clear);
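
/*
 * Illustrative caller sketch (not part of this file): after successfully
 * rewriting a previously-bad region (hypothetical 'sector'/'nr_sectors'),
 * drop it from the table.
 *
 *	if (badblocks_clear(bb, sector, nr_sectors))
 *		pr_warn("could not clear bad blocks at %llu\n",
 *			(unsigned long long)sector);
 */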

/**
 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
 * @bb:		the badblocks structure that holds all badblock information
 *
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;

	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(ack_all_badblocks);
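
/*
 * Illustrative sketch (not part of this file): a metadata writer would
 * typically clear ->changed once the table has been persisted, then
 * acknowledge everything in one go:
 *
 *	bb->changed = 0;	// metadata write has completed
 *	ack_all_badblocks(bb);
 */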

/**
 * badblocks_show() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer to fill for the sysfs read
 * @unack:	whether to show only unacknowledged badblocks
 *
 * Return:
 *  Length of returned data
 */
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
EXPORT_SYMBOL_GPL(badblocks_show);

/**
 * badblocks_store() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer received from sysfs
 * @len:	length of data received from sysfs
 * @unack:	whether to store the badblocks as unacknowledged
 *
 * Return:
 *  Length of the buffer processed or -ve error.
 */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
			int unack)
{
	unsigned long long sector;
	int length;
	char newline;

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		fallthrough;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (badblocks_set(bb, sector, length, !unack))
		return -ENOSPC;
	else
		return len;
}
EXPORT_SYMBOL_GPL(badblocks_store);
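
/*
 * Illustrative sysfs wiring sketch (hypothetical; real users wrap these in
 * their own device-attribute handlers).  'struct my_dev' and
 * dev_to_my_dev() are assumptions for the example:
 *
 *	static ssize_t bb_show(struct device *dev,
 *			       struct device_attribute *attr, char *page)
 *	{
 *		struct my_dev *d = dev_to_my_dev(dev);
 *
 *		return badblocks_show(&d->bb, page, 0);
 *	}
 *
 *	static ssize_t bb_store(struct device *dev,
 *				struct device_attribute *attr,
 *				const char *page, size_t len)
 *	{
 *		struct my_dev *d = dev_to_my_dev(dev);
 *
 *		return badblocks_store(&d->bb, page, len, 0);
 *	}
 */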

static int __badblocks_init(struct device *dev, struct badblocks *bb,
			    int enable)
{
	bb->dev = dev;
	bb->count = 0;
	if (enable)
		bb->shift = 0;
	else
		bb->shift = -1;
	if (dev)
		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
	else
		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->page) {
		bb->shift = -1;
		return -ENOMEM;
	}
	seqlock_init(&bb->lock);

	return 0;
}

/**
 * badblocks_init() - initialize the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 * @enable:	whether to enable badblocks accounting
 *
 * Return:
 *  0: success
 *  -ve errno: on error
 */
int badblocks_init(struct badblocks *bb, int enable)
{
	return __badblocks_init(NULL, bb, enable);
}
EXPORT_SYMBOL_GPL(badblocks_init);

int devm_init_badblocks(struct device *dev, struct badblocks *bb)
{
	if (!bb)
		return -EINVAL;
	return __badblocks_init(dev, bb, 1);
}
EXPORT_SYMBOL_GPL(devm_init_badblocks);

/**
 * badblocks_exit() - free the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 */
void badblocks_exit(struct badblocks *bb)
{
	if (!bb)
		return;
	if (bb->dev)
		devm_kfree(bb->dev, bb->page);
	else
		kfree(bb->page);
	bb->page = NULL;
}
EXPORT_SYMBOL_GPL(badblocks_exit);
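
/*
 * Illustrative lifecycle sketch (not part of this file), assuming a
 * hypothetical driver-private 'struct my_dev' with an embedded table:
 *
 *	struct my_dev {
 *		struct badblocks bb;
 *		...
 *	};
 *
 *	err = badblocks_init(&d->bb, 1);	// enabled, shift == 0
 *	if (err)
 *		return err;
 *	...
 *	badblocks_exit(&d->bb);			// on teardown
 *
 * With a struct device available, devm_init_badblocks(dev, &d->bb) ties the
 * table's page allocation to the device's lifetime instead.
 */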