dm-dust.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 *
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

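/*
 * Per-block state for a simulated bad block, kept in the badblocklist
 * rbtree and keyed by block number (bb).  wr_fail_cnt is the number of
 * writes to this block that are still failed before a write removes
 * the block from the list.
 */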
struct badblock {
	struct rb_node node;
	sector_t bb;
	unsigned char wr_fail_cnt;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

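/*
 * rbtree helpers: the bad block list is kept sorted by block number,
 * and callers hold dust_lock while searching or inserting.
 */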
static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);
	return 0;
}

static int dust_add_block(struct dust_device *dd, unsigned long long block,
			  unsigned char wr_fail_cnt)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	bblock->wr_fail_cnt = wr_fail_cnt;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode) {
		DMINFO("%s: badblock added at block %llu with write fail count %hhu",
		       __func__, block, wr_fail_cnt);
	}
	spin_unlock_irqrestore(&dd->dust_lock, flags);
	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block, char *result,
			    unsigned int maxlen, unsigned int *sz_ptr)
{
	struct badblock *bblock;
	unsigned long flags;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMEMIT("%s: block %llu found in badblocklist", __func__, block);
	else
		DMEMIT("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 1;
}

static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

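/*
 * A write to a listed block is failed while its wr_fail_cnt is nonzero
 * (decrementing the count each time); once the count is exhausted, the
 * next write succeeds and drops the block from the list, much like a
 * drive remapping a bad sector on write.
 */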
static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk && bblk->wr_fail_cnt > 0) {
		bblk->wr_fail_cnt--;
		return DM_MAPIO_KILL;
	}

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}

	return DM_MAPIO_REMAPPED;
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int r;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return r;
}

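/*
 * Frees every node in the given rbtree.  The tree is either one that
 * was detached from the device under dust_lock (clearbadblocks) or the
 * device's own tree at destroy time, so no locking is needed here.
 */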
static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
				unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMEMIT("%s: no badblocks found", __func__);
	else
		DMEMIT("%s: badblocks cleared", __func__);

	return 1;
}

static int dust_list_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
			       unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	struct rb_node *node;
	struct badblock *bblk;
	unsigned int sz = *sz_ptr;
	unsigned long long num = 0;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	for (node = rb_first(&badblocklist); node; node = rb_next(node)) {
		bblk = rb_entry(node, struct badblock, node);
		DMEMIT("%llu\n", bblk->bb);
		num++;
	}
	spin_unlock_irqrestore(&dd->dust_lock, flags);
	if (!num)
		DMEMIT("No blocks in badblocklist");

	return 1;
}

/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */

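/*
 * Example (a sketch; the device path and length are illustrative, not
 * taken from this file): a dust target covering all of /dev/sdb1 with
 * a 512-byte block size could be created with a table line such as
 *
 *   0 <device_size_in_sectors> dust /dev/sdb1 0 512
 *
 * passed to "dmsetup create".  Read failures stay off until an
 * "enable" message is sent (see dust_message() below).
 */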
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

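/*
 * Messages (sent via the usual "dmsetup message <device> 0 <msg> [args]"
 * mechanism); names and argument counts match the handlers below:
 *
 *   enable / disable        turn read failures on bad blocks on or off
 *   addbadblock <blk> [n]   mark <blk> bad; optionally fail the next n writes
 *   removebadblock <blk>    unmark <blk>
 *   queryblock <blk>        report whether <blk> is in the bad block list
 *   countbadblocks          report the number of bad blocks
 *   clearbadblocks          empty the bad block list
 *   listbadblocks           emit every bad block number
 *   quiet                   toggle suppression of informational log messages
 */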
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	unsigned int sz = 0;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMEMIT("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 1;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd, result, maxlen, &sz);
		} else if (!strcasecmp(argv[0], "quiet")) {
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "listbadblocks")) {
			r = dust_list_badblocks(dd, result, maxlen, &sz);
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block, result, maxlen, &sz);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}

static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;
	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int r = dm_register_target(&dust_target);

	if (r < 0)
		DMERR("dm_register_target failed %d", r);

	return r;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");