blk-zoned.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
        ZONE_COND_NAME(NOT_WP),
        ZONE_COND_NAME(EMPTY),
        ZONE_COND_NAME(IMP_OPEN),
        ZONE_COND_NAME(EXP_OPEN),
        ZONE_COND_NAME(CLOSED),
        ZONE_COND_NAME(READONLY),
        ZONE_COND_NAME(FULL),
        ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME
/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful for debugging and tracing zone conditions. For
 * an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
        static const char *zone_cond_str = "UNKNOWN";

        if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
                zone_cond_str = zone_cond_name[zone_cond];

        return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
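
/*
 * Example (illustrative sketch, not part of the original file): converting a
 * zone condition to a human readable string for a debug message. The function
 * name example_log_zone_cond() is hypothetical.
 */
static inline void example_log_zone_cond(struct blk_zone *zone)
{
        /* zone->cond is a BLK_ZONE_COND_* value reported by the device */
        pr_debug("zone at sector %llu is %s\n",
                 zone->start, blk_zone_cond_str(zone->cond));
}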

static inline sector_t blk_zone_start(struct request_queue *q,
                                      sector_t sector)
{
        sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

        return sector & ~zone_mask;
}

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
        if (!rq->q->seq_zones_wlock)
                return false;

        if (blk_rq_is_passthrough(rq))
                return false;

        switch (req_op(rq)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE_SAME:
        case REQ_OP_WRITE:
                return blk_rq_zone_is_seq(rq);
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
        unsigned int zno = blk_rq_zone_no(rq);

        if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
                return false;

        WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
        rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

        return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
        if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
                                          rq->q->seq_zones_wlock)))
                return;

        WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
        rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
        rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
        if (rq->q->seq_zones_wlock)
                WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
                                                 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
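
/*
 * Example (illustrative sketch, not part of the original file): the typical
 * pattern an I/O scheduler follows when dispatching writes to sequential
 * zones, loosely modeled on how mq-deadline uses these helpers. The function
 * name example_dispatch_zoned_write() is hypothetical; the zone write lock
 * taken here is released with blk_req_zone_write_unlock() once the request
 * completes.
 */
static struct request *example_dispatch_zoned_write(struct request *rq)
{
        /*
         * Requests that do not need zone write locking (reads, writes to
         * conventional zones, passthrough requests) can always be dispatched.
         * Otherwise, dispatch only if the target zone is not already locked.
         */
        if (!blk_req_needs_zone_write_lock(rq) ||
            blk_req_zone_write_trylock(rq))
                return rq;

        /* The zone is busy: leave the request for a later dispatch. */
        return NULL;
}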

/**
 * blkdev_nr_zones - Get number of zones
 * @disk: Target gendisk
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
        sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

        if (!blk_queue_is_zoned(disk->queue))
                return 0;

        return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
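
/*
 * Example (illustrative sketch, not part of the original file): sizing a
 * per-zone array for a disk using blkdev_nr_zones(). The function name and
 * allocation policy are hypothetical.
 */
static void *example_alloc_per_zone_data(struct gendisk *disk,
                                         size_t elem_size)
{
        unsigned int nr_zones = blkdev_nr_zones(disk);

        /* A non-zoned device has no zones and hence no per-zone data. */
        if (!nr_zones)
                return NULL;

        return kvcalloc(nr_zones, elem_size, GFP_KERNEL);
}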

/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct gendisk *disk = bdev->bd_disk;
        sector_t capacity = get_capacity(disk);

        if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
            WARN_ON_ONCE(!disk->fops->report_zones))
                return -EOPNOTSUPP;

        if (!nr_zones || sector >= capacity)
                return 0;

        return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
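
/*
 * Example (illustrative sketch, not part of the original file): counting the
 * sequential write required zones of a device with blkdev_report_zones() and
 * a report_zones_cb callback. The function names are hypothetical; a real
 * caller may also need memalloc_noio_save()/restore() around the call, as
 * noted in the kernel-doc above.
 */
static int example_count_seq_cb(struct blk_zone *zone, unsigned int idx,
                                void *data)
{
        unsigned int *nr_seq = data;

        if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
                (*nr_seq)++;

        return 0;
}

static int example_count_seq_zones(struct block_device *bdev,
                                   unsigned int *nr_seq)
{
        int ret;

        *nr_seq = 0;

        /* Walk all zones of the device, starting from sector 0. */
        ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
                                  example_count_seq_cb, nr_seq);

        return ret < 0 ? ret : 0;
}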

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
                                                sector_t sector,
                                                sector_t nr_sectors)
{
        if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
                return false;

        /*
         * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
         * of the applicable zone range is the entire disk.
         */
        return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone
 *              and must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
                     sector_t sector, sector_t nr_sectors,
                     gfp_t gfp_mask)
{
        struct request_queue *q = bdev_get_queue(bdev);
        sector_t zone_sectors = blk_queue_zone_sectors(q);
        sector_t capacity = get_capacity(bdev->bd_disk);
        sector_t end_sector = sector + nr_sectors;
        struct bio *bio = NULL;
        int ret;

        if (!blk_queue_is_zoned(q))
                return -EOPNOTSUPP;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (!op_is_zone_mgmt(op))
                return -EOPNOTSUPP;

        if (end_sector <= sector || end_sector > capacity)
                /* Out of range */
                return -EINVAL;

        /* Check alignment (handle eventual smaller last zone) */
        if (sector & (zone_sectors - 1))
                return -EINVAL;

        if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
                return -EINVAL;

        while (sector < end_sector) {
                bio = blk_next_bio(bio, 0, gfp_mask);
                bio_set_dev(bio, bdev);

                /*
                 * Special case for the zone reset operation that resets all
                 * zones, this is useful for applications like mkfs.
                 */
                if (op == REQ_OP_ZONE_RESET &&
                    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
                        bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
                        break;
                }

                bio->bi_opf = op | REQ_SYNC;
                bio->bi_iter.bi_sector = sector;
                sector += zone_sectors;

                /* This may take a while, so be nice to others */
                cond_resched();
        }

        ret = submit_bio_wait(bio);
        bio_put(bio);

        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
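
/*
 * Example (illustrative sketch, not part of the original file): resetting the
 * single zone containing a given sector, similar to what a file system or a
 * device mapper target might do before rewriting the zone. The function name
 * and the GFP_NOFS choice are hypothetical.
 */
static int example_reset_zone(struct block_device *bdev, sector_t sector)
{
        sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));

        /* Round down to the zone start; zone sizes are a power of two. */
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
                                sector & ~(zone_sectors - 1), zone_sectors,
                                GFP_NOFS);
}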

struct zone_report_args {
        struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
                                    void *data)
{
        struct zone_report_args *args = data;

        if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
                return -EFAULT;
        return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct zone_report_args args;
        struct request_queue *q;
        struct blk_zone_report rep;
        int ret;

        if (!argp)
                return -EINVAL;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!blk_queue_is_zoned(q))
                return -ENOTTY;

        if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
                return -EFAULT;

        if (!rep.nr_zones)
                return -EINVAL;

        args.zones = argp + sizeof(struct blk_zone_report);
        ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
                                  blkdev_copy_zone_to_user, &args);
        if (ret < 0)
                return ret;

        rep.nr_zones = ret;
        rep.flags = BLK_ZONE_REP_CAPACITY;
        if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
                return -EFAULT;

        return 0;
}
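
/*
 * Example (illustrative sketch, not part of the original file): the user space
 * side of BLKREPORTZONE. As the args.zones assignment above implies, the ioctl
 * argument is a struct blk_zone_report header immediately followed in memory
 * by an array of up to nr_zones struct blk_zone entries. The function name is
 * hypothetical and error handling is minimal.
 */
#if 0 /* user space sketch, not kernel code */
#include <linux/blkzoned.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static int report_some_zones(int fd, unsigned long long sector,
                             unsigned int nr_zones)
{
        struct blk_zone_report *rep;
        int ret;

        rep = calloc(1, sizeof(*rep) + nr_zones * sizeof(struct blk_zone));
        if (!rep)
                return -1;

        rep->sector = sector;
        rep->nr_zones = nr_zones;

        ret = ioctl(fd, BLKREPORTZONE, rep);
        /*
         * On success, rep->nr_zones holds the number of zones actually
         * reported and the zone descriptors follow the header.
         */

        free(rep);
        return ret;
}
#endif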

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
                                      const struct blk_zone_range *zrange)
{
        loff_t start, end;

        if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
            zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
                /* Out of range */
                return -EINVAL;

        start = zrange->sector << SECTOR_SHIFT;
        end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

        return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct request_queue *q;
        struct blk_zone_range zrange;
        enum req_opf op;
        int ret;

        if (!argp)
                return -EINVAL;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!blk_queue_is_zoned(q))
                return -ENOTTY;

        if (!(mode & FMODE_WRITE))
                return -EBADF;

        if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
                return -EFAULT;

        switch (cmd) {
        case BLKRESETZONE:
                op = REQ_OP_ZONE_RESET;

                /* Invalidate the page cache, including dirty pages. */
                ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
                if (ret)
                        return ret;
                break;
        case BLKOPENZONE:
                op = REQ_OP_ZONE_OPEN;
                break;
        case BLKCLOSEZONE:
                op = REQ_OP_ZONE_CLOSE;
                break;
        case BLKFINISHZONE:
                op = REQ_OP_ZONE_FINISH;
                break;
        default:
                return -ENOTTY;
        }

        ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
                               GFP_KERNEL);

        /*
         * Invalidate the page cache again for zone reset: writes can only be
         * direct for zoned devices so concurrent writes would not add any page
         * to the page cache after/during reset. The page cache may be filled
         * again due to concurrent reads though and dropping the pages for
         * these is fine.
         */
        if (!ret && cmd == BLKRESETZONE)
                ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

        return ret;
}
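
/*
 * Example (illustrative sketch, not part of the original file): the user space
 * side of BLKRESETZONE. The device must be opened for writing, matching the
 * FMODE_WRITE check above; the same struct blk_zone_range argument is used by
 * BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE. The function name is
 * hypothetical.
 */
#if 0 /* user space sketch, not kernel code */
#include <linux/blkzoned.h>
#include <sys/ioctl.h>

static int reset_zone_range(int fd, unsigned long long sector,
                            unsigned long long nr_sectors)
{
        struct blk_zone_range zrange = {
                .sector = sector,
                .nr_sectors = nr_sectors,
        };

        return ioctl(fd, BLKRESETZONE, &zrange);
}
#endif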

static inline unsigned long *blk_alloc_zone_bitmap(int node,
                                                   unsigned int nr_zones)
{
        return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
                            GFP_NOIO, node);
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
        kfree(q->conv_zones_bitmap);
        q->conv_zones_bitmap = NULL;
        kfree(q->seq_zones_wlock);
        q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
        struct gendisk *disk;
        unsigned long *conv_zones_bitmap;
        unsigned long *seq_zones_wlock;
        unsigned int nr_zones;
        sector_t zone_sectors;
        sector_t sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
                                  void *data)
{
        struct blk_revalidate_zone_args *args = data;
        struct gendisk *disk = args->disk;
        struct request_queue *q = disk->queue;
        sector_t capacity = get_capacity(disk);

        /*
         * All zones must have the same size, with the exception of an eventual
         * smaller last zone.
         */
        if (zone->start == 0) {
                if (zone->len == 0 || !is_power_of_2(zone->len)) {
                        pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
                                disk->disk_name, zone->len);
                        return -ENODEV;
                }

                args->zone_sectors = zone->len;
                args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
        } else if (zone->start + args->zone_sectors < capacity) {
                if (zone->len != args->zone_sectors) {
                        pr_warn("%s: Invalid zoned device with non constant zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
        } else {
                if (zone->len > args->zone_sectors) {
                        pr_warn("%s: Invalid zoned device with larger last zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
        }

        /* Check for holes in the zone report */
        if (zone->start != args->sector) {
                pr_warn("%s: Zone gap at sectors %llu..%llu\n",
                        disk->disk_name, args->sector, zone->start);
                return -ENODEV;
        }

        /* Check zone type */
        switch (zone->type) {
        case BLK_ZONE_TYPE_CONVENTIONAL:
                if (!args->conv_zones_bitmap) {
                        args->conv_zones_bitmap =
                                blk_alloc_zone_bitmap(q->node, args->nr_zones);
                        if (!args->conv_zones_bitmap)
                                return -ENOMEM;
                }
                set_bit(idx, args->conv_zones_bitmap);
                break;
        case BLK_ZONE_TYPE_SEQWRITE_REQ:
        case BLK_ZONE_TYPE_SEQWRITE_PREF:
                if (!args->seq_zones_wlock) {
                        args->seq_zones_wlock =
                                blk_alloc_zone_bitmap(q->node, args->nr_zones);
                        if (!args->seq_zones_wlock)
                                return -ENOMEM;
                }
                break;
        default:
                pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
                        disk->disk_name, (int)zone->type, zone->start);
                return -ENODEV;
        }

        args->sector += zone->len;
        return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * the zone bitmaps of a disk request queue. This function should normally be
 * called from within the disk ->revalidate method for blk-mq based drivers.
 * For BIO based drivers only q->nr_zones needs to be updated so that the
 * sysfs exposed value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
                              void (*update_driver_data)(struct gendisk *disk))
{
        struct request_queue *q = disk->queue;
        struct blk_revalidate_zone_args args = {
                .disk = disk,
        };
        unsigned int noio_flag;
        int ret;

        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return -EIO;
        if (WARN_ON_ONCE(!queue_is_mq(q)))
                return -EIO;

        if (!get_capacity(disk))
                return -EIO;

        /*
         * Ensure that all memory allocations in this context are done as if
         * GFP_NOIO was specified.
         */
        noio_flag = memalloc_noio_save();
        ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                                       blk_revalidate_zone_cb, &args);
        memalloc_noio_restore(noio_flag);

        /*
         * Install the new bitmaps and update nr_zones only once the queue is
         * stopped and all I/Os are completed (i.e. a scheduler is not
         * referencing the bitmaps).
         */
        blk_mq_freeze_queue(q);
        if (ret >= 0) {
                blk_queue_chunk_sectors(q, args.zone_sectors);
                q->nr_zones = args.nr_zones;
                swap(q->seq_zones_wlock, args.seq_zones_wlock);
                swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
                if (update_driver_data)
                        update_driver_data(disk);
                ret = 0;
        } else {
                pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
                blk_queue_free_zone_bitmaps(q);
        }
        blk_mq_unfreeze_queue(q);

        kfree(args.seq_zones_wlock);
        kfree(args.conv_zones_bitmap);
        return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
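
/*
 * Example (illustrative sketch, not part of the original file): how a blk-mq
 * based driver might revalidate its zones after (re)reading the device
 * geometry, with an optional callback that runs while the request queue is
 * frozen. The function names are hypothetical.
 */
static void example_update_driver_data(struct gendisk *disk)
{
        /* Runs with the request queue frozen; safe to swap driver state. */
}

static int example_driver_revalidate(struct gendisk *disk)
{
        /* Nothing to do for a device without zones. */
        if (!blk_queue_is_zoned(disk->queue))
                return 0;

        return blk_revalidate_disk_zones(disk, example_update_driver_data);
}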