blk-sysfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
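
/*
 * For reference, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) above expands to a
 * queue_nonrot_show()/queue_nonrot_store() pair operating on
 * QUEUE_FLAG_NONROT with the value inverted (neg == 1); the show side is
 * roughly:
 *
 *	static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *	{
 *		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *		return queue_var_show(!bit, page);
 *	}
 *
 * so the "rotational" attribute (see QUEUE_RW_ENTRY(queue_nonrot,
 * "rotational") further down) reads 0 for non-rotational devices.
 */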
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}
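
/*
 * Note on units for the two helpers above: the io_poll_delay attribute is
 * exposed in microseconds while q->poll_nsec is kept in nanoseconds, hence
 * the "* 1000" on store and "/ 1000" on show.  The sentinel
 * BLK_MQ_POLL_CLASSIC (-1) is passed through unconverted and selects classic
 * (non-hybrid) polling.
 */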
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
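
/*
 * As an illustration, QUEUE_RW_ENTRY(queue_requests, "nr_requests") below
 * expands to roughly:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 *
 * i.e. each entry ties a sysfs file name and mode to the show/store helpers
 * defined above; RO entries simply omit ->store.
 */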
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}
/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};
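
/*
 * Reads and writes of the attributes declared above are routed by the kobject
 * core through queue_sysfs_ops: queue_attr_show()/queue_attr_store() take
 * q->sysfs_lock and then dispatch to the per-attribute ->show()/->store()
 * handler.  For example (assuming a disk named "sda"):
 *
 *	cat /sys/block/sda/queue/nr_requests
 *	echo 128 > /sys/block/sda/queue/nr_requests
 *
 * end up in queue_requests_show() and queue_requests_store() respectively.
 */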
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	blk_queue_update_readahead(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);

	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}