blk-mq-debugfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
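
/*
 * queue_poll_stat_show() prints one line per poll-stat bucket: even entries
 * of q->poll_stat[] are labelled as reads, odd entries as writes, and bucket
 * N is labelled with an I/O size of 1 << (9 + N) bytes.
 */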
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}
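
/*
 * "requeue_list" walks q->requeue_list under q->requeue_lock; each request
 * on the list is formatted by blk_mq_debugfs_rq_show().
 */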
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
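
/*
 * Print the set bits of @flags as a '|'-separated list, using the name from
 * @flag_name[] when one is available and the raw bit number otherwise.
 */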
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
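
/*
 * Writing to "state" pokes the queue: "run" runs the hardware queues,
 * "start" restarts stopped hardware queues and "kick" kicks the requeue
 * list.  Any other input is rejected with -EINVAL.
 */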
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has
	 * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
	 * to avoid triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
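
/*
 * cmd_flag_name[] is indexed by the __REQ_* bit number and rqf_name[] by the
 * RQF_* bit number; both are consumed by blk_flags_show() when a request is
 * dumped.
 */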
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}
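
/*
 * Dump a single request: operation, command flags, rq_flags, blk-mq state
 * and (internal) tag.  Drivers that implement ->show_rq() get a chance to
 * append their own per-request details before the closing brace.
 */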
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
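
/*
 * "busy" iterates the whole tag set with blk_mq_tagset_busy_iter() and
 * prints only the busy requests that belong to this hardware queue, so both
 * the seq_file and the hctx are passed through the iterator's data pointer.
 */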
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}
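
/*
 * The tags/sched_tags attributes take q->sysfs_lock (interruptibly, since
 * this runs from a debugfs read) so that hctx->tags and hctx->sched_tags
 * cannot be freed underneath us, e.g. while the elevator or the queue depth
 * is being changed.
 */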
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}
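
/*
 * The io_poll, dispatched, queued and run counters below are plain
 * statistics; writing anything to the corresponding attribute resets them
 * to zero.
 */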
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
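
/*
 * CTX_RQ_SEQ_OPS() generates the start/next/stop seq_file callbacks for one
 * of the per-CPU software queue request lists (default, read or poll); the
 * list is walked under ctx->lock and each request is printed by
 * blk_mq_debugfs_rq_show().
 */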
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
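
/*
 * Every debugfs file created by debugfs_create_files() stores its attribute
 * descriptor in the file inode's i_private and the object it describes
 * (queue, hctx, ctx, rqos, ...) in the parent directory's inode; the helpers
 * below retrieve both from there.
 */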
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
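
/*
 * Create one debugfs file per attribute under @parent and stash @data in the
 * parent directory's inode so blk_mq_debugfs_open() can find the object the
 * files refer to.  Nothing is created when @parent is an error pointer or
 * NULL (e.g. debugfs unavailable).
 */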
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return, we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}
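
/*
 * rq-qos policies (writeback throttling, I/O latency, I/O cost, I/O
 * priority) each get a directory named after their policy under the queue's
 * "rqos" debugfs directory.
 */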
static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}