kyber-iosched.c

// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
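
/*
 * For example, with the default 2 ms read target the bucket width is
 * 2 ms / (1 << KYBER_LATENCY_SHIFT) = 500 us, so reads are counted in the
 * <= 0.5 ms, <= 1 ms, <= 1.5 ms, and <= 2 ms "good" buckets, followed by the
 * <= 2.5 ms, <= 3 ms, <= 3.5 ms, and > 3.5 ms "bad" buckets.
 */
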
/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * The mapping between kcq and khd mirrors the mapping between ctx and hctx:
 * we use request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * Also protects the requests on rq_list during merges.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}
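
/*
 * Note that any operation other than a read, write, or discard (for example,
 * REQ_OP_WRITE_ZEROES) falls into KYBER_OTHER, which has no latency target
 * and only a small token pool.
 */
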
static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}
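
/*
 * For example, with buckets [10, 20, 30, 40, 0, 0, 0, 0] (100 samples) and
 * percentile = 90, percentile_samples starts at 90; buckets 0-2 account for
 * 60 samples, leaving 30, which bucket 3 (40 samples) covers, so bucket 3 is
 * returned (assuming the 500-sample/one-second window has already been met).
 */
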
static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}
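
/*
 * For example, if the read domain is at its full depth of 256 tokens and its
 * p99 lands in bucket 2 (<= 3/4 of the target), the new depth is
 * (256 * 3) >> 2 = 192. A p99 in bucket 7 (> 1 3/4 of the target) would ask
 * for (256 * 8) >> 2 = 512, which kyber_resize_domain() clamps back down to
 * the 256-token maximum.
 */
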
static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
	/*
	 * All of the hardware queues have the same depth, so we can just grab
	 * the shift of the first one.
	 */
	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	unsigned int shift;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	shift = kyber_sched_tags_shift(q);
	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}
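
/*
 * For example, if the scheduler tags use 64-bit sbitmap words (shift == 6),
 * async_depth is 64 * 75 / 100 = 48: asynchronous requests may occupy at most
 * 48 of the 64 tags in each word, so at least a quarter of the tags remain
 * available for synchronous requests.
 */
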
static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
					kqd->async_depth);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
			    unsigned int nr_segs)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		blk_mq_sched_request_inserted(rq);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}
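
/*
 * For example, with the default 2 ms read target the divisor is 500000 ns, so
 * a 1.2 ms sample computes (1200000 - 1) / 500000 = 2 and lands in bucket 2
 * (<= 3/4 of the target), while anything slower than 3.5 ms saturates in
 * bucket 7, the last "bad" bucket.
 */
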
static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}
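
/*
 * For example, if cur_domain is KYBER_WRITE and the 8-request write batch is
 * exhausted (or writes have no requests or tokens), the loop above tries
 * KYBER_DISCARD, then KYBER_OTHER, then KYBER_READ, and finally wraps back to
 * KYBER_WRITE before returning NULL.
 */
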
static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name) \
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
				       char *page) \
{ \
	struct kyber_queue_data *kqd = e->elevator_data; \
 \
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
} \
 \
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
					const char *page, size_t count) \
{ \
	struct kyber_queue_data *kqd = e->elevator_data; \
	unsigned long long nsec; \
	int ret; \
 \
	ret = kstrtoull(page, 10, &nsec); \
	if (ret) \
		return ret; \
 \
	kqd->latency_targets[domain] = nsec; \
 \
	return count; \
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
{ \
	struct request_queue *q = data; \
	struct kyber_queue_data *kqd = q->elevator->elevator_data; \
 \
	sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
	return 0; \
} \
 \
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
	__acquires(&khd->lock) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	spin_lock(&khd->lock); \
	return seq_list_start(&khd->rqs[domain], *pos); \
} \
 \
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
				     loff_t *pos) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	return seq_list_next(v, &khd->rqs[domain], pos); \
} \
 \
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
	__releases(&khd->lock) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	spin_unlock(&khd->lock); \
} \
 \
static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
	.start	= kyber_##name##_rqs_start, \
	.next	= kyber_##name##_rqs_next, \
	.stop	= kyber_##name##_rqs_stop, \
	.show	= blk_mq_debugfs_rq_show, \
}; \
 \
static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{ \
	struct blk_mq_hw_ctx *hctx = data; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
 \
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
	return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name) \
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");