sbitmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

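/*
 * Each word of the bitmap is paired with a "cleared" mask: freed bits are
 * first marked in ->cleared and only folded back into ->word (under the
 * per-word swap_lock) once an allocation in that word fails. This keeps the
 * common free path down to a single set_bit() on ->cleared.
 */
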
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&sb->map[index].cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

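/*
 * Example of the shift selection below (assuming BITS_PER_LONG == 64):
 * for depth == 100 and shift < 0, shift starts at ilog2(64) == 6 and is
 * reduced while (4U << shift) > depth, i.e. 256 > 100 -> 5, 128 > 100 -> 4,
 * then 64 <= 100 stops the loop. That yields bits_per_word == 16,
 * map_nr == DIV_ROUND_UP(100, 16) == 7, with the last word 4 bits deep.
 */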
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

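/*
 * Usage sketch (illustrative only, not part of this file; sbitmap_clear_bit()
 * and sbitmap_free() are declared in <linux/sbitmap.h>):
 *
 *	struct sbitmap sb;
 *	int bit;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *	bit = sbitmap_get(&sb, 0, false);	// -1 if the map is full
 *	if (bit >= 0)
 *		sbitmap_clear_bit(&sb, bit);
 *	sbitmap_free(&sb);
 */
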
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

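/*
 * Find and atomically set a zero bit in *word, starting the search at @hint
 * and considering only the first @depth bits. If @wrap is true and the scan
 * started past bit 0, a failed pass retries once from the beginning of the
 * word. Returns the bit number, or -1 if no free bit was found.
 */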
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

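/*
 * Note on the "shallow" variants: @shallow_depth caps how many bits of each
 * word may be used, so a caller can leave headroom in every word. The same
 * cap feeds into sbq_calc_wake_batch() via sbq->min_shallow_depth, keeping
 * the wake batch small enough for the shallow-limited depth to wake all
 * wait queues.
 */
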
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

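/*
 * The hex dump built below prints 16 bytes per line, a space every two
 * bytes, and the byte offset at the start of each line, e.g. (values are
 * illustrative):
 *
 *	00000000: ff3f 0000 0000 0000 0f00 0000 0000 0000
 */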
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

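/*
 * Worked example for the calculation below (assuming SBQ_WAIT_QUEUES == 8
 * and SBQ_WAKE_BATCH == 8 from <linux/sbitmap.h>): with shift == 6,
 * depth == 40 and min_shallow_depth == UINT_MAX, shallow_depth == 64, the
 * usable depth is (40 >> 6) * 64 + min(40 & 63, 64) == 40, and
 * wake_batch == clamp(40 / 8, 1, 8) == 5.
 */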
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

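/*
 * Usage sketch for the queue variant (illustrative only, not part of this
 * file; sbitmap_queue_free() is declared in <linux/sbitmap.h>):
 *
 *	struct sbitmap_queue sbq;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	tag = __sbitmap_queue_get(&sbq);	// -1 if no tag is free
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&sbq);
 */
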
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

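/*
 * The two getters below keep a per-CPU allocation hint: it is refreshed to a
 * random value when it falls outside the (possibly resized) depth, reset to
 * 0 when the map is full, and advanced past the allocated bit only when the
 * hint was actually used or round-robin allocation is enabled.
 */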
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

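/*
 * Pick a wait queue to wake: scan the SBQ_WAIT_QUEUES heads starting at
 * wake_index and return the first one with active waiters, recording that
 * position in wake_index so the next wakeup resumes there. Returns NULL if
 * nobody is waiting (ws_active == 0 or all heads are idle).
 */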
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

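/*
 * One call accounts for one freed bit: wait_cnt of the chosen wait queue is
 * decremented, and only when it reaches zero are wake_batch waiters woken
 * and the counter reset via cmpxchg. Returning true tells the caller it
 * lost that cmpxchg race and should retry against another wait queue.
 */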
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as the request
	 * in blk_mq) against this bit to avoid racing with re-allocation;
	 * its pair is the memory barrier implied in __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

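/*
 * Example of the debugfs output produced below (all values illustrative,
 * assuming four possible CPUs):
 *
 *	depth=128
 *	busy=3
 *	cleared=1
 *	bits_per_word=32
 *	map_nr=4
 *	alloc_hint={12, 41, 0, 27}
 *	wake_batch=8
 *	wake_index=0
 *	ws_active=0
 *	ws={
 *		{.wait_cnt=8, .wait=inactive},
 *		...
 *	}
 *	round_robin=0
 *	min_shallow_depth=4294967295
 */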
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);