mq-deadline-cgroup.h

/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_MQ_DEADLINE_CGROUP_H_)
#define _MQ_DEADLINE_CGROUP_H_

#include <linux/blk-cgroup.h>

struct request_queue;

/**
 * struct io_stats_per_prio - I/O statistics per I/O priority class.
 * @inserted: Number of inserted requests.
 * @merged: Number of merged requests.
 * @dispatched: Number of dispatched requests.
 * @completed: Number of I/O completions.
 */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};

/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
struct blkcg_io_stats {
	struct io_stats_per_prio stats[4];
};

/**
 * struct dd_blkcg - Per cgroup data.
 * @cpd: blkcg_policy_data structure.
 * @stats: I/O statistics.
 */
struct dd_blkcg {
	struct blkcg_policy_data cpd;	/* must be the first member */
	struct blkcg_io_stats __percpu *stats;
};
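/*
 * Sketch (an assumption, not part of this header): the blkcg policy's
 * cpd_alloc_fn callback is expected to allocate the per-CPU statistics that
 * @stats points at, roughly along these lines, where 'gfp' is the gfp_t
 * argument passed to cpd_alloc_fn:
 *
 *	struct dd_blkcg *pd = kzalloc(sizeof(*pd), gfp);
 *
 *	if (pd)
 *		pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
 *					     gfp | __GFP_ZERO);
 *
 * The matching cpd_free_fn would then call free_percpu(pd->stats) followed by
 * kfree(pd).
 */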
/*
 * Count one event of type 'event_type' and with I/O priority class
 * 'prio_class'.
 */
#define ddcg_count(ddcg, event_type, prio_class) do {			\
	if (ddcg) {							\
		struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \
									\
		BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));	\
		BUILD_BUG_ON(!__same_type((prio_class), u8));		\
		local_inc(&io_stats->stats[(prio_class)].event_type);	\
		put_cpu_ptr(io_stats);					\
	}								\
} while (0)
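/*
 * Example (a sketch, not part of this header): an I/O scheduler hook could
 * bump the per-cgroup counter for a request roughly like this, assuming the
 * request 'rq' carries the originating bio and 'ioprio_class' is a u8 holding
 * the request's IOPRIO_CLASS_* value:
 *
 *	struct dd_blkcg *ddcg = dd_blkcg_from_bio(rq->bio);
 *	u8 ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 *
 *	ddcg_count(ddcg, inserted, ioprio_class);
 *
 * The BUILD_BUG_ON() checks enforce the argument types at compile time, and
 * get_cpu_ptr()/put_cpu_ptr() keep the increment on the local CPU's counters.
 */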
/*
 * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
 * across all CPUs. No locking or barriers since it is fine if the returned
 * sum is slightly outdated.
 */
#define ddcg_sum(ddcg, event_type, prio) ({				\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));		\
	BUILD_BUG_ON(!__same_type((prio), u8));				\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})
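/*
 * Example (a sketch, not part of this header): a debugfs or cgroup stat
 * helper could report the per-class totals with something like the following,
 * assuming 'ddcg' and a seq_file 'm':
 *
 *	u8 prio;
 *
 *	for (prio = 0; prio < 4; prio++)
 *		seq_printf(m, " %u", ddcg_sum(ddcg, dispatched, prio));
 *
 * Because the per-CPU counters are read without synchronization, the returned
 * sum is only approximately up to date, which is acceptable for statistics.
 */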
#ifdef CONFIG_BLK_CGROUP

/**
 * struct dd_blkg - Per (cgroup, request queue) data.
 * @pd: blkg_policy_data structure.
 */
struct dd_blkg {
	struct blkg_policy_data pd;	/* must be the first member */
};

struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
int dd_activate_policy(struct request_queue *q);
void dd_deactivate_policy(struct request_queue *q);
int __init dd_blkcg_init(void);
void __exit dd_blkcg_exit(void);
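
/*
 * Sketch (an assumption, not part of this header): the mq-deadline scheduler
 * would call dd_blkcg_init() from its module init code, dd_blkcg_exit() from
 * module exit, and dd_activate_policy(q) / dd_deactivate_policy(q) from the
 * elevator's init/exit callbacks for the request queue 'q', e.g.:
 *
 *	ret = dd_activate_policy(q);
 *	if (ret)
 *		goto free_dd_data;
 */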

#else /* CONFIG_BLK_CGROUP */

static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
{
	return NULL;
}

static inline int dd_activate_policy(struct request_queue *q)
{
	return 0;
}

static inline void dd_deactivate_policy(struct request_queue *q)
{
}

static inline int dd_blkcg_init(void)
{
	return 0;
}

static inline void dd_blkcg_exit(void)
{
}

#endif /* CONFIG_BLK_CGROUP */

#endif /* _MQ_DEADLINE_CGROUP_H_ */