percpu_counter.h

#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
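/*
 * Note on semantics (not in the original header comment): in the SMP
 * case the counter keeps a shared s64 total plus a small per-CPU s32
 * delta for each CPU.  percpu_counter_read() returns the shared total
 * without taking the lock, so it is cheap but may lag behind whatever
 * has not yet been folded in from the per-CPU deltas.  The out-of-line
 * percpu_counter_sum() is expected to take the lock and add the deltas
 * back in, giving an exact but slower result.
 */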
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        spinlock_t lock;
        s64 count;
        s32 *counters;
};

#if NR_CPUS >= 16
#define FBC_BATCH       (NR_CPUS*2)
#else
#define FBC_BATCH       (NR_CPUS*4)
#endif
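/*
 * FBC_BATCH is the per-CPU batch size: the out-of-line
 * percpu_counter_mod() is expected to accumulate changes in the local
 * s32 delta and only fold them into fbc->count (under the lock) once
 * the delta reaches the batch.  The total seen by percpu_counter_read()
 * can therefore be off by very roughly FBC_BATCH per CPU.
 */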
static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        spin_lock_init(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
        free_percpu(fbc->counters);
}

void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
s64 percpu_counter_sum(struct percpu_counter *fbc);

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 1;
}
#else

struct percpu_counter {
        s64 count;
};

static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void
percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

#endif  /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_mod(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_mod(fbc, -1);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
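
For context, a minimal usage sketch follows; it is not part of the header. It assumes a kernel where this API is available, and the struct example_sb and example_* functions are hypothetical names, loosely modelled on how an ext2/ext3-style superblock might track free blocks.

/* Hypothetical caller; only the percpu_counter_* calls come from the header above. */
#include <linux/percpu_counter.h>

struct example_sb {
        struct percpu_counter free_blocks;
};

static void example_fill_super(struct example_sb *sb, s64 nr_free)
{
        percpu_counter_init(&sb->free_blocks, nr_free);
}

static void example_alloc_block(struct example_sb *sb)
{
        percpu_counter_dec(&sb->free_blocks);   /* cheap, per-CPU in the SMP case */
}

static s64 example_statfs_free(struct example_sb *sb)
{
        /* Fast but possibly stale; percpu_counter_sum() is the exact, slower read. */
        return percpu_counter_read_positive(&sb->free_blocks);
}

static void example_put_super(struct example_sb *sb)
{
        percpu_counter_destroy(&sb->free_blocks);
}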