/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
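
/*
 * Illustrative helper (a sketch, not part of the original header): keep
 * only the first @n memory-order bytes of a long, using
 * aligned_byte_mask(). Assumes 0 < @n < sizeof(long), since shifting a
 * long by BITS_PER_LONG is undefined.
 */
static inline unsigned long example_keep_first_bytes(unsigned long word,
                                                     unsigned int n)
{
        return word & aligned_byte_mask(n);
}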
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size)); \
             (bit) < (size); \
             (bit) = find_next_bit((addr), (size), (bit) + 1))
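
/*
 * Illustrative example (not part of the original header): walk a bitmap
 * with for_each_set_bit() and count its set bits. BITS_TO_LONGS() is how
 * callers typically size the backing array, e.g.
 * "unsigned long map[BITS_TO_LONGS(128)];" for a 128-bit map.
 */
static inline unsigned int example_count_set_bits(const unsigned long *map,
                                                  unsigned int nbits)
{
        unsigned int bit, count = 0;

        for_each_set_bit(bit, map, nbits)
                count++;

        return count;
}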
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
        for ((bit) = find_next_bit((addr), (size), (bit)); \
             (bit) < (size); \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size)); \
             (bit) < (size); \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
        for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
             (bit) < (size); \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 */
#define for_each_set_clump8(start, clump, bits, size) \
        for ((start) = find_first_clump8(&(clump), (bits), (size)); \
             (start) < (size); \
             (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
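
/*
 * Illustrative example (not part of the original header): find the
 * greatest 8-bit group value in a bitmap via for_each_set_clump8(), the
 * same access pattern 8-bit-port GPIO drivers use.
 */
static inline unsigned long example_max_clump8(const unsigned long *map,
                                               unsigned int nbits)
{
        unsigned long start, clump, max = 0;

        for_each_set_clump8(start, clump, map, nbits)
                if (clump > max)
                        max = clump;

        return max;
}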
static inline int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
        return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
        return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
        return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
        return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
        return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
        return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
        return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
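
/*
 * Illustrative example (not part of the original header): a toy
 * ARX-style mixing step built on rol32(), of the kind stream ciphers
 * such as ChaCha use. Purely a demonstration of the rotate helpers,
 * not a real primitive.
 */
static inline __u32 example_mix32(__u32 a, __u32 b)
{
        a += b;                 /* add */
        b = rol32(b ^ a, 7);    /* xor, then rotate */
        return a ^ b;
}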
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
        __u8 shift = 31 - index;
        return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
        __u8 shift = 63 - index;
        return (__s64)(value << shift) >> shift;
}
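
/*
 * Illustrative example (not part of the original header): decode a
 * 12-bit two's-complement sample, as read from a hypothetical ADC
 * register, into a signed C value. Bit 11 is the sign bit; GENMASK()
 * comes from <linux/bits.h>, included above.
 */
static inline int example_decode_adc12(__u32 raw)
{
        return sign_extend32(raw & GENMASK(11, 0), 11);
}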
static inline unsigned fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
        if (count == 0)
                return -1;
        return fls(--count);
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter
 */
static inline int get_count_order_long(unsigned long l)
{
        if (l == 0UL)
                return -1;
        return (int)fls_long(--l);
}
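
/*
 * Illustrative example (not part of the original header): round an
 * arbitrary byte count up to a power-of-two bucket, e.g. for a
 * hypothetical power-of-two sizing scheme. get_count_order_long(1)
 * is 0 (1-byte bucket); get_count_order_long(1000) is 10 (1 KiB).
 */
static inline unsigned long example_pow2_bucket(unsigned long bytes)
{
        return bytes ? 1UL << get_count_order_long(bytes) : 0;
}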
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}
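
/*
 * Illustrative example (not part of the original header): pop the
 * lowest set bit from a 64-bit mask and return its index; __ffs64()
 * makes this behave identically on 32-bit and 64-bit kernels. The
 * caller must not pass a zero mask, per the warning above.
 */
static inline unsigned long example_pop_lowest_bit(u64 *mask)
{
        unsigned long bit = __ffs64(*mask);

        *mask &= *mask - 1;     /* clear the lowest set bit */
        return bit;
}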
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
                                       bool value)
{
        if (value)
                set_bit(nr, addr);
        else
                clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
                                         bool value)
{
        if (value)
                __set_bit(nr, addr);
        else
                __clear_bit(nr, addr);
}
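
/*
 * Illustrative example (not part of the original header): mirror a
 * boolean into a flag word. assign_bit() is atomic; the __assign_bit()
 * variant would be the choice when the caller already holds a lock
 * protecting @flags. Bit 0 is an invented flag position for the sketch.
 */
static inline void example_set_enabled(volatile unsigned long *flags,
                                       bool enabled)
{
        assign_bit(0, flags, enabled);
}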
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits) \
({ \
        const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
        typeof(*(ptr)) old__, new__; \
 \
        do { \
                old__ = READ_ONCE(*(ptr)); \
                new__ = (old__ & ~mask__) | bits__; \
        } while (cmpxchg(ptr, old__, new__) != old__); \
 \
        old__; \
})
#endif
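
/*
 * Illustrative example (not part of the original header): atomically
 * replace a 4-bit "state" field inside a flags word without disturbing
 * the neighbouring bits, returning the old value as set_mask_bits()
 * does. The field placement (bits 4..7) is invented for the sketch.
 */
#define EXAMPLE_STATE_MASK GENMASK(7, 4)

static inline unsigned long example_set_state(unsigned long *flags,
                                              unsigned long state)
{
        return set_mask_bits(flags, EXAMPLE_STATE_MASK,
                             (state << 4) & EXAMPLE_STATE_MASK);
}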
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test) \
({ \
        const typeof(*(ptr)) clear__ = (clear), test__ = (test); \
        typeof(*(ptr)) old__, new__; \
 \
        do { \
                old__ = READ_ONCE(*(ptr)); \
                new__ = old__ & ~clear__; \
        } while (!(old__ & test__) && \
                 cmpxchg(ptr, old__, new__) != old__); \
 \
        !(old__ & test__); \
})
#endif
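
/*
 * Illustrative example (not part of the original header): drop a
 * "pending" bit, but only while a "locked" bit is clear, mirroring how
 * callers use bit_clear_unless() for conditional state transitions.
 * Both bit positions are invented for the sketch; BIT() comes from
 * <linux/bits.h>.
 */
static inline bool example_clear_pending(unsigned long *flags)
{
        return bit_clear_unless(flags, BIT(0) /* pending */,
                                BIT(1) /* locked */);
}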
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
                                   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */