bitfield.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <asm/byteorder.h>
/*
 * Bitfield access macros
 *
 * FIELD_{GET,PREP} macros take as first parameter shifted mask
 * from which they extract the base mask and shift amount.
 * Mask must be a compilation time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *        FIELD_PREP(REG_FIELD_B, 0) |
 *        FIELD_PREP(REG_FIELD_C, c) |
 *        FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)

#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})
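
/*
 * Note on the last check above: for a contiguous mask, adding its lowest
 * set bit (1ULL << __bf_shf(_mask)) carries past the top bit and yields a
 * power of two, e.g. GENMASK(15, 8) + BIT(8) == 0x10000.  A mask with
 * holes does not, so it fails the check at build time.
 */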
/**
 * FIELD_MAX() - produce the maximum value representable by a field
 * @_mask: shifted mask defining the field's length and position
 *
 * FIELD_MAX() returns the maximum value that can be held in the field
 * specified by @_mask.
 */
#define FIELD_MAX(_mask)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: ");	\
		(typeof(_mask))((_mask) >> __bf_shf(_mask));		\
	})
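
/*
 * Usage sketch for FIELD_MAX(), reusing REG_FIELD_C from the example at
 * the top of this file (GENMASK(15, 8), an 8-bit field):
 *
 *	u32 max_c = FIELD_MAX(REG_FIELD_C);	evaluates to 0xff
 *
 * This is handy for range-checking or clamping a value before it is
 * packed with FIELD_PREP().
 */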
/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
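
/*
 * Usage sketch for FIELD_FIT(), again with REG_FIELD_C (GENMASK(15, 8)):
 *
 *	FIELD_FIT(REG_FIELD_C, 0xff)	evaluates to true
 *	FIELD_FIT(REG_FIELD_C, 0x100)	evaluates to false
 *
 * FIELD_PREP() silently masks off the excess bits of a run-time value
 * that is too wide, so callers that care should check with FIELD_FIT()
 * first.
 */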
/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value. The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})
/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg: value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
	})
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);

static __always_inline u64 field_multiplier(u64 field)
{
	/* Reject masks that are not a single contiguous run of set bits. */
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	/* The lowest set bit of the mask, i.e. 1 << shift. */
	return field & -field;
}

static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}

#define field_max(field)	((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
	____MAKE_OP(u##size,u##size,,)
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
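
/*
 * The generated le/be/u{8,16,32,64} helpers follow the same shifted-mask
 * convention as FIELD_{GET,PREP}(), but operate on a value of the named
 * width and endianness.  A sketch with a hypothetical little-endian
 * descriptor word, reusing REG_FIELD_B and REG_FIELD_C from the example
 * at the top of this file:
 *
 *	__le32 desc = get_desc();
 *	u32 c;
 *
 *	c = le32_get_bits(desc, REG_FIELD_C);
 *	le32p_replace_bits(&desc, c + 1, REG_FIELD_C);
 *	desc = le32_replace_bits(desc, 0, REG_FIELD_B);
 *
 * get_desc() stands in for whatever produces the descriptor and is
 * purely illustrative.
 */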

#endif