bitfield.h

/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/bug.h>
#include <asm/byteorder.h>
/*
 * Bitfield access macros
 *
 * The FIELD_{GET,PREP} macros take a shifted mask as their first parameter
 * and extract from it the base mask and the shift amount.
 * The mask must be a compile-time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *        FIELD_PREP(REG_FIELD_B, 0) |
 *        FIELD_PREP(REG_FIELD_C, c) |
 *        FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)

#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
				 _pfx "value too large for the field");	\
		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})
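
/*
 * Illustration (not part of the original header): for a contiguous mask such
 * as GENMASK(15, 8) == 0xff00, __bf_shf() evaluates to 8.  __BF_FIELD_CHECK()
 * above rejects, at compile time, a non-constant or zero mask, a constant
 * value too large for the field, a register type narrower than the mask,
 * and a non-contiguous mask (mask + its lowest set bit must be a power of
 * two).
 */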

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
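
/*
 * Usage sketch (illustrative, reusing REG_FIELD_C from the example at the
 * top of this file):
 *
 *  if (!FIELD_FIT(REG_FIELD_C, c))
 *          return -EINVAL;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */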

/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg:  value of the entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
	})

extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);

static __always_inline u64 field_multiplier(u64 field)
{
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;
}

static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
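
/*
 * Worked example (illustrative): for field == GENMASK(15, 8) == 0xff00,
 * field_multiplier() returns 0x100 (the lowest set bit) and field_mask()
 * returns 0xff, so multiplying or dividing by field_multiplier() moves a
 * value into or out of position without an explicit shift count.  A
 * non-contiguous constant field trips __bad_mask() at compile time.
 */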

#define ____MAKE_OP(type, base, to, from)				\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
						    base val, base field) \
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
						 base val, base field)	\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field) / field_multiplier(field);		\
}

#define __MAKE_OP(size)							\
	____MAKE_OP(le##size, u##size, cpu_to_le##size, le##size##_to_cpu) \
	____MAKE_OP(be##size, u##size, cpu_to_be##size, be##size##_to_cpu) \
	____MAKE_OP(u##size, u##size, ,)
____MAKE_OP(u8, u8, ,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
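
/*
 * Usage sketch for the generated helpers (illustrative; the register layout
 * and variable names below are made up for this example):
 *
 *  #define HDR_LEN   GENMASK(5, 0)
 *  #define HDR_FLAG  BIT(15)
 *
 *  __le16 hdr = cpu_to_le16(0);
 *  u16 len;
 *
 *  hdr = le16_replace_bits(hdr, 42, HDR_LEN);    set the length field
 *  le16p_replace_bits(&hdr, 1, HDR_FLAG);        same idea, through a pointer
 *  len = le16_get_bits(hdr, HDR_LEN);            extract back in CPU order
 *
 * Unlike FIELD_{GET,PREP}, these helpers take and return values wrapped in
 * the endian-annotated type (__le16 here) and do the byte-order conversion
 * internally.
 */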
#undef __MAKE_OP
#undef ____MAKE_OP

#endif