div64.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>
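
/*
 * Illustrative sketch, not part of the original file: typical do_div()
 * usage for the fast case described above.  The dividend fits in 32 bits,
 * so the inline fast path handles it without calling __div64_32().
 * The helper name and values are hypothetical.
 */
static u32 __maybe_unused div64_fast_path_example(void)
{
	u64 n = 1000000123ULL;		/* upper 32 bits are zero */
	u32 rem;

	rem = do_div(n, 1000000);	/* n is updated in place */
	/* now n == 1000 (quotient) and rem == 123 (remainder) */
	return rem;
}
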
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
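
/*
 * Illustrative sketch, not part of the original file: when the upper
 * 32 bits of the dividend are non-zero, do_div() falls through to the
 * shift-and-subtract routine above.  The helper name and values are
 * hypothetical.
 */
static u32 __maybe_unused div64_slow_path_example(void)
{
	u64 n = 0x100000007ULL;		/* 2^32 + 7, upper half non-zero */
	u32 rem;

	rem = do_div(n, 10);
	/* 2^32 + 7 == 4294967303 == 429496730 * 10 + 3, */
	/* so n is now 429496730 and rem is 3 */
	return rem;
}
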
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: 64bit dividend
 * @divisor: 32bit divisor
 * @remainder: 32bit remainder
 */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
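
/*
 * Illustrative sketch, not part of the original file: like C's '/' and
 * '%', div_s64_rem() truncates toward zero and gives the remainder the
 * sign of the dividend.  The helper name is hypothetical.
 */
static s64 __maybe_unused div_s64_rem_example(void)
{
	s32 rem;
	s64 quot = div_s64_rem(-7, 2, &rem);

	/* quot == -3 and rem == -1, since -7 == (-3) * 2 + (-1) */
	return quot;
}
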
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 * @remainder: 64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
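
/*
 * Illustrative sketch, not part of the original file: div64_u64_rem()
 * takes the estimate-and-correct branch whenever the divisor does not
 * fit in 32 bits.  The helper name and values are hypothetical.
 */
static u64 __maybe_unused div64_u64_rem_example(void)
{
	u64 rem;
	u64 quot = div64_u64_rem(1000000000123ULL, 5000000000ULL, &rem);

	/* quot == 200 and rem == 123; the initial shifted estimate of 200 */
	/* is first decremented to 199 and then corrected back up to 200 */
	return quot;
}
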
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and is available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
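
/*
 * Illustrative sketch, not part of the original file: for a full 64-bit
 * divisor the routine above estimates the quotient from the top bits as
 * (dividend >> n) / (divisor >> n) and then corrects it; the referenced
 * construction relies on the estimate being at most one too large.
 * The helper name and values are hypothetical.
 */
static u64 __maybe_unused div64_u64_example(void)
{
	/* divisor exceeds 32 bits, so the estimate path is taken */
	u64 quot = div64_u64(100000000000ULL, 10000000000ULL);

	/* quot == 10 */
	return quot;
}
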
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
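
/*
 * Illustrative sketch, not part of the original file: the sign fix-up in
 * div64_s64() relies on t being 0 when dividend and divisor have the same
 * sign and -1 (all bits set) when they differ, so (quot ^ t) - t yields
 * quot or -quot.  The helper name is hypothetical.
 */
static s64 __maybe_unused div64_s64_example(void)
{
	/* signs differ: t == -1, so the unsigned quotient 3 becomes -3 */
	s64 quot = div64_s64(-7, 2);

	/* quot == -3 */
	return quot;
}
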
#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
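
/*
 * Illustrative sketch, not part of the original file: iter_div_u64_rem()
 * suits callers that expect a small quotient, since __iter_div_u64_rem()
 * loops by subtraction instead of performing a full 64-bit division.
 * The helper name and values are hypothetical.
 */
static u32 __maybe_unused iter_div_u64_rem_example(void)
{
	u64 rem;
	u32 units = iter_div_u64_rem(2500000000ULL, 1000000000, &rem);

	/* 2.5e9 divided in units of 1e9: units == 2, rem == 500000000 */
	return units;
}
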
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	u64 res = 0, div, rem;
	int shift;

	/* can a * b overflow ? */
	if (ilog2(a) + ilog2(b) > 62) {
		/*
		 * (b * a) / c is equal to
		 *
		 *	(b / c) * a +
		 *	(b % c) * a / c
		 *
		 * if nothing overflows. Can the 1st multiplication
		 * overflow? Yes, but we do not care: this can only
		 * happen if the end result can't fit in u64 anyway.
		 *
		 * So the code below does
		 *
		 *	res = (b / c) * a;
		 *	b = b % c;
		 */
		div = div64_u64_rem(b, c, &rem);
		res = div * a;
		b = rem;

		shift = ilog2(a) + ilog2(b) - 62;
		if (shift > 0) {
			/* drop precision */
			b >>= shift;
			c >>= shift;
			if (!c)
				return res;
		}
	}

	return res + div64_u64(a * b, c);
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
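
/*
 * Illustrative sketch, not part of the original file: mul_u64_u64_div_u64()
 * computes a * b / c even when the intermediate a * b does not fit in
 * 64 bits, using the decomposition documented in the function above
 * (precision may be dropped when even the decomposed form would overflow).
 * The helper name and values are hypothetical.
 */
static u64 __maybe_unused mul_u64_u64_div_u64_example(void)
{
	/* 10^10 * 3*10^10 overflows u64, but the result 3*10^10 does not */
	u64 scaled = mul_u64_u64_div_u64(10000000000ULL, 30000000000ULL,
					 10000000000ULL);

	/* scaled == 30000000000 */
	return scaled;
}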