div64.c

/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */
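
/*
 * Illustrative sketch of the do_div() interface this file backs: the macro
 * divides a 64-bit value in place by a 32-bit divisor and evaluates to the
 * 32-bit remainder; on 32-bit kernels it only falls back to __div64_32()
 * when the upper 32 bits of the dividend are non-zero:
 *
 *	u64 ns = 1000000123;
 *	u32 rem = do_div(ns, NSEC_PER_SEC);
 *
 * leaves ns == 1 and rem == 123.
 */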

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
/*
 * Don't instrument this function as it may be called from tracing code, since
 * it needs to read the timer and this often requires calling do_div(), which
 * calls this function.
 */
uint32_t __attribute__((weak, no_instrument_function)) __div64_32(u64 *n,
								   u32 base)
{
	u64 rem = *n;
	u64 b = base;
	u64 res, d = 1;
	u32 high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (u64)high << 32;
		rem -= (u64)(high * base) << 32;
	}

	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
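
/*
 * Illustrative walk-through of the routine above (a sketch, not normative):
 * the divisor b and a matching quotient bit d are doubled until b is no
 * longer below the remainder (with a check that b has not overflowed into
 * the sign bit), then a restoring long division walks back down, subtracting
 * b wherever it fits.  Dividing *n = 10 by base = 3:
 *
 *	b/d grow:   3/1 -> 6/2 -> 12/4   (stop, since 12 >= 10)
 *	walk down:  10 < 12; 10 - 6 = 4, res = 2; 4 - 3 = 1, res = 3
 *
 * giving *n = 3 and a returned remainder of 1.
 */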

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
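
/*
 * Sign convention of div_s64_rem(), shown with a small illustrative example:
 * like C's native '/' and '%', the quotient truncates toward zero and the
 * remainder takes the sign of the dividend:
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);
 *
 * yields q == -3 and rem == -1, so that q * 2 + rem == -7.
 */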

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
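
/*
 * Illustrative use of div64_u64_rem() with a divisor wider than 32 bits,
 * i.e. the slow path above (a sketch):
 *
 *	u64 rem;
 *	u64 q = div64_u64_rem(5ULL << 32, 2ULL << 32, &rem);
 *
 * yields q == 2 and rem == 1ULL << 32.
 */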

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and are available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
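
/*
 * A note on the estimate-and-correct step above (an explanatory sketch): with
 * n = 1 + fls(high), both operands are shifted right until the divisor fits
 * in 32 bits; per the Hacker's Delight derivation referenced above, the
 * resulting 32-bit quotient is either exact or one too large, so decrementing
 * it and then adding one back when the remaining dividend is still >= divisor
 * restores the exact result.  For example, with dividend = (1ULL << 32) + 5
 * and divisor = 1ULL << 32: n = 2, the shifted division gives 1, the
 * decrement makes it 0, and the final check bumps it back to the correct
 * quotient of 1.
 */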

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
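
/*
 * How the branch-free sign fix-up in div64_s64() works (an illustrative
 * sketch): t is 0 when dividend and divisor have the same sign and ~0 (all
 * ones) when they differ, so (quot ^ t) - t either returns the unsigned
 * quotient unchanged or two's-complement negates it.  For example,
 * div64_s64(-6, 3): quot = 2, t = -1, and (2 ^ -1) - (-1) == -3 + 1 == -2.
 */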

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
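
/*
 * Illustrative use (a sketch): __iter_div_u64_rem() in <linux/math64.h>
 * divides by repeated subtraction, so this helper is only a win when the
 * quotient is known to be small, e.g. splitting a short nanosecond delta
 * into seconds and leftover nanoseconds:
 *
 *	u64 rem;
 *	u32 sec = iter_div_u64_rem(2 * NSEC_PER_SEC + 123, NSEC_PER_SEC, &rem);
 *
 * yields sec == 2 and rem == 123.
 */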