div64.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
 * This routine assumes that the top 32 bits of the dividend are
 * non-zero to start with.
 * On entry, r3 points to the dividend, which gets overwritten with
 * the 64-bit quotient, and r4 contains the divisor.
 * On exit, r3 contains the remainder.
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
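
/*
 * Seen from C, the register usage described above corresponds to a
 * declaration along these lines (an illustrative sketch, not a definitive
 * prototype: the pointer argument arrives in r3, the divisor in r4, and
 * the remainder comes back in r3):
 *
 *	extern unsigned int __div64_32(unsigned long long *dividend,
 *				       unsigned int divisor);
 *
 *	unsigned long long n = 0x500000000ULL;	// top 32 bits non-zero
 *	unsigned int r = __div64_32(&n, 6);	// n becomes 0xD5555555, r = 2
 *
 * A C model of the estimate-and-correct loop used below follows the
 * routine.
 */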
#include "ppc_asm.h"

	.globl	__div64_32
__div64_32:
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# subtract the product from the dividend,
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient
	bne	1b
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr
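
/*
 * The division above can be pictured in C roughly as below.  This is only
 * an illustrative model (the function name is invented here, and GCC's
 * __builtin_clz stands in for cntlzw); the real entry point is the
 * assembly routine __div64_32.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t div64_32_model(uint64_t *dividend, uint32_t divisor)
 *	{
 *		uint64_t d = *dividend;
 *		uint32_t hi = (uint32_t)(d >> 32), qhi = 0, qlo = 0;
 *
 *		if (hi >= divisor) {		// quotient.hi = dividend.hi / divisor
 *			qhi = hi / divisor;
 *			d -= ((uint64_t)qhi * divisor) << 32;	// dividend.hi %= divisor
 *		}
 *		while ((hi = (uint32_t)(d >> 32)) != 0) {
 *			uint32_t est;
 *			if (hi & 0xc0000000u) {
 *				est = hi;	// hi alone is already a safe underestimate
 *			} else {
 *				// shift the dividend right until it fits in 32 bits, and
 *				// the divisor right by the same amount, rounding it up,
 *				// so the estimate can only be too small, never too large
 *				unsigned shift = 32 - __builtin_clz(hi);
 *				uint32_t dvs = (uint32_t)(((uint64_t)divisor +
 *							   (1u << shift) - 1) >> shift);
 *				est = (uint32_t)(d >> shift) / dvs;
 *			}
 *			d -= (uint64_t)est * divisor;	// subtract estimate * divisor
 *			qlo += est;			// accumulate the quotient estimate
 *		}
 *		qlo += (uint32_t)d / divisor;		// remaining 32-bit division
 *		*dividend = ((uint64_t)qhi << 32) | qlo;	// quotient replaces dividend
 *		return (uint32_t)d % divisor;		// remainder
 *	}
 */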

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 hold the 64-bit value
 * R5    holds the shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
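
/*
 * These are the helper names a 32-bit compiler emits calls to for 64-bit
 * shifts, so at the C level they behave roughly like the declarations
 * below (a sketch; the exact libgcc typedefs differ):
 *
 *	long long          __ashrdi3(long long a, int count);		// a >> count, sign-filled
 *	unsigned long long __lshrdi3(unsigned long long a, int count);	// a >> count, zero-filled
 *	long long          __ashldi3(long long a, int count);		// a << count
 *
 * The 64-bit value arrives with its most significant word in R3 and its
 * least significant word in R4, which is the R3/R4 layout used below.
 * A short C model of the word-by-word technique follows each routine.
 */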

	.globl	__ashrdi3
__ashrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
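
/*
 * An illustrative C model of the word-by-word arithmetic right shift above
 * (the function name is invented here).  The assembly stays branch-free
 * because srw/slw produce zero, and sraw produces all sign bits, when the
 * shift count is 32..63; plain C shifts do not, so the model branches on
 * the count instead, which also absorbs the t3 masking step.
 *
 *	#include <stdint.h>
 *
 *	// assumes >> on a signed value is an arithmetic shift, as with GCC
 *	static void ashrdi3_model(uint32_t *msw, uint32_t *lsw, unsigned count)
 *	{
 *		int32_t hi = (int32_t)*msw;
 *
 *		if (count == 0)
 *			return;
 *		if (count < 32) {
 *			*lsw = (*lsw >> count) | ((uint32_t)hi << (32 - count));	// LSW |= t1
 *			*msw = (uint32_t)(hi >> count);					// MSW >>= count
 *		} else {
 *			*lsw = (uint32_t)(hi >> (count - 32));				// LSW = t2
 *			*msw = (uint32_t)(hi >> 31);					// sign propagation
 *		}
 *	}
 */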

	.globl	__ashldi3
__ashldi3:
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
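
/*
 * The same idea for the left shift, again as an invented illustrative
 * helper (uint32_t from <stdint.h>) rather than the real code:
 *
 *	static void ashldi3_model(uint32_t *msw, uint32_t *lsw, unsigned count)
 *	{
 *		if (count == 0)
 *			return;
 *		if (count < 32) {
 *			*msw = (*msw << count) | (*lsw >> (32 - count));	// MSW |= t1
 *			*lsw <<= count;
 *		} else {
 *			*msw = *lsw << (count - 32);				// MSW = t2
 *			*lsw = 0;
 *		}
 *	}
 */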

	.globl	__lshrdi3
__lshrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
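
/*
 * And the logical right shift, with zero fill from the top (again an
 * invented illustrative helper using <stdint.h> types):
 *
 *	static void lshrdi3_model(uint32_t *msw, uint32_t *lsw, unsigned count)
 *	{
 *		if (count == 0)
 *			return;
 *		if (count < 32) {
 *			*lsw = (*lsw >> count) | (*msw << (32 - count));	// LSW |= t1
 *			*msw >>= count;
 *		} else {
 *			*lsw = *msw >> (count - 32);				// LSW = t2
 *			*msw = 0;
 *		}
 *	}
 */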