/*
 * some color conversion and blitting routines
 * (C) notaz, 2006-2009
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */

.text
.align 4

@ Convert 0000bbb0 ggg0rrr0 0000bbb0 ggg0rrr0
@ to      00000000 rrr00000 ggg00000 bbb00000 ...
@ lr = 0x00e000e0, out: r3=lower_pix, r2=higher_pix; trashes rin
.macro convRGB32_2 rin sh=0
    and     r2, lr, \rin, lsr #4        @ blue
    and     r3, \rin, lr
    orr     r2, r2, r3, lsl #8          @ g0b0g0b0
    mov     r3, r2, lsl #16             @ g0b00000
    and     \rin, lr, \rin, ror #12     @ 00r000r0 (reversed)
    orr     r3, r3, \rin, lsr #16       @ g0b000r0
.if \sh == 1
    mov     r3, r3, ror #17             @ shadow mode
.elseif \sh == 2
    adds    r3, r3, #0x40000000         @ green
    orrcs   r3, r3, #0xe0000000
    mov     r3, r3, ror #8
    adds    r3, r3, #0x40000000
    orrcs   r3, r3, #0xe0000000
    mov     r3, r3, ror #16
    adds    r3, r3, #0x40000000
    orrcs   r3, r3, #0xe0000000
    mov     r3, r3, ror #24
.else
    mov     r3, r3, ror #16             @ r3=low
.endif
    orr     r3, r3, r3, lsr #3
    str     r3, [r0], #4
    mov     r2, r2, lsr #16
    orr     r2, r2, \rin, lsl #16
.if \sh == 1
    mov     r2, r2, lsr #1
.elseif \sh == 2
    mov     r2, r2, ror #8
    adds    r2, r2, #0x40000000         @ blue
    orrcs   r2, r2, #0xe0000000
    mov     r2, r2, ror #8
    adds    r2, r2, #0x40000000
    orrcs   r2, r2, #0xe0000000
    mov     r2, r2, ror #8
    adds    r2, r2, #0x40000000
    orrcs   r2, r2, #0xe0000000
    mov     r2, r2, ror #8
.endif
    orr     r2, r2, r2, lsr #3
.if \sh == 1
    str     r2, [r0, #0x40*2*4]
.endif
    str     r2, [r0], #4
.endm
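
@ bgr444_to_rgb32 expands 64 BGR444 values (presumably the 64-entry palette;
@ two pixels per word, 8 pixels per loop iteration) to 32bpp xRGB. The top
@ 3 bits of each channel are replicated into the next 3 by the final
@ "orr ..., lsr #3", so e.g. input 0x0eee comes out as 0x00fcfcfc.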
.global bgr444_to_rgb32 @ void *to, void *from

bgr444_to_rgb32:
    stmfd   sp!, {r4-r7,lr}

    mov     r12, #0x40>>3               @ repeats
    mov     lr, #0x00e00000
    orr     lr, lr, #0x00e0

.loopRGB32:
    subs    r12, r12, #1
    ldmia   r1!, {r4-r7}
    convRGB32_2 r4
    convRGB32_2 r5
    convRGB32_2 r6
    convRGB32_2 r7
    bgt     .loopRGB32

    ldmfd   sp!, {r4-r7,pc}
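
@ bgr444_to_rgb32_sh converts the same 64 entries twice, starting 0x40 entries
@ into the destination: the first pass (sh=1) writes half-brightness (shadow)
@ colors, the second pass (sh=2) writes brightened colors with saturation
@ (hilight), each channel getting +0x40 clamped via the orrcs above.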
.global bgr444_to_rgb32_sh @ void *to, void *from

bgr444_to_rgb32_sh:
    stmfd   sp!, {r4-r7,lr}

    mov     r12, #0x40>>3               @ repeats
    add     r0, r0, #0x40*4
    mov     lr, #0x00e00000
    orr     lr, lr, #0x00e0

.loopRGB32sh:
    subs    r12, r12, #1
    ldmia   r1!, {r4-r7}
    convRGB32_2 r4, 1
    convRGB32_2 r5, 1
    convRGB32_2 r6, 1
    convRGB32_2 r7, 1
    bgt     .loopRGB32sh

    mov     r12, #0x40>>3               @ repeats
    sub     r1, r1, #0x40*2

.loopRGB32hi:
    ldmia   r1!, {r4-r7}
    convRGB32_2 r4, 2
    convRGB32_2 r5, 2
    convRGB32_2 r6, 2
    convRGB32_2 r7, 2
    subs    r12, r12, #1
    bgt     .loopRGB32hi

    ldmfd   sp!, {r4-r7,lr}
    bx      lr

@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

@ mode2 blitter
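@ vidcpy_m2 copies 224 lines of 320 bytes (256 in 32-column mode) from a
@ 328-byte-pitch source, skipping its 8-byte left border, into a 320-byte-pitch
@ destination starting 8 lines in; with both m32col and with_32c_border set
@ the output is also shifted right by 32 bytes to center the narrower image.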
.global vidcpy_m2 @ void *dest, void *src, int m32col, int with_32c_border

vidcpy_m2:
    stmfd   sp!, {r4-r6,lr}

    mov     r12, #224                   @ lines
    add     r0, r0, #320*8
    add     r1, r1, #8
    mov     lr, #0

    tst     r2, r2
    movne   lr, #64
    tstne   r3, r3
    addne   r0, r0, #32

vidCpyM2_loop_out:
    mov     r6, #10
    sub     r6, r6, lr, lsr #5          @ -= 2 in 32col mode
vidCpyM2_loop:
    subs    r6, r6, #1
    ldmia   r1!, {r2-r5}
    stmia   r0!, {r2-r5}
    ldmia   r1!, {r2-r5}
    stmia   r0!, {r2-r5}
    bne     vidCpyM2_loop

    subs    r12, r12, #1
    add     r0, r0, lr
    add     r1, r1, #8
    add     r1, r1, lr
    bne     vidCpyM2_loop_out

    ldmfd   sp!, {r4-r6,pc}
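
@ vidcpy_m2_rot does the same job for a rotated (240x320 portrait) display:
@ it walks the source four lines at a time and hands each group to
@ rotated_blit8_2 below, simulating a call by pushing a return address
@ (after_rot_blit8) and branching.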
.global vidcpy_m2_rot @ void *dest, void *src, int m32col, int with_32c_border

vidcpy_m2_rot:
    stmfd   sp!, {r4-r8,lr}

    add     r1, r1, #8
    tst     r2, r2
    subne   r1, r1, #32                 @ adjust

    mov     r4, r0
    mov     r5, r1
    mov     r6, r2
    mov     r7, #8+4

vidcpy_m2_rot_loop:
    @ a bit lame but oh well..
    mov     r0, r4
    mov     r1, r5
    mov     r2, r7
    mov     r3, r6
    mov     r8, #328
    adr     lr, after_rot_blit8
    stmfd   sp!, {r4-r8,lr}
    b       rotated_blit8_2

after_rot_blit8:
    add     r5, r5, #328*4
    add     r7, r7, #4
    cmp     r7, #224+8+4
    ldmgefd sp!, {r4-r8,pc}
    b       vidcpy_m2_rot_loop
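
@ rotated_blit8 transposes four consecutive 8bpp source lines into columns of
@ a 240-pixel-wide portrait framebuffer, so the 320x240 image ends up rotated
@ by 90 degrees; in 32-column mode only the middle 256 pixels of each line are
@ copied. rotated_blit8_2 is the entry used by vidcpy_m2_rot, with the source
@ pitch in r8 already set by the caller.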
.global rotated_blit8 @ void *dst, void *linesx4, u32 y, int is_32col

rotated_blit8:
    stmfd   sp!, {r4-r8,lr}
    mov     r8, #320

rotated_blit8_2:
    add     r0, r0, #(240*320)
    sub     r0, r0, #(240+4)            @ y starts from 4
    add     r0, r0, r2

    tst     r3, r3
    subne   r0, r0, #(240*32)
    addne   r1, r1, #32
    movne   lr, #256/4
    moveq   lr, #320/4

rotated_blit_loop8:
    mov     r6, r1
    ldr     r2, [r6], r8
    ldr     r3, [r6], r8
    ldr     r4, [r6], r8
    ldr     r5, [r6], r8

    mov     r6, r2, lsl #24
    mov     r6, r6, lsr #8
    orr     r6, r6, r3, lsl #24
    mov     r6, r6, lsr #8
    orr     r6, r6, r4, lsl #24
    mov     r6, r6, lsr #8
    orr     r6, r6, r5, lsl #24
    str     r6, [r0], #-240

    and     r6, r3, #0xff00
    and     r7, r2, #0xff00
    orr     r6, r6, r7, lsr #8
    and     r7, r4, #0xff00
    orr     r6, r6, r7, lsl #8
    and     r7, r5, #0xff00
    orr     r6, r6, r7, lsl #16
    str     r6, [r0], #-240

    and     r6, r4, #0xff0000
    and     r7, r2, #0xff0000
    orr     r6, r6, r7, lsr #16
    and     r7, r3, #0xff0000
    orr     r6, r6, r7, lsr #8
    and     r7, r5, #0xff0000
    orr     r6, r6, r7, lsl #8
    str     r6, [r0], #-240

    mov     r6, r5, lsr #24
    mov     r6, r6, lsl #8
    orr     r6, r6, r4, lsr #24
    mov     r6, r6, lsl #8
    orr     r6, r6, r3, lsr #24
    mov     r6, r6, lsl #8
    orr     r6, r6, r2, lsr #24
    str     r6, [r0], #-240

    subs    lr, lr, #1
    add     r1, r1, #4
    bne     rotated_blit_loop8

    ldmfd   sp!, {r4-r8,pc}
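
@ The rb_line_* macros below do the 16bpp equivalent of the byte shuffling
@ above: each takes two pixels from each of four lines (r2-r5) and packs the
@ low or high halfwords into a pair of output words (r7,r8).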
@ input: r2-r5
@ output: r7,r8
@ trash: r6
.macro rb_line_low
    mov     r6, r2, lsl #16
    mov     r7, r3, lsl #16
    orr     r7, r7, r6, lsr #16
    mov     r6, r4, lsl #16
    mov     r8, r5, lsl #16
    orr     r8, r8, r6, lsr #16
.endm

.macro rb_line_hi
    mov     r6, r2, lsr #16
    mov     r7, r3, lsr #16
    orr     r7, r6, r7, lsl #16
    mov     r6, r4, lsr #16
    mov     r8, r5, lsr #16
    orr     r8, r6, r8, lsl #16
.endm
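
@ rotated_blit16 is the 16bpp counterpart of rotated_blit8: four 320-pixel
@ source lines are transposed into a 240-pixel-wide portrait buffer, four
@ output rows (of 4 pixels each) per loop iteration.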
.global rotated_blit16 @ void *dst, void *linesx4, u32 y, int is_32col

rotated_blit16:
    stmfd   sp!, {r4-r8,lr}

    add     r0, r0, #(240*320)*2
    sub     r0, r0, #(240+4)*2          @ y starts from 4
    add     r0, r0, r2, lsl #1

    tst     r3, r3
    subne   r0, r0, #(240*32)*2
    addne   r1, r1, #32*2
    movne   lr, #256/4
    moveq   lr, #320/4

rotated_blit_loop16:
    ldr     r2, [r1, #320*0*2]
    ldr     r3, [r1, #320*1*2]
    ldr     r4, [r1, #320*2*2]
    ldr     r5, [r1, #320*3*2]
    rb_line_low
    stmia   r0, {r7,r8}
    sub     r0, r0, #240*2
    rb_line_hi
    stmia   r0, {r7,r8}
    sub     r0, r0, #240*2

    ldr     r2, [r1, #320*0*2+4]
    ldr     r3, [r1, #320*1*2+4]
    ldr     r4, [r1, #320*2*2+4]
    ldr     r5, [r1, #320*3*2+4]
    rb_line_low
    stmia   r0, {r7,r8}
    sub     r0, r0, #240*2
    rb_line_hi
    stmia   r0, {r7,r8}
    sub     r0, r0, #240*2

    subs    lr, lr, #1
    add     r1, r1, #8
    bne     rotated_blit_loop16

    ldmfd   sp!, {r4-r8,pc}
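
@ spend_cycles burns roughly 'c' CPU cycles in a simple countdown loop,
@ assuming about 4 cycles per loop iteration plus a small fixed overhead.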
.global spend_cycles @ c

spend_cycles:
    mov     r0, r0, lsr #2              @ 4 cycles/iteration
    sub     r0, r0, #2                  @ entry/exit/init
.sc_loop:
    subs    r0, r0, #1
    bpl     .sc_loop

    bx      lr

@ vim:filetype=armasm