bpf_jit_asm.S

/* SPDX-License-Identifier: GPL-2.0-only */
/* bpf_jit.S: Packet/header access helper functions
 * for PPC64 BPF compiler.
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 */

#include <asm/ppc_asm.h>
#include <asm/asm-compat.h>
#include "bpf_jit32.h"

/*
 * All of these routines are called directly from generated code,
 * whose register usage is:
 *
 * r3		skb
 * r4,r5	A,X
 * r6		*** address parameter to helper ***
 * r7-r10	scratch
 * r14		skb->data
 * r15		skb headlen
 * r16-31	M[]
 */
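
/*
 * Illustrative only (the real sequence is emitted by the JIT): given the
 * conventions above, a call site in generated code is expected to look
 * roughly like
 *
 *	li	r_addr, <offset>	# offset into the packet
 *	bl	sk_load_word		# result in r_A, error flagged in cr0
 *	blt	<epilogue>		# cr0 = LT means failure, return 0
 */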

/*
 * To consider: These helpers are so small it could be better to just
 * generate them inline. Inline code can do the simple headlen check
 * then branch directly to slow_path_XXX if required. (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and blrl.)
 */
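
/*
 * A sketch of that idea, not implemented here: assuming a spare GPR
 * (say r_scratch2) already held the address of a hypothetical
 * slow_path_generic, an inline call site would shrink to roughly
 *
 *	mtlr	r_scratch2		# slow path entry point
 *	li	<size-reg>, 4		# pass the access size
 *	blrl				# call the generic slow path
 */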

	.globl	sk_load_word
sk_load_word:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_word_neg
	.globl	sk_load_word_positive_offset
sk_load_word_positive_offset:
	/* Are we accessing past headlen? */
	subi	r_scratch1, r_HL, 4
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_word
	/* Nope, just hitting the header. cr0 here is eq or gt! */
	/* Packet bytes are big-endian; byte-reverse the load on LE
	 * so r_A gets the value classic BPF expects. */
#ifdef __LITTLE_ENDIAN__
	lwbrx	r_A, r_D, r_addr
#else
	lwzx	r_A, r_D, r_addr
#endif
	blr	/* Return success, cr0 != LT */

	.globl	sk_load_half
sk_load_half:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_half_neg
	.globl	sk_load_half_positive_offset
sk_load_half_positive_offset:
	subi	r_scratch1, r_HL, 2
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
	lhbrx	r_A, r_D, r_addr
#else
	lhzx	r_A, r_D, r_addr
#endif
	blr

	.globl	sk_load_byte
sk_load_byte:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_neg
	.globl	sk_load_byte_positive_offset
sk_load_byte_positive_offset:
	PPC_LCMP	r_HL, r_addr
	ble	bpf_slow_path_byte
	lbzx	r_A, r_D, r_addr
	blr

/*
 * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
 * r_addr is the offset value
 */
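
/*
 * Classic BPF uses this to pick up a header length in one instruction:
 * the low nibble of the loaded byte (e.g. the IPv4 IHL field) is masked
 * and scaled by four, and the result goes into X rather than A.
 */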
	.globl	sk_load_byte_msh
sk_load_byte_msh:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_msh_neg
	.globl	sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
	PPC_LCMP	r_HL, r_addr
	ble	bpf_slow_path_byte_msh
	lbzx	r_X, r_D, r_addr
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2	/* X = 4*(X & 0xf) */
	blr

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
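
/*
 * For reference, the callee is assumed to be the usual
 *
 *	int skb_copy_bits(const struct sk_buff *skb, int offset,
 *			  void *to, int len);
 *
 * so r3 stays the skb, r4 takes the offset, r5 points at the scratch
 * buffer in the local variable space and r6 carries the size.
 */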

#define bpf_slow_path_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);		\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r6, SIZE;					\
	bl	skb_copy_bits;					\
	nop;							\
	/* R3 = 0 on success */					\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPI	r3, 0;					\
	blt	bpf_error;	/* cr0 = LT */			\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	/* Great success! */

bpf_slow_path_word:
	bpf_slow_path_common(4)
	/* Data value is on stack, and cr0 != LT */
	lwz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_half:
	bpf_slow_path_common(2)
	lhz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	lbz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	lbz	r_X, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

/* Call out to bpf_internal_load_pointer_neg_helper:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
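
/*
 * For reference, the helper (net/core/filter.c) is assumed to have the
 * prototype
 *
 *	void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *						   int k, unsigned int size);
 *
 * It returns a pointer to the requested bytes or NULL on failure, which
 * is why success is signalled below by r3 != 0.
 */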

#define sk_negative_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r5, SIZE;					\
	bl	bpf_internal_load_pointer_neg_helper;		\
	nop;							\
	/* R3 != 0 on success */				\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPLI	r3, 0;					\
	beq	bpf_error_slow;	/* cr0 = EQ */			\
	mr	r_addr, r3;					\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1);	\
	/* Great success! */
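
/*
 * Each negative-offset stub below first rejects offsets smaller than
 * SKF_LL_OFF (-0x200000): "lis r_scratch1, -32" builds that constant
 * (-32 << 16). Offsets in the special SKF_* range fall through to the
 * helper via sk_negative_common().
 */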

bpf_slow_path_word_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_word_negative_offset
sk_load_word_negative_offset:
	sk_negative_common(4)
	lwz	r_A, 0(r_addr)
	blr

bpf_slow_path_half_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_half_negative_offset
sk_load_half_negative_offset:
	sk_negative_common(2)
	lhz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_negative_offset
sk_load_byte_negative_offset:
	sk_negative_common(1)
	lbz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_msh_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
	sk_negative_common(1)
	lbz	r_X, 0(r_addr)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

bpf_error_slow:
	/* fabricate a cr0 = lt */
	li	r_scratch1, -1
	PPC_LCMPI	r_scratch1, 0
bpf_error:
	/* Entered with cr0 = lt */
	li	r3, 0
	/* Generated code will 'blt epilogue', returning 0. */
	blr