bpf_jit_comp64.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * bpf_jit_comp64.c: eBPF JIT compiler
  4. *
  5. * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  6. * IBM Corporation
  7. *
  8. * Based on the powerpc classic BPF JIT compiler by Matt Evans
  9. */
  10. #include <linux/moduleloader.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/asm-compat.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/filter.h>
  15. #include <linux/if_vlan.h>
  16. #include <asm/kprobes.h>
  17. #include <linux/bpf.h>
  18. #include <asm/security_features.h>
  19. #include "bpf_jit64.h"
  20. static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
  21. {
  22. memset32(area, BREAKPOINT_INSTRUCTION, size/4);
  23. }
  24. static inline void bpf_flush_icache(void *start, void *end)
  25. {
  26. smp_wmb();
  27. flush_icache_range((unsigned long)start, (unsigned long)end);
  28. }
  29. static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
  30. {
  31. return (ctx->seen & (1 << (31 - b2p[i])));
  32. }
  33. static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
  34. {
  35. ctx->seen |= (1 << (31 - b2p[i]));
  36. }
  37. static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  38. {
  39. /*
  40. * We only need a stack frame if:
  41. * - we call other functions (kernel helpers), or
  42. * - the bpf program uses its stack area
  43. * The latter condition is deduced from the usage of BPF_REG_FP
  44. */
  45. return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
  46. }
  47. /*
  48. * When not setting up our own stackframe, the redzone usage is:
  49. *
  50. * [ prev sp ] <-------------
  51. * [ ... ] |
  52. * sp (r1) ---> [ stack pointer ] --------------
  53. * [ nv gpr save area ] 5*8
  54. * [ tail_call_cnt ] 8
  55. * [ local_tmp_var ] 16
  56. * [ unused red zone ] 208 bytes protected
  57. */
  58. static int bpf_jit_stack_local(struct codegen_context *ctx)
  59. {
  60. if (bpf_has_stack_frame(ctx))
  61. return STACK_FRAME_MIN_SIZE + ctx->stack_size;
  62. else
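/*
 * local_tmp_var lives in the redzone, below the NVR save area
 * (BPF_PPC_STACK_SAVE bytes) and the 8-byte tail_call_cnt --
 * see the layout diagram above.
 */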
  63. return -(BPF_PPC_STACK_SAVE + 24);
  64. }
  65. static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
  66. {
  67. return bpf_jit_stack_local(ctx) + 16;
  68. }
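/*
 * Save slot (as an offset from r1) for non-volatile GPR 'reg'. In both
 * cases the slots sit just below the stack pointer at program entry:
 * at the top of our stack frame when we allocate one, otherwise in the
 * caller-provided redzone. r31 takes the highest slot.
 */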
  69. static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
  70. {
  71. if (reg >= BPF_PPC_NVR_MIN && reg < 32)
  72. return (bpf_has_stack_frame(ctx) ?
  73. (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
  74. - (8 * (32 - reg));
  75. pr_err("BPF JIT is asking about unknown registers");
  76. BUG();
  77. }
  78. static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
  79. {
  80. int i;
  81. /*
  82. * Initialize tail_call_cnt if we do tail calls.
  83. * Otherwise, put in NOPs so that it can be skipped when we are
  84. * invoked through a tail call.
  85. */
  86. if (ctx->seen & SEEN_TAILCALL) {
  87. EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
  88. /* this goes in the redzone */
  89. PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
  90. } else {
  91. EMIT(PPC_RAW_NOP());
  92. EMIT(PPC_RAW_NOP());
  93. }
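/*
 * Size in bytes of the two instructions above. A tail call branches past
 * them, skipping the tail_call_cnt initialization (or the NOPs standing
 * in for it).
 */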
  94. #define BPF_TAILCALL_PROLOGUE_SIZE 8
  95. if (bpf_has_stack_frame(ctx)) {
  96. /*
  97. * We need a stack frame, but we don't necessarily need to
  98. * save/restore LR unless we call other functions
  99. */
  100. if (ctx->seen & SEEN_FUNC) {
  101. EMIT(PPC_INST_MFLR | __PPC_RT(R0));
  102. PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
  103. }
  104. PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
  105. }
  106. /*
  107. * Back up non-volatile regs -- BPF registers 6-10
  108. * If we haven't created our own stack frame, we save these
  109. * in the protected zone below the previous stack frame
  110. */
  111. for (i = BPF_REG_6; i <= BPF_REG_10; i++)
  112. if (bpf_is_seen_register(ctx, i))
  113. PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
  114. /* Setup frame pointer to point to the bpf stack area */
  115. if (bpf_is_seen_register(ctx, BPF_REG_FP))
  116. EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
  117. STACK_FRAME_MIN_SIZE + ctx->stack_size));
  118. }
  119. static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
  120. {
  121. int i;
  122. /* Restore NVRs */
  123. for (i = BPF_REG_6; i <= BPF_REG_10; i++)
  124. if (bpf_is_seen_register(ctx, i))
  125. PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
  126. /* Tear down our stack frame */
  127. if (bpf_has_stack_frame(ctx)) {
  128. EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
  129. if (ctx->seen & SEEN_FUNC) {
  130. PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
  131. EMIT(PPC_RAW_MTLR(0));
  132. }
  133. }
  134. }
  135. static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
  136. {
  137. bpf_jit_emit_common_epilogue(image, ctx);
  138. /* Move result to r3 */
  139. EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
  140. EMIT(PPC_RAW_BLR());
  141. }
  142. static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
  143. u64 func)
  144. {
  145. #ifdef PPC64_ELF_ABI_v1
  146. /* func points to the function descriptor */
  147. PPC_LI64(b2p[TMP_REG_2], func);
  148. /* Load actual entry point from function descriptor */
  149. PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
  150. /* ... and move it to LR */
  151. EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
  152. /*
  153. * Load TOC from function descriptor at offset 8.
  154. * We can clobber r2 since we get called through a
  155. * function pointer (so caller will save/restore r2)
  156. * and since we don't use a TOC ourselves.
  157. */
  158. PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
  159. #else
  160. /* We can clobber r12 */
  161. PPC_FUNC_ADDR(12, func);
  162. EMIT(PPC_RAW_MTLR(12));
  163. #endif
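/* Branch to LR (the callee) and set LR to the return address */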
  164. EMIT(PPC_RAW_BLRL());
  165. }
  166. static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
  167. u64 func)
  168. {
  169. unsigned int i, ctx_idx = ctx->idx;
  170. /* Load function address into r12 */
  171. PPC_LI64(12, func);
  172. /* For bpf-to-bpf function calls, the callee's address is unknown
  173. * until the last extra pass. As seen above, we use PPC_LI64() to
  174. * load the callee's address, but this may optimize the number of
  175. * instructions required based on the nature of the address.
  176. *
  177. * Since we don't want the number of instructions emitted to change,
  178. * we pad the optimized PPC_LI64() call with NOPs to guarantee that
  179. * we always have a five-instruction sequence, which is the maximum
  180. * that PPC_LI64() can emit.
  181. */
  182. for (i = ctx->idx - ctx_idx; i < 5; i++)
  183. EMIT(PPC_RAW_NOP());
  184. #ifdef PPC64_ELF_ABI_v1
  185. /*
  186. * Load TOC from function descriptor at offset 8.
  187. * We can clobber r2 since we get called through a
  188. * function pointer (so caller will save/restore r2)
  189. * and since we don't use a TOC ourselves.
  190. */
  191. PPC_BPF_LL(2, 12, 8);
  192. /* Load actual entry point from function descriptor */
  193. PPC_BPF_LL(12, 12, 0);
  194. #endif
  195. EMIT(PPC_RAW_MTLR(12));
  196. EMIT(PPC_RAW_BLRL());
  197. }
  198. static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
  199. {
  200. /*
  201. * By now, the eBPF program has already setup parameters in r3, r4 and r5
  202. * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
  203. * r4/BPF_REG_2 - pointer to bpf_array
  204. * r5/BPF_REG_3 - index in bpf_array
  205. */
  206. int b2p_bpf_array = b2p[BPF_REG_2];
  207. int b2p_index = b2p[BPF_REG_3];
  208. /*
  209. * if (index >= array->map.max_entries)
  210. * goto out;
  211. */
  212. EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
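/* The index (BPF_REG_3) is 32-bit: clear its upper 32 bits before the compare */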
  213. EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
  214. EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
  215. PPC_BCC(COND_GE, out);
  216. /*
  217. * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
  218. * goto out;
  219. */
  220. PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
  221. EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
  222. PPC_BCC(COND_GT, out);
  223. /*
  224. * tail_call_cnt++;
  225. */
  226. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
  227. PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
  228. /* prog = array->ptrs[index]; */
  229. EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
  230. EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
  231. PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
  232. /*
  233. * if (prog == NULL)
  234. * goto out;
  235. */
  236. EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
  237. PPC_BCC(COND_EQ, out);
  238. /* goto *(prog->bpf_func + prologue_size); */
  239. PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
  240. #ifdef PPC64_ELF_ABI_v1
  241. /* skip past the function descriptor */
  242. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
  243. FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
  244. #else
  245. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
  246. #endif
  247. EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
  248. /* tear down stack, restore NVRs, ... */
  249. bpf_jit_emit_common_epilogue(image, ctx);
  250. EMIT(PPC_RAW_BCTR());
  251. /* out: */
  252. return 0;
  253. }
  254. /*
  255. * We spill into the redzone always, even if the bpf program has its own stackframe.
  256. * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
  257. */
  258. void bpf_stf_barrier(void);
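/*
 * Fallback barrier: spill and reload r21/r22 in the redzone around a sync,
 * then ori 31,31,0 and fourteen taken branches before returning.
 */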
  259. asm (
  260. " .global bpf_stf_barrier ;"
  261. " bpf_stf_barrier: ;"
  262. " std 21,-64(1) ;"
  263. " std 22,-56(1) ;"
  264. " sync ;"
  265. " ld 21,-64(1) ;"
  266. " ld 22,-56(1) ;"
  267. " ori 31,31,0 ;"
  268. " .rept 14 ;"
  269. " b 1f ;"
  270. " 1: ;"
  271. " .endr ;"
  272. " blr ;"
  273. );
  274. /* Assemble the body code between the prologue & epilogue */
  275. static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
  276. struct codegen_context *ctx,
  277. u32 *addrs, bool extra_pass)
  278. {
  279. enum stf_barrier_type stf_barrier = stf_barrier_type_get();
  280. const struct bpf_insn *insn = fp->insnsi;
  281. int flen = fp->len;
  282. int i, ret;
  283. /* Start of epilogue code - will only be valid 2nd pass onwards */
  284. u32 exit_addr = addrs[flen];
  285. for (i = 0; i < flen; i++) {
  286. u32 code = insn[i].code;
  287. u32 dst_reg = b2p[insn[i].dst_reg];
  288. u32 src_reg = b2p[insn[i].src_reg];
  289. s16 off = insn[i].off;
  290. s32 imm = insn[i].imm;
  291. bool func_addr_fixed;
  292. u64 func_addr;
  293. u64 imm64;
  294. u32 true_cond;
  295. u32 tmp_idx;
  296. /*
  297. * addrs[] maps a BPF bytecode address into a real offset from
  298. * the start of the body code.
  299. */
  300. addrs[i] = ctx->idx * 4;
  301. /*
  302. * As an optimization, we note down which non-volatile registers
  303. * are used so that we can only save/restore those in our
  304. * prologue and epilogue. We do this here regardless of whether
  305. * the actual BPF instruction uses src/dst registers or not
  306. * (for instance, BPF_CALL does not use them). The expectation
  307. * is that those instructions will have src_reg/dst_reg set to
  308. * 0. Even otherwise, we just lose some prologue/epilogue
  309. * optimization but everything else should work without
  310. * any issues.
  311. */
  312. if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
  313. bpf_set_seen_register(ctx, insn[i].dst_reg);
  314. if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
  315. bpf_set_seen_register(ctx, insn[i].src_reg);
  316. switch (code) {
  317. /*
  318. * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
  319. */
  320. case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
  321. case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
  322. EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
  323. goto bpf_alu32_trunc;
  324. case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
  325. case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
  326. EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
  327. goto bpf_alu32_trunc;
  328. case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
  329. case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
  330. if (!imm) {
  331. goto bpf_alu32_trunc;
  332. } else if (imm >= -32768 && imm < 32768) {
  333. EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
  334. } else {
  335. PPC_LI32(b2p[TMP_REG_1], imm);
  336. EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
  337. }
  338. goto bpf_alu32_trunc;
  339. case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
  340. case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
  341. if (!imm) {
  342. goto bpf_alu32_trunc;
  343. } else if (imm > -32768 && imm <= 32768) {
  344. EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
  345. } else {
  346. PPC_LI32(b2p[TMP_REG_1], imm);
  347. EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
  348. }
  349. goto bpf_alu32_trunc;
  350. case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
  351. case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
  352. if (BPF_CLASS(code) == BPF_ALU)
  353. EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
  354. else
  355. EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
  356. goto bpf_alu32_trunc;
  357. case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
  358. case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
  359. if (imm >= -32768 && imm < 32768)
  360. EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
  361. else {
  362. PPC_LI32(b2p[TMP_REG_1], imm);
  363. if (BPF_CLASS(code) == BPF_ALU)
  364. EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
  365. b2p[TMP_REG_1]));
  366. else
  367. EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
  368. b2p[TMP_REG_1]));
  369. }
  370. goto bpf_alu32_trunc;
  371. case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
  372. case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
  373. if (BPF_OP(code) == BPF_MOD) {
  374. EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
  375. EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
  376. b2p[TMP_REG_1]));
  377. EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
  378. } else
  379. EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
  380. goto bpf_alu32_trunc;
  381. case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
  382. case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
  383. if (BPF_OP(code) == BPF_MOD) {
  384. EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
  385. EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
  386. b2p[TMP_REG_1]));
  387. EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
  388. } else
  389. EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
  390. break;
  391. case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
  392. case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
  393. case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
  394. case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
  395. if (imm == 0)
  396. return -EINVAL;
  397. if (imm == 1) {
  398. if (BPF_OP(code) == BPF_DIV) {
  399. goto bpf_alu32_trunc;
  400. } else {
  401. EMIT(PPC_RAW_LI(dst_reg, 0));
  402. break;
  403. }
  404. }
  405. PPC_LI32(b2p[TMP_REG_1], imm);
  406. switch (BPF_CLASS(code)) {
  407. case BPF_ALU:
  408. if (BPF_OP(code) == BPF_MOD) {
  409. EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
  410. dst_reg,
  411. b2p[TMP_REG_1]));
  412. EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
  413. b2p[TMP_REG_1],
  414. b2p[TMP_REG_2]));
  415. EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
  416. b2p[TMP_REG_1]));
  417. } else
  418. EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
  419. b2p[TMP_REG_1]));
  420. break;
  421. case BPF_ALU64:
  422. if (BPF_OP(code) == BPF_MOD) {
  423. EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
  424. dst_reg,
  425. b2p[TMP_REG_1]));
  426. EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
  427. b2p[TMP_REG_1],
  428. b2p[TMP_REG_2]));
  429. EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
  430. b2p[TMP_REG_1]));
  431. } else
  432. EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
  433. b2p[TMP_REG_1]));
  434. break;
  435. }
  436. goto bpf_alu32_trunc;
  437. case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
  438. case BPF_ALU64 | BPF_NEG: /* dst = -dst */
  439. EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
  440. goto bpf_alu32_trunc;
  441. /*
  442. * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
  443. */
  444. case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
  445. case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
  446. EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
  447. goto bpf_alu32_trunc;
  448. case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
  449. case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
  450. if (!IMM_H(imm))
  451. EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
  452. else {
  453. /* Sign-extended */
  454. PPC_LI32(b2p[TMP_REG_1], imm);
  455. EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
  456. }
  457. goto bpf_alu32_trunc;
  458. case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
  459. case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
  460. EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
  461. goto bpf_alu32_trunc;
  462. case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
  463. case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
  464. if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
  465. /* Sign-extended */
  466. PPC_LI32(b2p[TMP_REG_1], imm);
  467. EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
  468. } else {
  469. if (IMM_L(imm))
  470. EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
  471. if (IMM_H(imm))
  472. EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
  473. }
  474. goto bpf_alu32_trunc;
  475. case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
  476. case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
  477. EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
  478. goto bpf_alu32_trunc;
  479. case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
  480. case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
  481. if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
  482. /* Sign-extended */
  483. PPC_LI32(b2p[TMP_REG_1], imm);
  484. EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
  485. } else {
  486. if (IMM_L(imm))
  487. EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
  488. if (IMM_H(imm))
  489. EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
  490. }
  491. goto bpf_alu32_trunc;
  492. case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
  493. /* slw clears top 32 bits */
  494. EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
  495. /* skip zero extension move, but set address map. */
  496. if (insn_is_zext(&insn[i + 1]))
  497. addrs[++i] = ctx->idx * 4;
  498. break;
  499. case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
  500. EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
  501. break;
  502. case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
  503. /* with imm 0, we still need to clear top 32 bits */
  504. EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
  505. if (insn_is_zext(&insn[i + 1]))
  506. addrs[++i] = ctx->idx * 4;
  507. break;
  508. case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
  509. if (imm != 0)
  510. EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
  511. break;
  512. case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
  513. EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
  514. if (insn_is_zext(&insn[i + 1]))
  515. addrs[++i] = ctx->idx * 4;
  516. break;
  517. case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
  518. EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
  519. break;
  520. case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
  521. EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
  522. if (insn_is_zext(&insn[i + 1]))
  523. addrs[++i] = ctx->idx * 4;
  524. break;
  525. case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
  526. if (imm != 0)
  527. EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
  528. break;
  529. case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
  530. EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
  531. goto bpf_alu32_trunc;
  532. case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
  533. EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
  534. break;
  535. case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
  536. EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
  537. goto bpf_alu32_trunc;
  538. case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
  539. if (imm != 0)
  540. EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
  541. break;
  542. /*
  543. * MOV
  544. */
  545. case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
  546. case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
  547. if (imm == 1) {
  548. /* special mov32 for zext */
  549. EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
  550. break;
  551. }
  552. EMIT(PPC_RAW_MR(dst_reg, src_reg));
  553. goto bpf_alu32_trunc;
  554. case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
  555. case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
  556. PPC_LI32(dst_reg, imm);
  557. if (imm < 0)
  558. goto bpf_alu32_trunc;
  559. else if (insn_is_zext(&insn[i + 1]))
  560. addrs[++i] = ctx->idx * 4;
  561. break;
  562. bpf_alu32_trunc:
  563. /* Truncate to 32-bits */
  564. if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
  565. EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
  566. break;
  567. /*
  568. * BPF_FROM_BE/LE
  569. */
  570. case BPF_ALU | BPF_END | BPF_FROM_LE:
  571. case BPF_ALU | BPF_END | BPF_FROM_BE:
  572. #ifdef __BIG_ENDIAN__
  573. if (BPF_SRC(code) == BPF_FROM_BE)
  574. goto emit_clear;
  575. #else /* !__BIG_ENDIAN__ */
  576. if (BPF_SRC(code) == BPF_FROM_LE)
  577. goto emit_clear;
  578. #endif
  579. switch (imm) {
  580. case 16:
  581. /* Rotate 8 bits left & mask with 0x0000ff00 */
  582. EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
  583. /* Rotate 8 bits right & insert LSB to reg */
  584. EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
  585. /* Move result back to dst_reg */
  586. EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
  587. break;
  588. case 32:
  589. /*
  590. * Rotate word left by 8 bits:
  591. * 2 bytes are already in their final position
  592. * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
  593. */
  594. EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
  595. /* Rotate 24 bits and insert byte 1 */
  596. EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
  597. /* Rotate 24 bits and insert byte 3 */
  598. EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
  599. EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
  600. break;
  601. case 64:
  602. /* Store the value to stack and then use byte-reverse loads */
  603. PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
  604. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
  605. if (cpu_has_feature(CPU_FTR_ARCH_206)) {
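/* ldbrx (ISA 2.06 / POWER7 and later) byte-reverses a full doubleword */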
  606. EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
  607. } else {
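/* No ldbrx: combine two byte-reversed word loads into the doubleword */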
  608. EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
  609. if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
  610. EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
  611. EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
  612. EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
  613. if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  614. EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
  615. EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
  616. }
  617. break;
  618. }
  619. break;
  620. emit_clear:
  621. switch (imm) {
  622. case 16:
  623. /* zero-extend 16 bits into 64 bits */
  624. EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
  625. if (insn_is_zext(&insn[i + 1]))
  626. addrs[++i] = ctx->idx * 4;
  627. break;
  628. case 32:
  629. if (!fp->aux->verifier_zext)
  630. /* zero-extend 32 bits into 64 bits */
  631. EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
  632. break;
  633. case 64:
  634. /* nop */
  635. break;
  636. }
  637. break;
  638. /*
  639. * BPF_ST NOSPEC (speculation barrier)
  640. */
  641. case BPF_ST | BPF_NOSPEC:
  642. if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
  643. (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) &&
  644. (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) || !cpu_has_feature(CPU_FTR_HVMODE))))
  645. break;
  646. switch (stf_barrier) {
  647. case STF_BARRIER_EIEIO:
  648. EMIT(0x7c0006ac | 0x02000000);
  649. break;
  650. case STF_BARRIER_SYNC_ORI:
  651. EMIT(PPC_INST_SYNC);
  652. EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 13, 0));
  653. EMIT(PPC_RAW_ORI(31, 31, 0));
  654. break;
  655. case STF_BARRIER_FALLBACK:
  656. EMIT(PPC_INST_MFLR | ___PPC_RT(b2p[TMP_REG_1]));
  657. PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
  658. EMIT(PPC_RAW_MTCTR(12));
  659. EMIT(PPC_INST_BCTR | 0x1);
  660. EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
  661. break;
  662. case STF_BARRIER_NONE:
  663. break;
  664. }
  665. break;
  666. /*
  667. * BPF_ST(X)
  668. */
  669. case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
  670. case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
  671. if (BPF_CLASS(code) == BPF_ST) {
  672. EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
  673. src_reg = b2p[TMP_REG_1];
  674. }
  675. EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
  676. break;
  677. case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
  678. case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
  679. if (BPF_CLASS(code) == BPF_ST) {
  680. EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
  681. src_reg = b2p[TMP_REG_1];
  682. }
  683. EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
  684. break;
  685. case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
  686. case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
  687. if (BPF_CLASS(code) == BPF_ST) {
  688. PPC_LI32(b2p[TMP_REG_1], imm);
  689. src_reg = b2p[TMP_REG_1];
  690. }
  691. EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
  692. break;
  693. case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
  694. case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
  695. if (BPF_CLASS(code) == BPF_ST) {
  696. PPC_LI32(b2p[TMP_REG_1], imm);
  697. src_reg = b2p[TMP_REG_1];
  698. }
  699. PPC_BPF_STL(src_reg, dst_reg, off);
  700. break;
  701. /*
  702. * BPF_STX XADD (atomic_add)
  703. */
  704. /* *(u32 *)(dst + off) += src */
  705. case BPF_STX | BPF_XADD | BPF_W:
  706. /* Get EA into TMP_REG_1 */
  707. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
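/* Remember the loop start so a failed stwcx. can branch back and retry */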
  708. tmp_idx = ctx->idx * 4;
  709. /* load value from memory into TMP_REG_2 */
  710. EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
  711. /* add value from src_reg into this */
  712. EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
  713. /* store result back */
  714. EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
  715. /* we're done if this succeeded */
  716. PPC_BCC_SHORT(COND_NE, tmp_idx);
  717. break;
  718. /* *(u64 *)(dst + off) += src */
  719. case BPF_STX | BPF_XADD | BPF_DW:
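/* Same reservation/store-conditional retry loop as above, on a doubleword */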
  720. EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
  721. tmp_idx = ctx->idx * 4;
  722. EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
  723. EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
  724. EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
  725. PPC_BCC_SHORT(COND_NE, tmp_idx);
  726. break;
  727. /*
  728. * BPF_LDX
  729. */
  730. /* dst = *(u8 *)(ul) (src + off) */
  731. case BPF_LDX | BPF_MEM | BPF_B:
  732. EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
  733. if (insn_is_zext(&insn[i + 1]))
  734. addrs[++i] = ctx->idx * 4;
  735. break;
  736. /* dst = *(u16 *)(ul) (src + off) */
  737. case BPF_LDX | BPF_MEM | BPF_H:
  738. EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
  739. if (insn_is_zext(&insn[i + 1]))
  740. addrs[++i] = ctx->idx * 4;
  741. break;
  742. /* dst = *(u32 *)(ul) (src + off) */
  743. case BPF_LDX | BPF_MEM | BPF_W:
  744. EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
  745. if (insn_is_zext(&insn[i + 1]))
  746. addrs[++i] = ctx->idx * 4;
  747. break;
  748. /* dst = *(u64 *)(ul) (src + off) */
  749. case BPF_LDX | BPF_MEM | BPF_DW:
  750. PPC_BPF_LL(dst_reg, src_reg, off);
  751. break;
  752. /*
  753. * Doubleword load
  754. * 16 byte instruction that uses two 'struct bpf_insn'
  755. */
  756. case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
  757. imm64 = ((u64)(u32) insn[i].imm) |
  758. (((u64)(u32) insn[i+1].imm) << 32);
  759. /* Adjust for two bpf instructions */
  760. addrs[++i] = ctx->idx * 4;
  761. PPC_LI64(dst_reg, imm64);
  762. break;
  763. /*
  764. * Return/Exit
  765. */
  766. case BPF_JMP | BPF_EXIT:
  767. /*
  768. * If this isn't the very last instruction, branch to
  769. * the epilogue. If we _are_ the last instruction,
  770. * we'll just fall through to the epilogue.
  771. */
  772. if (i != flen - 1)
  773. PPC_JMP(exit_addr);
  774. /* else fall through to the epilogue */
  775. break;
  776. /*
  777. * Call kernel helper or bpf function
  778. */
  779. case BPF_JMP | BPF_CALL:
  780. ctx->seen |= SEEN_FUNC;
  781. ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
  782. &func_addr, &func_addr_fixed);
  783. if (ret < 0)
  784. return ret;
  785. if (func_addr_fixed)
  786. bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
  787. else
  788. bpf_jit_emit_func_call_rel(image, ctx, func_addr);
  789. /* move return value from r3 to BPF_REG_0 */
  790. EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
  791. break;
  792. /*
  793. * Jumps and branches
  794. */
  795. case BPF_JMP | BPF_JA:
  796. PPC_JMP(addrs[i + 1 + off]);
  797. break;
  798. case BPF_JMP | BPF_JGT | BPF_K:
  799. case BPF_JMP | BPF_JGT | BPF_X:
  800. case BPF_JMP | BPF_JSGT | BPF_K:
  801. case BPF_JMP | BPF_JSGT | BPF_X:
  802. case BPF_JMP32 | BPF_JGT | BPF_K:
  803. case BPF_JMP32 | BPF_JGT | BPF_X:
  804. case BPF_JMP32 | BPF_JSGT | BPF_K:
  805. case BPF_JMP32 | BPF_JSGT | BPF_X:
  806. true_cond = COND_GT;
  807. goto cond_branch;
  808. case BPF_JMP | BPF_JLT | BPF_K:
  809. case BPF_JMP | BPF_JLT | BPF_X:
  810. case BPF_JMP | BPF_JSLT | BPF_K:
  811. case BPF_JMP | BPF_JSLT | BPF_X:
  812. case BPF_JMP32 | BPF_JLT | BPF_K:
  813. case BPF_JMP32 | BPF_JLT | BPF_X:
  814. case BPF_JMP32 | BPF_JSLT | BPF_K:
  815. case BPF_JMP32 | BPF_JSLT | BPF_X:
  816. true_cond = COND_LT;
  817. goto cond_branch;
  818. case BPF_JMP | BPF_JGE | BPF_K:
  819. case BPF_JMP | BPF_JGE | BPF_X:
  820. case BPF_JMP | BPF_JSGE | BPF_K:
  821. case BPF_JMP | BPF_JSGE | BPF_X:
  822. case BPF_JMP32 | BPF_JGE | BPF_K:
  823. case BPF_JMP32 | BPF_JGE | BPF_X:
  824. case BPF_JMP32 | BPF_JSGE | BPF_K:
  825. case BPF_JMP32 | BPF_JSGE | BPF_X:
  826. true_cond = COND_GE;
  827. goto cond_branch;
  828. case BPF_JMP | BPF_JLE | BPF_K:
  829. case BPF_JMP | BPF_JLE | BPF_X:
  830. case BPF_JMP | BPF_JSLE | BPF_K:
  831. case BPF_JMP | BPF_JSLE | BPF_X:
  832. case BPF_JMP32 | BPF_JLE | BPF_K:
  833. case BPF_JMP32 | BPF_JLE | BPF_X:
  834. case BPF_JMP32 | BPF_JSLE | BPF_K:
  835. case BPF_JMP32 | BPF_JSLE | BPF_X:
  836. true_cond = COND_LE;
  837. goto cond_branch;
  838. case BPF_JMP | BPF_JEQ | BPF_K:
  839. case BPF_JMP | BPF_JEQ | BPF_X:
  840. case BPF_JMP32 | BPF_JEQ | BPF_K:
  841. case BPF_JMP32 | BPF_JEQ | BPF_X:
  842. true_cond = COND_EQ;
  843. goto cond_branch;
  844. case BPF_JMP | BPF_JNE | BPF_K:
  845. case BPF_JMP | BPF_JNE | BPF_X:
  846. case BPF_JMP32 | BPF_JNE | BPF_K:
  847. case BPF_JMP32 | BPF_JNE | BPF_X:
  848. true_cond = COND_NE;
  849. goto cond_branch;
  850. case BPF_JMP | BPF_JSET | BPF_K:
  851. case BPF_JMP | BPF_JSET | BPF_X:
  852. case BPF_JMP32 | BPF_JSET | BPF_K:
  853. case BPF_JMP32 | BPF_JSET | BPF_X:
  854. true_cond = COND_NE;
  855. /* Fall through */
  856. cond_branch:
  857. switch (code) {
  858. case BPF_JMP | BPF_JGT | BPF_X:
  859. case BPF_JMP | BPF_JLT | BPF_X:
  860. case BPF_JMP | BPF_JGE | BPF_X:
  861. case BPF_JMP | BPF_JLE | BPF_X:
  862. case BPF_JMP | BPF_JEQ | BPF_X:
  863. case BPF_JMP | BPF_JNE | BPF_X:
  864. case BPF_JMP32 | BPF_JGT | BPF_X:
  865. case BPF_JMP32 | BPF_JLT | BPF_X:
  866. case BPF_JMP32 | BPF_JGE | BPF_X:
  867. case BPF_JMP32 | BPF_JLE | BPF_X:
  868. case BPF_JMP32 | BPF_JEQ | BPF_X:
  869. case BPF_JMP32 | BPF_JNE | BPF_X:
  870. /* unsigned comparison */
  871. if (BPF_CLASS(code) == BPF_JMP32)
  872. EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
  873. else
  874. EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
  875. break;
  876. case BPF_JMP | BPF_JSGT | BPF_X:
  877. case BPF_JMP | BPF_JSLT | BPF_X:
  878. case BPF_JMP | BPF_JSGE | BPF_X:
  879. case BPF_JMP | BPF_JSLE | BPF_X:
  880. case BPF_JMP32 | BPF_JSGT | BPF_X:
  881. case BPF_JMP32 | BPF_JSLT | BPF_X:
  882. case BPF_JMP32 | BPF_JSGE | BPF_X:
  883. case BPF_JMP32 | BPF_JSLE | BPF_X:
  884. /* signed comparison */
  885. if (BPF_CLASS(code) == BPF_JMP32)
  886. EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
  887. else
  888. EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
  889. break;
  890. case BPF_JMP | BPF_JSET | BPF_X:
  891. case BPF_JMP32 | BPF_JSET | BPF_X:
  892. if (BPF_CLASS(code) == BPF_JMP) {
  893. EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
  894. src_reg));
  895. } else {
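/* BPF_JMP32: test only the lower 32 bits; rlwinm. sets CR0 on the masked result */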
  896. int tmp_reg = b2p[TMP_REG_1];
  897. EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
  898. EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
  899. 31));
  900. }
  901. break;
  902. case BPF_JMP | BPF_JNE | BPF_K:
  903. case BPF_JMP | BPF_JEQ | BPF_K:
  904. case BPF_JMP | BPF_JGT | BPF_K:
  905. case BPF_JMP | BPF_JLT | BPF_K:
  906. case BPF_JMP | BPF_JGE | BPF_K:
  907. case BPF_JMP | BPF_JLE | BPF_K:
  908. case BPF_JMP32 | BPF_JNE | BPF_K:
  909. case BPF_JMP32 | BPF_JEQ | BPF_K:
  910. case BPF_JMP32 | BPF_JGT | BPF_K:
  911. case BPF_JMP32 | BPF_JLT | BPF_K:
  912. case BPF_JMP32 | BPF_JGE | BPF_K:
  913. case BPF_JMP32 | BPF_JLE | BPF_K:
  914. {
  915. bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
  916. /*
  917. * Need sign-extended load, so only positive
  918. * values can be used as imm in cmpldi
  919. */
  920. if (imm >= 0 && imm < 32768) {
  921. if (is_jmp32)
  922. EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
  923. else
  924. EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
  925. } else {
  926. /* sign-extending load */
  927. PPC_LI32(b2p[TMP_REG_1], imm);
  928. /* ... but unsigned comparison */
  929. if (is_jmp32)
  930. EMIT(PPC_RAW_CMPLW(dst_reg,
  931. b2p[TMP_REG_1]));
  932. else
  933. EMIT(PPC_RAW_CMPLD(dst_reg,
  934. b2p[TMP_REG_1]));
  935. }
  936. break;
  937. }
  938. case BPF_JMP | BPF_JSGT | BPF_K:
  939. case BPF_JMP | BPF_JSLT | BPF_K:
  940. case BPF_JMP | BPF_JSGE | BPF_K:
  941. case BPF_JMP | BPF_JSLE | BPF_K:
  942. case BPF_JMP32 | BPF_JSGT | BPF_K:
  943. case BPF_JMP32 | BPF_JSLT | BPF_K:
  944. case BPF_JMP32 | BPF_JSGE | BPF_K:
  945. case BPF_JMP32 | BPF_JSLE | BPF_K:
  946. {
  947. bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
  948. /*
  949. * signed comparison, so any 16-bit value
  950. * can be used in cmpdi
  951. */
  952. if (imm >= -32768 && imm < 32768) {
  953. if (is_jmp32)
  954. EMIT(PPC_RAW_CMPWI(dst_reg, imm));
  955. else
  956. EMIT(PPC_RAW_CMPDI(dst_reg, imm));
  957. } else {
  958. PPC_LI32(b2p[TMP_REG_1], imm);
  959. if (is_jmp32)
  960. EMIT(PPC_RAW_CMPW(dst_reg,
  961. b2p[TMP_REG_1]));
  962. else
  963. EMIT(PPC_RAW_CMPD(dst_reg,
  964. b2p[TMP_REG_1]));
  965. }
  966. break;
  967. }
  968. case BPF_JMP | BPF_JSET | BPF_K:
  969. case BPF_JMP32 | BPF_JSET | BPF_K:
  970. /* andi does not sign-extend the immediate */
  971. if (imm >= 0 && imm < 32768)
  972. /* PPC_ANDI is _only/always_ dot-form */
  973. EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
  974. else {
  975. int tmp_reg = b2p[TMP_REG_1];
  976. PPC_LI32(tmp_reg, imm);
  977. if (BPF_CLASS(code) == BPF_JMP) {
  978. EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
  979. tmp_reg));
  980. } else {
  981. EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
  982. tmp_reg));
  983. EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
  984. 0, 0, 31));
  985. }
  986. }
  987. break;
  988. }
  989. PPC_BCC(true_cond, addrs[i + 1 + off]);
  990. break;
  991. /*
  992. * Tail call
  993. */
  994. case BPF_JMP | BPF_TAIL_CALL:
  995. ctx->seen |= SEEN_TAILCALL;
  996. ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
  997. if (ret < 0)
  998. return ret;
  999. break;
  1000. default:
  1001. /*
  1002. * The filter contains something cruel & unusual.
  1003. * We don't handle it, but also there shouldn't be
  1004. * anything missing from our list.
  1005. */
  1006. pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
  1007. code, i);
  1008. return -ENOTSUPP;
  1009. }
  1010. }
  1011. /* Set end-of-body-code address for exit. */
  1012. addrs[i] = ctx->idx * 4;
  1013. return 0;
  1014. }
  1015. /* Fix the branch target addresses for subprog calls */
  1016. static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
  1017. struct codegen_context *ctx, u32 *addrs)
  1018. {
  1019. const struct bpf_insn *insn = fp->insnsi;
  1020. bool func_addr_fixed;
  1021. u64 func_addr;
  1022. u32 tmp_idx;
  1023. int i, ret;
  1024. for (i = 0; i < fp->len; i++) {
  1025. /*
  1026. * During the extra pass, only the branch target addresses for
  1027. * the subprog calls need to be fixed. All other instructions
  1028. can be left untouched.
  1029. *
  1030. * The JITed image length does not change because we already
  1031. * ensure that the JITed instruction sequence for these calls
  1032. * are of fixed length by padding them with NOPs.
  1033. */
  1034. if (insn[i].code == (BPF_JMP | BPF_CALL) &&
  1035. insn[i].src_reg == BPF_PSEUDO_CALL) {
  1036. ret = bpf_jit_get_func_addr(fp, &insn[i], true,
  1037. &func_addr,
  1038. &func_addr_fixed);
  1039. if (ret < 0)
  1040. return ret;
  1041. /*
  1042. * Save ctx->idx as this would currently point to the
  1043. * end of the JITed image and set it to the offset of
  1044. * the instruction sequence corresponding to the
  1045. * subprog call temporarily.
  1046. */
  1047. tmp_idx = ctx->idx;
  1048. ctx->idx = addrs[i] / 4;
  1049. bpf_jit_emit_func_call_rel(image, ctx, func_addr);
  1050. /*
  1051. * Restore ctx->idx here. This is safe as the length
  1052. * of the JITed sequence remains unchanged.
  1053. */
  1054. ctx->idx = tmp_idx;
  1055. }
  1056. }
  1057. return 0;
  1058. }
  1059. struct powerpc64_jit_data {
  1060. struct bpf_binary_header *header;
  1061. u32 *addrs;
  1062. u8 *image;
  1063. u32 proglen;
  1064. struct codegen_context ctx;
  1065. };
  1066. bool bpf_jit_needs_zext(void)
  1067. {
  1068. return true;
  1069. }
  1070. struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
  1071. {
  1072. u32 proglen;
  1073. u32 alloclen;
  1074. u8 *image = NULL;
  1075. u32 *code_base;
  1076. u32 *addrs;
  1077. struct powerpc64_jit_data *jit_data;
  1078. struct codegen_context cgctx;
  1079. int pass;
  1080. int flen;
  1081. struct bpf_binary_header *bpf_hdr;
  1082. struct bpf_prog *org_fp = fp;
  1083. struct bpf_prog *tmp_fp;
  1084. bool bpf_blinded = false;
  1085. bool extra_pass = false;
  1086. if (!fp->jit_requested)
  1087. return org_fp;
  1088. tmp_fp = bpf_jit_blind_constants(org_fp);
  1089. if (IS_ERR(tmp_fp))
  1090. return org_fp;
  1091. if (tmp_fp != org_fp) {
  1092. bpf_blinded = true;
  1093. fp = tmp_fp;
  1094. }
  1095. jit_data = fp->aux->jit_data;
  1096. if (!jit_data) {
  1097. jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
  1098. if (!jit_data) {
  1099. fp = org_fp;
  1100. goto out;
  1101. }
  1102. fp->aux->jit_data = jit_data;
  1103. }
  1104. flen = fp->len;
  1105. addrs = jit_data->addrs;
  1106. if (addrs) {
  1107. cgctx = jit_data->ctx;
  1108. image = jit_data->image;
  1109. bpf_hdr = jit_data->header;
  1110. proglen = jit_data->proglen;
  1111. alloclen = proglen + FUNCTION_DESCR_SIZE;
  1112. extra_pass = true;
  1113. goto skip_init_ctx;
  1114. }
  1115. addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
  1116. if (addrs == NULL) {
  1117. fp = org_fp;
  1118. goto out_addrs;
  1119. }
  1120. memset(&cgctx, 0, sizeof(struct codegen_context));
  1121. /* Make sure that the stack is quadword aligned. */
  1122. cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
  1123. /* Scouting faux-generate pass 0 */
  1124. if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
  1125. /* We hit something illegal or unsupported. */
  1126. fp = org_fp;
  1127. goto out_addrs;
  1128. }
  1129. /*
  1130. * If we have seen a tail call, we need a second pass.
  1131. * This is because bpf_jit_emit_common_epilogue() is called
  1132. * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
  1133. */
  1134. if (cgctx.seen & SEEN_TAILCALL) {
  1135. cgctx.idx = 0;
  1136. if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
  1137. fp = org_fp;
  1138. goto out_addrs;
  1139. }
  1140. }
  1141. /*
  1142. * Pretend to build prologue, given the features we've seen. This will
  1143. update cgctx.idx as it pretends to output instructions, then we can
  1144. * calculate total size from idx.
  1145. */
  1146. bpf_jit_build_prologue(0, &cgctx);
  1147. bpf_jit_build_epilogue(0, &cgctx);
  1148. proglen = cgctx.idx * 4;
  1149. alloclen = proglen + FUNCTION_DESCR_SIZE;
  1150. bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
  1151. bpf_jit_fill_ill_insns);
  1152. if (!bpf_hdr) {
  1153. fp = org_fp;
  1154. goto out_addrs;
  1155. }
  1156. skip_init_ctx:
  1157. code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
  1158. if (extra_pass) {
  1159. /*
  1160. * Do not touch the prologue and epilogue as they will remain
  1161. * unchanged. Only fix the branch target address for subprog
  1162. * calls in the body.
  1163. *
  1164. * This does not change the offsets and lengths of the subprog
  1165. * call instruction sequences and hence, the size of the JITed
  1166. * image as well.
  1167. */
  1168. bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
  1169. /* There is no need to perform the usual passes. */
  1170. goto skip_codegen_passes;
  1171. }
  1172. /* Code generation passes 1-2 */
  1173. for (pass = 1; pass < 3; pass++) {
  1174. /* Now build the prologue, body code & epilogue for real. */
  1175. cgctx.idx = 0;
  1176. bpf_jit_build_prologue(code_base, &cgctx);
  1177. bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
  1178. bpf_jit_build_epilogue(code_base, &cgctx);
  1179. if (bpf_jit_enable > 1)
  1180. pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
  1181. proglen - (cgctx.idx * 4), cgctx.seen);
  1182. }
  1183. skip_codegen_passes:
  1184. if (bpf_jit_enable > 1)
  1185. /*
  1186. * Note that we output the base address of code_base
  1187. * rather than image, since opcodes are in code_base.
  1188. */
  1189. bpf_jit_dump(flen, proglen, pass, code_base);
  1190. #ifdef PPC64_ELF_ABI_v1
  1191. /* Function descriptor nastiness: Address + TOC */
  1192. ((u64 *)image)[0] = (u64)code_base;
  1193. ((u64 *)image)[1] = local_paca->kernel_toc;
  1194. #endif
  1195. fp->bpf_func = (void *)image;
  1196. fp->jited = 1;
  1197. fp->jited_len = alloclen;
  1198. bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
  1199. if (!fp->is_func || extra_pass) {
  1200. bpf_prog_fill_jited_linfo(fp, addrs);
  1201. out_addrs:
  1202. kfree(addrs);
  1203. kfree(jit_data);
  1204. fp->aux->jit_data = NULL;
  1205. } else {
  1206. jit_data->addrs = addrs;
  1207. jit_data->ctx = cgctx;
  1208. jit_data->proglen = proglen;
  1209. jit_data->image = image;
  1210. jit_data->header = bpf_hdr;
  1211. }
  1212. out:
  1213. if (bpf_blinded)
  1214. bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
  1215. return fp;
  1216. }
  1217. /* Overriding bpf_jit_free() as we don't set images read-only. */
  1218. void bpf_jit_free(struct bpf_prog *fp)
  1219. {
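/* The binary header sits at the start of the page containing the JITed image */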
  1220. unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
  1221. struct bpf_binary_header *bpf_hdr = (void *)addr;
  1222. if (fp->jited)
  1223. bpf_jit_binary_free(bpf_hdr);
  1224. bpf_prog_unlock_free(fp);
  1225. }