bpf_jit.c

  1. /*
  2. * Just-In-Time compiler for BPF filters on MIPS
  3. *
  4. * Copyright (c) 2014 Imagination Technologies Ltd.
  5. * Author: Markos Chandras <markos.chandras@imgtec.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of the GNU General Public License as published by the
  9. * Free Software Foundation; version 2 of the License.
  10. */
  11. #include <linux/bitops.h>
  12. #include <linux/compiler.h>
  13. #include <linux/errno.h>
  14. #include <linux/filter.h>
  15. #include <linux/if_vlan.h>
  16. #include <linux/moduleloader.h>
  17. #include <linux/netdevice.h>
  18. #include <linux/string.h>
  19. #include <linux/slab.h>
  20. #include <linux/types.h>
  21. #include <asm/asm.h>
  22. #include <asm/bitops.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/cpu-features.h>
  25. #include <asm/uasm.h>
  26. #include "bpf_jit.h"
  27. /* ABI
  28. * r_skb_hl SKB header length
  29. * r_data SKB data pointer
  30. * r_off Offset
  31. * r_A BPF register A
  32. * r_X BPF register X
  33. * r_skb *skb
  34. * r_M *scratch memory
  35. * r_skb_len SKB length
  36. *
  37. * On entry (*bpf_func)(*skb, *filter)
  38. * a0 = MIPS_R_A0 = skb;
  39. * a1 = MIPS_R_A1 = filter;
  40. *
  41. * Stack
  42. * ...
  43. * M[15]
  44. * M[14]
  45. * M[13]
  46. * ...
  47. * M[0] <-- r_M
  48. * saved reg k-1
  49. * saved reg k-2
  50. * ...
  51. * saved reg 0 <-- r_sp
  52. * <no argument area>
  53. *
  54. * Packet layout
  55. *
  56. * <--------------------- len ------------------------>
  57. * <--skb-len(r_skb_hl)--><----- skb->data_len ------>
  58. * ----------------------------------------------------
  59. * | skb->data |
  60. * ----------------------------------------------------
  61. */
  62. #define ptr typeof(unsigned long)
  63. #define SCRATCH_OFF(k) (4 * (k))
  64. /* JIT flags */
  65. #define SEEN_CALL (1 << BPF_MEMWORDS)
  66. #define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
  67. #define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
  68. #define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
  69. #define SEEN_OFF SEEN_SREG(2)
  70. #define SEEN_A SEEN_SREG(3)
  71. #define SEEN_X SEEN_SREG(4)
  72. #define SEEN_SKB SEEN_SREG(5)
  73. #define SEEN_MEM SEEN_SREG(6)
  74. /* SEEN_SKB_DATA also implies skb_hl and skb_len */
  75. #define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
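/*
 * Illustration (see the r_* register mapping in bpf_jit.h): bit
 * (SEEN_SREG_SFT + x) means callee-saved register $sx is used by the
 * JIT and must be saved/restored.  For example SEEN_A is SEEN_SREG(3)
 * because r_A lives in $s3, and SEEN_SKB_DATA sets the bits for
 * $s7/$s1/$s0 because r_skb_len, r_skb_data and r_skb_hl are all set
 * up together in the prologue.
 */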
  76. /* Arguments used by JIT */
  77. #define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
  78. #define SBIT(x) (1 << (x)) /* Signed version of BIT() */
  79. /**
  80. * struct jit_ctx - JIT context
  81. * @skf: The sk_filter
  82. * @prologue_bytes: Number of bytes for prologue
  83. * @idx: Instruction index
  84. * @flags: JIT flags
  85. * @offsets: Instruction offsets
  86. * @target: Memory location for the compiled filter
  87. */
  88. struct jit_ctx {
  89. const struct bpf_prog *skf;
  90. unsigned int prologue_bytes;
  91. u32 idx;
  92. u32 flags;
  93. u32 *offsets;
  94. u32 *target;
  95. };
  96. static inline int optimize_div(u32 *k)
  97. {
  98. /* power of 2 divides can be implemented with right shift */
  99. if (!(*k & (*k-1))) {
  100. *k = ilog2(*k);
  101. return 1;
  102. }
  103. return 0;
  104. }
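/*
 * Example: k = 16 has 16 & 15 == 0, so *k is rewritten to
 * ilog2(16) = 4 and the caller emits a single srl instead of a
 * divu/mflo pair.  (k == 0 would also pass this test, but classic BPF
 * programs containing a constant divide by zero are rejected before
 * the JIT is ever invoked.)
 */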
  105. static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
  106. /* Simply emit the instruction if the JIT memory space has been allocated */
  107. #define emit_instr(ctx, func, ...) \
  108. do { \
  109. if ((ctx)->target != NULL) { \
  110. u32 *p = &(ctx)->target[ctx->idx]; \
  111. uasm_i_##func(&p, ##__VA_ARGS__); \
  112. } \
  113. (ctx)->idx++; \
  114. } while (0)
  115. /*
  116. * Similar to emit_instr but it must be used when we need to emit
  117. * 32-bit or 64-bit instructions
  118. */
  119. #define emit_long_instr(ctx, func, ...) \
  120. do { \
  121. if ((ctx)->target != NULL) { \
  122. u32 *p = &(ctx)->target[ctx->idx]; \
  123. UASM_i_##func(&p, ##__VA_ARGS__); \
  124. } \
  125. (ctx)->idx++; \
  126. } while (0)
  127. /* Determine if immediate is within the 16-bit signed range */
  128. static inline bool is_range16(s32 imm)
  129. {
  130. return !(imm >= SBIT(15) || imm < -SBIT(15));
  131. }
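/* Example: 0x7fff and -0x8000 are in range; 0x8000 and -0x8001 are not. */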
  132. static inline void emit_addu(unsigned int dst, unsigned int src1,
  133. unsigned int src2, struct jit_ctx *ctx)
  134. {
  135. emit_instr(ctx, addu, dst, src1, src2);
  136. }
  137. static inline void emit_nop(struct jit_ctx *ctx)
  138. {
  139. emit_instr(ctx, nop);
  140. }
  141. /* Load a u32 immediate to a register */
  142. static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
  143. {
  144. if (ctx->target != NULL) {
  145. /* addiu can only handle s16 */
  146. if (!is_range16(imm)) {
  147. u32 *p = &ctx->target[ctx->idx];
  148. uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
  149. p = &ctx->target[ctx->idx + 1];
  150. uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
  151. } else {
  152. u32 *p = &ctx->target[ctx->idx];
  153. uasm_i_addiu(&p, dst, r_zero, imm);
  154. }
  155. }
  156. ctx->idx++;
  157. if (!is_range16(imm))
  158. ctx->idx++;
  159. }
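/*
 * Example: imm = 0x12345678 does not fit the signed 16-bit addiu
 * immediate, so two instructions are emitted:
 *   lui  r_tmp_imm, 0x1234
 *   ori  dst, r_tmp_imm, 0x5678
 * whereas imm = 0x1234 collapses to a single "addiu dst, $zero, 0x1234".
 */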
  160. static inline void emit_or(unsigned int dst, unsigned int src1,
  161. unsigned int src2, struct jit_ctx *ctx)
  162. {
  163. emit_instr(ctx, or, dst, src1, src2);
  164. }
  165. static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
  166. struct jit_ctx *ctx)
  167. {
  168. if (imm >= BIT(16)) {
  169. emit_load_imm(r_tmp, imm, ctx);
  170. emit_or(dst, src, r_tmp, ctx);
  171. } else {
  172. emit_instr(ctx, ori, dst, src, imm);
  173. }
  174. }
  175. static inline void emit_daddiu(unsigned int dst, unsigned int src,
  176. int imm, struct jit_ctx *ctx)
  177. {
  178. /*
  179. * Only used for stack, so the imm is relatively small
  180. * and it fits in 15-bits
  181. */
  182. emit_instr(ctx, daddiu, dst, src, imm);
  183. }
  184. static inline void emit_addiu(unsigned int dst, unsigned int src,
  185. u32 imm, struct jit_ctx *ctx)
  186. {
  187. if (!is_range16(imm)) {
  188. emit_load_imm(r_tmp, imm, ctx);
  189. emit_addu(dst, r_tmp, src, ctx);
  190. } else {
  191. emit_instr(ctx, addiu, dst, src, imm);
  192. }
  193. }
  194. static inline void emit_and(unsigned int dst, unsigned int src1,
  195. unsigned int src2, struct jit_ctx *ctx)
  196. {
  197. emit_instr(ctx, and, dst, src1, src2);
  198. }
  199. static inline void emit_andi(unsigned int dst, unsigned int src,
  200. u32 imm, struct jit_ctx *ctx)
  201. {
  202. /* If imm does not fit in u16 then load it to register */
  203. if (imm >= BIT(16)) {
  204. emit_load_imm(r_tmp, imm, ctx);
  205. emit_and(dst, src, r_tmp, ctx);
  206. } else {
  207. emit_instr(ctx, andi, dst, src, imm);
  208. }
  209. }
  210. static inline void emit_xor(unsigned int dst, unsigned int src1,
  211. unsigned int src2, struct jit_ctx *ctx)
  212. {
  213. emit_instr(ctx, xor, dst, src1, src2);
  214. }
  215. static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
  216. {
  217. /* If imm does not fit in u16 then load it to register */
  218. if (imm >= BIT(16)) {
  219. emit_load_imm(r_tmp, imm, ctx);
  220. emit_xor(dst, src, r_tmp, ctx);
  221. } else {
  222. emit_instr(ctx, xori, dst, src, imm);
  223. }
  224. }
  225. static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
  226. {
  227. emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
  228. }
  229. static inline void emit_subu(unsigned int dst, unsigned int src1,
  230. unsigned int src2, struct jit_ctx *ctx)
  231. {
  232. emit_instr(ctx, subu, dst, src1, src2);
  233. }
  234. static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
  235. {
  236. emit_subu(reg, r_zero, reg, ctx);
  237. }
  238. static inline void emit_sllv(unsigned int dst, unsigned int src,
  239. unsigned int sa, struct jit_ctx *ctx)
  240. {
  241. emit_instr(ctx, sllv, dst, src, sa);
  242. }
  243. static inline void emit_sll(unsigned int dst, unsigned int src,
  244. unsigned int sa, struct jit_ctx *ctx)
  245. {
  246. /* sa is 5-bits long */
  247. if (sa >= BIT(5))
  248. /* Shifting >= 32 results in zero */
  249. emit_jit_reg_move(dst, r_zero, ctx);
  250. else
  251. emit_instr(ctx, sll, dst, src, sa);
  252. }
  253. static inline void emit_srlv(unsigned int dst, unsigned int src,
  254. unsigned int sa, struct jit_ctx *ctx)
  255. {
  256. emit_instr(ctx, srlv, dst, src, sa);
  257. }
  258. static inline void emit_srl(unsigned int dst, unsigned int src,
  259. unsigned int sa, struct jit_ctx *ctx)
  260. {
  261. /* sa is 5-bits long */
  262. if (sa >= BIT(5))
  263. /* Shifting >= 32 results in zero */
  264. emit_jit_reg_move(dst, r_zero, ctx);
  265. else
  266. emit_instr(ctx, srl, dst, src, sa);
  267. }
  268. static inline void emit_slt(unsigned int dst, unsigned int src1,
  269. unsigned int src2, struct jit_ctx *ctx)
  270. {
  271. emit_instr(ctx, slt, dst, src1, src2);
  272. }
  273. static inline void emit_sltu(unsigned int dst, unsigned int src1,
  274. unsigned int src2, struct jit_ctx *ctx)
  275. {
  276. emit_instr(ctx, sltu, dst, src1, src2);
  277. }
  278. static inline void emit_sltiu(unsigned dst, unsigned int src,
  279. unsigned int imm, struct jit_ctx *ctx)
  280. {
  281. /* 16 bit immediate */
  282. if (!is_range16((s32)imm)) {
  283. emit_load_imm(r_tmp, imm, ctx);
  284. emit_sltu(dst, src, r_tmp, ctx);
  285. } else {
  286. emit_instr(ctx, sltiu, dst, src, imm);
  287. }
  288. }
  289. /* Store register on the stack */
  290. static inline void emit_store_stack_reg(ptr reg, ptr base,
  291. unsigned int offset,
  292. struct jit_ctx *ctx)
  293. {
  294. emit_long_instr(ctx, SW, reg, offset, base);
  295. }
  296. static inline void emit_store(ptr reg, ptr base, unsigned int offset,
  297. struct jit_ctx *ctx)
  298. {
  299. emit_instr(ctx, sw, reg, offset, base);
  300. }
  301. static inline void emit_load_stack_reg(ptr reg, ptr base,
  302. unsigned int offset,
  303. struct jit_ctx *ctx)
  304. {
  305. emit_long_instr(ctx, LW, reg, offset, base);
  306. }
  307. static inline void emit_load(unsigned int reg, unsigned int base,
  308. unsigned int offset, struct jit_ctx *ctx)
  309. {
  310. emit_instr(ctx, lw, reg, offset, base);
  311. }
  312. static inline void emit_load_byte(unsigned int reg, unsigned int base,
  313. unsigned int offset, struct jit_ctx *ctx)
  314. {
  315. emit_instr(ctx, lb, reg, offset, base);
  316. }
  317. static inline void emit_half_load(unsigned int reg, unsigned int base,
  318. unsigned int offset, struct jit_ctx *ctx)
  319. {
  320. emit_instr(ctx, lh, reg, offset, base);
  321. }
  322. static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
  323. unsigned int offset, struct jit_ctx *ctx)
  324. {
  325. emit_instr(ctx, lhu, reg, offset, base);
  326. }
  327. static inline void emit_mul(unsigned int dst, unsigned int src1,
  328. unsigned int src2, struct jit_ctx *ctx)
  329. {
  330. emit_instr(ctx, mul, dst, src1, src2);
  331. }
  332. static inline void emit_div(unsigned int dst, unsigned int src,
  333. struct jit_ctx *ctx)
  334. {
  335. if (ctx->target != NULL) {
  336. u32 *p = &ctx->target[ctx->idx];
  337. uasm_i_divu(&p, dst, src);
  338. p = &ctx->target[ctx->idx + 1];
  339. uasm_i_mflo(&p, dst);
  340. }
  341. ctx->idx += 2; /* 2 insts */
  342. }
  343. static inline void emit_mod(unsigned int dst, unsigned int src,
  344. struct jit_ctx *ctx)
  345. {
  346. if (ctx->target != NULL) {
  347. u32 *p = &ctx->target[ctx->idx];
  348. uasm_i_divu(&p, dst, src);
  349. p = &ctx->target[ctx->idx + 1];
  350. uasm_i_mfhi(&p, dst);
  351. }
  352. ctx->idx += 2; /* 2 insts */
  353. }
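/*
 * Both helpers above rely on the MIPS divu instruction leaving the
 * quotient in LO and the remainder in HI, hence the mflo in emit_div
 * and the mfhi in emit_mod.
 */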
  354. static inline void emit_dsll(unsigned int dst, unsigned int src,
  355. unsigned int sa, struct jit_ctx *ctx)
  356. {
  357. emit_instr(ctx, dsll, dst, src, sa);
  358. }
  359. static inline void emit_dsrl32(unsigned int dst, unsigned int src,
  360. unsigned int sa, struct jit_ctx *ctx)
  361. {
  362. emit_instr(ctx, dsrl32, dst, src, sa);
  363. }
  364. static inline void emit_wsbh(unsigned int dst, unsigned int src,
  365. struct jit_ctx *ctx)
  366. {
  367. emit_instr(ctx, wsbh, dst, src);
  368. }
  369. /* load pointer to register */
  370. static inline void emit_load_ptr(unsigned int dst, unsigned int src,
  371. int imm, struct jit_ctx *ctx)
  372. {
  373. /* src contains the base addr of the 32/64-pointer */
  374. emit_long_instr(ctx, LW, dst, imm, src);
  375. }
  376. /* load a function pointer to register */
  377. static inline void emit_load_func(unsigned int reg, ptr imm,
  378. struct jit_ctx *ctx)
  379. {
  380. if (IS_ENABLED(CONFIG_64BIT)) {
  381. /* At this point imm is always 64-bit */
  382. emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
  383. emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
  384. emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
  385. emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
  386. emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
  387. } else {
  388. emit_load_imm(reg, imm, ctx);
  389. }
  390. }
  391. /* Move to real MIPS register */
  392. static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
  393. {
  394. emit_long_instr(ctx, ADDU, dst, src, r_zero);
  395. }
  396. /* Move to JIT (32-bit) register */
  397. static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
  398. {
  399. emit_addu(dst, src, r_zero, ctx);
  400. }
  401. /* Compute the immediate value for PC-relative branches. */
  402. static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
  403. {
  404. if (ctx->target == NULL)
  405. return 0;
  406. /*
  407. * We want a pc-relative branch. We only do forward branches
  408. * so tgt is always after pc. tgt is the instruction offset
  409. * we want to jump to.
  410. * Branch on MIPS:
  411. * I: target_offset <- sign_extend(offset)
  412. * I+1: PC += target_offset (delay slot)
  413. *
  414. * ctx->idx currently points to the branch instruction
  415. * but the offset is added to the delay slot so we need
  416. * to subtract 4.
  417. */
  418. return ctx->offsets[tgt] -
  419. (ctx->idx * 4 - ctx->prologue_bytes) - 4;
  420. }
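/*
 * Worked example: with a 32-byte prologue, a branch emitted at
 * ctx->idx == 18 sits at body byte offset 18 * 4 - 32 = 40.  If the
 * target was recorded at ctx->offsets[tgt] == 80, the returned offset
 * is 80 - 40 - 4 = 36 bytes, i.e. nine instructions past the delay
 * slot.
 */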
  421. static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
  422. unsigned int imm, struct jit_ctx *ctx)
  423. {
  424. if (ctx->target != NULL) {
  425. u32 *p = &ctx->target[ctx->idx];
  426. switch (cond) {
  427. case MIPS_COND_EQ:
  428. uasm_i_beq(&p, reg1, reg2, imm);
  429. break;
  430. case MIPS_COND_NE:
  431. uasm_i_bne(&p, reg1, reg2, imm);
  432. break;
  433. case MIPS_COND_ALL:
  434. uasm_i_b(&p, imm);
  435. break;
  436. default:
  437. pr_warn("%s: Unhandled branch conditional: %d\n",
  438. __func__, cond);
  439. }
  440. }
  441. ctx->idx++;
  442. }
  443. static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
  444. {
  445. emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
  446. }
  447. static inline void emit_jalr(unsigned int link, unsigned int reg,
  448. struct jit_ctx *ctx)
  449. {
  450. emit_instr(ctx, jalr, link, reg);
  451. }
  452. static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
  453. {
  454. emit_instr(ctx, jr, reg);
  455. }
  456. static inline u16 align_sp(unsigned int num)
  457. {
  458. /* Double word alignment for 32-bit, quadword for 64-bit */
  459. unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
  460. num = (num + (align - 1)) & -align;
  461. return num;
  462. }
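/* Example: align_sp(20) returns 24 on 32-bit kernels and 32 on 64-bit ones. */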
  463. static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
  464. {
  465. int i = 0, real_off = 0;
  466. u32 sflags, tmp_flags;
  467. /* Adjust the stack pointer */
  468. if (offset)
  469. emit_stack_offset(-align_sp(offset), ctx);
  470. tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
  471. /* sflags is essentially a bitmap */
  472. while (tmp_flags) {
  473. if ((sflags >> i) & 0x1) {
  474. emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
  475. ctx);
  476. real_off += SZREG;
  477. }
  478. i++;
  479. tmp_flags >>= 1;
  480. }
  481. /* save return address */
  482. if (ctx->flags & SEEN_CALL) {
  483. emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
  484. real_off += SZREG;
  485. }
  486. /* Setup r_M leaving the alignment gap if necessary */
  487. if (ctx->flags & SEEN_MEM) {
  488. if (real_off % (SZREG * 2))
  489. real_off += SZREG;
  490. emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
  491. }
  492. }
  493. static void restore_bpf_jit_regs(struct jit_ctx *ctx,
  494. unsigned int offset)
  495. {
  496. int i, real_off = 0;
  497. u32 sflags, tmp_flags;
  498. tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
  499. /* sflags is a bitmap */
  500. i = 0;
  501. while (tmp_flags) {
  502. if ((sflags >> i) & 0x1) {
  503. emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
  504. ctx);
  505. real_off += SZREG;
  506. }
  507. i++;
  508. tmp_flags >>= 1;
  509. }
  510. /* restore return address */
  511. if (ctx->flags & SEEN_CALL)
  512. emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
  513. /* Restore the sp and discard the scratch memory */
  514. if (offset)
  515. emit_stack_offset(align_sp(offset), ctx);
  516. }
  517. static unsigned int get_stack_depth(struct jit_ctx *ctx)
  518. {
  519. int sp_off = 0;
  520. /* How many s* regs do we need to preserve? */
  521. sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
  522. if (ctx->flags & SEEN_MEM)
  523. sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
  524. if (ctx->flags & SEEN_CALL)
  525. sp_off += SZREG; /* Space for our ra register */
  526. return sp_off;
  527. }
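/*
 * Example (assuming the usual r_A/r_X/r_M mapping to $s3/$s4/$s6): a
 * filter that touches A, X and the scratch memory on a 32-bit kernel
 * needs 3 * SZREG = 12 bytes for the saved registers plus
 * 4 * BPF_MEMWORDS = 64 bytes for M[], so get_stack_depth() returns 76
 * and save_bpf_jit_regs() rounds that up with align_sp().
 */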
  528. static void build_prologue(struct jit_ctx *ctx)
  529. {
  530. int sp_off;
  531. /* Calculate the total offset for the stack pointer */
  532. sp_off = get_stack_depth(ctx);
  533. save_bpf_jit_regs(ctx, sp_off);
  534. if (ctx->flags & SEEN_SKB)
  535. emit_reg_move(r_skb, MIPS_R_A0, ctx);
  536. if (ctx->flags & SEEN_SKB_DATA) {
  537. /* Load packet length */
  538. emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
  539. ctx);
  540. emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
  541. ctx);
  542. /* Load the data pointer */
  543. emit_load_ptr(r_skb_data, r_skb,
  544. offsetof(struct sk_buff, data), ctx);
  545. /* Load the header length */
  546. emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
  547. }
  548. if (ctx->flags & SEEN_X)
  549. emit_jit_reg_move(r_X, r_zero, ctx);
  550. /*
  551. * Do not leak kernel data to userspace, we only need to clear
  552. * r_A if it is ever used. In fact if it is never used, we
  553. * will not save/restore it, so clearing it in this case would
  554. * corrupt the state of the caller.
  555. */
  556. if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
  557. (ctx->flags & SEEN_A))
  558. emit_jit_reg_move(r_A, r_zero, ctx);
  559. }
  560. static void build_epilogue(struct jit_ctx *ctx)
  561. {
  562. unsigned int sp_off;
  563. /* Calculate the total offset for the stack pointer */
  564. sp_off = get_stack_depth(ctx);
  565. restore_bpf_jit_regs(ctx, sp_off);
  566. /* Return */
  567. emit_jr(r_ra, ctx);
  568. emit_nop(ctx);
  569. }
  570. #define CHOOSE_LOAD_FUNC(K, func) \
  571. ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
  572. func##_positive)
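/*
 * Example: K = 14 is non-negative, so the "positive" helper is used;
 * K = SKF_NET_OFF + 12 is negative but still >= SKF_LL_OFF, so the
 * "negative" helper that understands the special offsets is chosen;
 * anything below SKF_LL_OFF falls back to the generic helper.
 */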
  573. static bool is_bad_offset(int b_off)
  574. {
  575. return b_off > 0x1ffff || b_off < -0x20000;
  576. }
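/*
 * MIPS branches encode a signed 16-bit word offset, roughly +/-128 KB
 * of byte displacement, which is the -0x20000..0x1ffff window checked
 * above.  When a filter is too large to branch across, build_body()
 * gives up with -E2BIG and the program stays interpreted.
 */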
  577. static int build_body(struct jit_ctx *ctx)
  578. {
  579. const struct bpf_prog *prog = ctx->skf;
  580. const struct sock_filter *inst;
  581. unsigned int i, off, condt;
  582. u32 k, b_off __maybe_unused;
  583. u8 (*sk_load_func)(unsigned long *skb, int offset);
  584. for (i = 0; i < prog->len; i++) {
  585. u16 code;
  586. inst = &(prog->insns[i]);
  587. pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
  588. __func__, inst->code, inst->jt, inst->jf, inst->k);
  589. k = inst->k;
  590. code = bpf_anc_helper(inst);
  591. if (ctx->target == NULL)
  592. ctx->offsets[i] = ctx->idx * 4;
  593. switch (code) {
  594. case BPF_LD | BPF_IMM:
  595. /* A <- k ==> li r_A, k */
  596. ctx->flags |= SEEN_A;
  597. emit_load_imm(r_A, k, ctx);
  598. break;
  599. case BPF_LD | BPF_W | BPF_LEN:
  600. BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
  601. /* A <- len ==> lw r_A, offset(skb) */
  602. ctx->flags |= SEEN_SKB | SEEN_A;
  603. off = offsetof(struct sk_buff, len);
  604. emit_load(r_A, r_skb, off, ctx);
  605. break;
  606. case BPF_LD | BPF_MEM:
  607. /* A <- M[k] ==> lw r_A, offset(M) */
  608. ctx->flags |= SEEN_MEM | SEEN_A;
  609. emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
  610. break;
  611. case BPF_LD | BPF_W | BPF_ABS:
  612. /* A <- P[k:4] */
  613. sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
  614. goto load;
  615. case BPF_LD | BPF_H | BPF_ABS:
  616. /* A <- P[k:2] */
  617. sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
  618. goto load;
  619. case BPF_LD | BPF_B | BPF_ABS:
  620. /* A <- P[k:1] */
  621. sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
  622. load:
  623. emit_load_imm(r_off, k, ctx);
  624. load_common:
  625. ctx->flags |= SEEN_CALL | SEEN_OFF |
  626. SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
  627. emit_load_func(r_s0, (ptr)sk_load_func, ctx);
  628. emit_reg_move(MIPS_R_A0, r_skb, ctx);
  629. emit_jalr(MIPS_R_RA, r_s0, ctx);
  630. /* Load second argument to delay slot */
  631. emit_reg_move(MIPS_R_A1, r_off, ctx);
  632. /* Check the error value */
  633. emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
  634. ctx);
  635. /* Load return register on DS for failures */
  636. emit_reg_move(r_ret, r_zero, ctx);
  637. /* Return with error */
  638. b_off = b_imm(prog->len, ctx);
  639. if (is_bad_offset(b_off))
  640. return -E2BIG;
  641. emit_b(b_off, ctx);
  642. emit_nop(ctx);
  643. break;
  644. case BPF_LD | BPF_W | BPF_IND:
  645. /* A <- P[X + k:4] */
  646. sk_load_func = sk_load_word;
  647. goto load_ind;
  648. case BPF_LD | BPF_H | BPF_IND:
  649. /* A <- P[X + k:2] */
  650. sk_load_func = sk_load_half;
  651. goto load_ind;
  652. case BPF_LD | BPF_B | BPF_IND:
  653. /* A <- P[X + k:1] */
  654. sk_load_func = sk_load_byte;
  655. load_ind:
  656. ctx->flags |= SEEN_OFF | SEEN_X;
  657. emit_addiu(r_off, r_X, k, ctx);
  658. goto load_common;
  659. case BPF_LDX | BPF_IMM:
  660. /* X <- k */
  661. ctx->flags |= SEEN_X;
  662. emit_load_imm(r_X, k, ctx);
  663. break;
  664. case BPF_LDX | BPF_MEM:
  665. /* X <- M[k] */
  666. ctx->flags |= SEEN_X | SEEN_MEM;
  667. emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
  668. break;
  669. case BPF_LDX | BPF_W | BPF_LEN:
  670. /* X <- len */
  671. ctx->flags |= SEEN_X | SEEN_SKB;
  672. off = offsetof(struct sk_buff, len);
  673. emit_load(r_X, r_skb, off, ctx);
  674. break;
  675. case BPF_LDX | BPF_B | BPF_MSH:
  676. /* X <- 4 * (P[k:1] & 0xf) */
  677. ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
  678. /* Load offset to a1 */
  679. emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
  680. /*
  681. * This may emit two instructions so it may not fit
  682. * in the delay slot. So use a0 in the delay slot.
  683. */
  684. emit_load_imm(MIPS_R_A1, k, ctx);
  685. emit_jalr(MIPS_R_RA, r_s0, ctx);
  686. emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
  687. /* Check the error value */
  688. b_off = b_imm(prog->len, ctx);
  689. if (is_bad_offset(b_off))
  690. return -E2BIG;
  691. emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
  692. emit_reg_move(r_ret, r_zero, ctx);
  693. /* We are good */
  694. /* X <- P[k:1] & 0xf */
  695. emit_andi(r_X, r_A, 0xf, ctx);
  696. /* X << 2 */
  697. emit_b(b_imm(i + 1, ctx), ctx);
  698. emit_sll(r_X, r_X, 2, ctx); /* delay slot */
  699. break;
  700. case BPF_ST:
  701. /* M[k] <- A */
  702. ctx->flags |= SEEN_MEM | SEEN_A;
  703. emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
  704. break;
  705. case BPF_STX:
  706. /* M[k] <- X */
  707. ctx->flags |= SEEN_MEM | SEEN_X;
  708. emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
  709. break;
  710. case BPF_ALU | BPF_ADD | BPF_K:
  711. /* A += K */
  712. ctx->flags |= SEEN_A;
  713. emit_addiu(r_A, r_A, k, ctx);
  714. break;
  715. case BPF_ALU | BPF_ADD | BPF_X:
  716. /* A += X */
  717. ctx->flags |= SEEN_A | SEEN_X;
  718. emit_addu(r_A, r_A, r_X, ctx);
  719. break;
  720. case BPF_ALU | BPF_SUB | BPF_K:
  721. /* A -= K */
  722. ctx->flags |= SEEN_A;
  723. emit_addiu(r_A, r_A, -k, ctx);
  724. break;
  725. case BPF_ALU | BPF_SUB | BPF_X:
  726. /* A -= X */
  727. ctx->flags |= SEEN_A | SEEN_X;
  728. emit_subu(r_A, r_A, r_X, ctx);
  729. break;
  730. case BPF_ALU | BPF_MUL | BPF_K:
  731. /* A *= K */
  732. /* Load K to scratch register before MUL */
  733. ctx->flags |= SEEN_A;
  734. emit_load_imm(r_s0, k, ctx);
  735. emit_mul(r_A, r_A, r_s0, ctx);
  736. break;
  737. case BPF_ALU | BPF_MUL | BPF_X:
  738. /* A *= X */
  739. ctx->flags |= SEEN_A | SEEN_X;
  740. emit_mul(r_A, r_A, r_X, ctx);
  741. break;
  742. case BPF_ALU | BPF_DIV | BPF_K:
  743. /* A /= k */
  744. if (k == 1)
  745. break;
  746. if (optimize_div(&k)) {
  747. ctx->flags |= SEEN_A;
  748. emit_srl(r_A, r_A, k, ctx);
  749. break;
  750. }
  751. ctx->flags |= SEEN_A;
  752. emit_load_imm(r_s0, k, ctx);
  753. emit_div(r_A, r_s0, ctx);
  754. break;
  755. case BPF_ALU | BPF_MOD | BPF_K:
  756. /* A %= k */
  757. if (k == 1) {
  758. ctx->flags |= SEEN_A;
  759. emit_jit_reg_move(r_A, r_zero, ctx);
  760. } else {
  761. ctx->flags |= SEEN_A;
  762. emit_load_imm(r_s0, k, ctx);
  763. emit_mod(r_A, r_s0, ctx);
  764. }
  765. break;
  766. case BPF_ALU | BPF_DIV | BPF_X:
  767. /* A /= X */
  768. ctx->flags |= SEEN_X | SEEN_A;
  769. /* Check if r_X is zero */
  770. b_off = b_imm(prog->len, ctx);
  771. if (is_bad_offset(b_off))
  772. return -E2BIG;
  773. emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
  774. emit_load_imm(r_ret, 0, ctx); /* delay slot */
  775. emit_div(r_A, r_X, ctx);
  776. break;
  777. case BPF_ALU | BPF_MOD | BPF_X:
  778. /* A %= X */
  779. ctx->flags |= SEEN_X | SEEN_A;
  780. /* Check if r_X is zero */
  781. b_off = b_imm(prog->len, ctx);
  782. if (is_bad_offset(b_off))
  783. return -E2BIG;
  784. emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
  785. emit_load_imm(r_ret, 0, ctx); /* delay slot */
  786. emit_mod(r_A, r_X, ctx);
  787. break;
  788. case BPF_ALU | BPF_OR | BPF_K:
  789. /* A |= K */
  790. ctx->flags |= SEEN_A;
  791. emit_ori(r_A, r_A, k, ctx);
  792. break;
  793. case BPF_ALU | BPF_OR | BPF_X:
  794. /* A |= X */
  795. ctx->flags |= SEEN_A;
  796. emit_ori(r_A, r_A, r_X, ctx);
  797. break;
  798. case BPF_ALU | BPF_XOR | BPF_K:
  799. /* A ^= k */
  800. ctx->flags |= SEEN_A;
  801. emit_xori(r_A, r_A, k, ctx);
  802. break;
  803. case BPF_ANC | SKF_AD_ALU_XOR_X:
  804. case BPF_ALU | BPF_XOR | BPF_X:
  805. /* A ^= X */
  806. ctx->flags |= SEEN_A;
  807. emit_xor(r_A, r_A, r_X, ctx);
  808. break;
  809. case BPF_ALU | BPF_AND | BPF_K:
  810. /* A &= K */
  811. ctx->flags |= SEEN_A;
  812. emit_andi(r_A, r_A, k, ctx);
  813. break;
  814. case BPF_ALU | BPF_AND | BPF_X:
  815. /* A &= X */
  816. ctx->flags |= SEEN_A | SEEN_X;
  817. emit_and(r_A, r_A, r_X, ctx);
  818. break;
  819. case BPF_ALU | BPF_LSH | BPF_K:
  820. /* A <<= K */
  821. ctx->flags |= SEEN_A;
  822. emit_sll(r_A, r_A, k, ctx);
  823. break;
  824. case BPF_ALU | BPF_LSH | BPF_X:
  825. /* A <<= X */
  826. ctx->flags |= SEEN_A | SEEN_X;
  827. emit_sllv(r_A, r_A, r_X, ctx);
  828. break;
  829. case BPF_ALU | BPF_RSH | BPF_K:
  830. /* A >>= K */
  831. ctx->flags |= SEEN_A;
  832. emit_srl(r_A, r_A, k, ctx);
  833. break;
  834. case BPF_ALU | BPF_RSH | BPF_X:
  835. ctx->flags |= SEEN_A | SEEN_X;
  836. emit_srlv(r_A, r_A, r_X, ctx);
  837. break;
  838. case BPF_ALU | BPF_NEG:
  839. /* A = -A */
  840. ctx->flags |= SEEN_A;
  841. emit_neg(r_A, ctx);
  842. break;
  843. case BPF_JMP | BPF_JA:
  844. /* pc += K */
  845. b_off = b_imm(i + k + 1, ctx);
  846. if (is_bad_offset(b_off))
  847. return -E2BIG;
  848. emit_b(b_off, ctx);
  849. emit_nop(ctx);
  850. break;
  851. case BPF_JMP | BPF_JEQ | BPF_K:
  852. /* pc += ( A == K ) ? pc->jt : pc->jf */
  853. condt = MIPS_COND_EQ | MIPS_COND_K;
  854. goto jmp_cmp;
  855. case BPF_JMP | BPF_JEQ | BPF_X:
  856. ctx->flags |= SEEN_X;
  857. /* pc += ( A == X ) ? pc->jt : pc->jf */
  858. condt = MIPS_COND_EQ | MIPS_COND_X;
  859. goto jmp_cmp;
  860. case BPF_JMP | BPF_JGE | BPF_K:
  861. /* pc += ( A >= K ) ? pc->jt : pc->jf */
  862. condt = MIPS_COND_GE | MIPS_COND_K;
  863. goto jmp_cmp;
  864. case BPF_JMP | BPF_JGE | BPF_X:
  865. ctx->flags |= SEEN_X;
  866. /* pc += ( A >= X ) ? pc->jt : pc->jf */
  867. condt = MIPS_COND_GE | MIPS_COND_X;
  868. goto jmp_cmp;
  869. case BPF_JMP | BPF_JGT | BPF_K:
  870. /* pc += ( A > K ) ? pc->jt : pc->jf */
  871. condt = MIPS_COND_GT | MIPS_COND_K;
  872. goto jmp_cmp;
  873. case BPF_JMP | BPF_JGT | BPF_X:
  874. ctx->flags |= SEEN_X;
  875. /* pc += ( A > X ) ? pc->jt : pc->jf */
  876. condt = MIPS_COND_GT | MIPS_COND_X;
  877. jmp_cmp:
  878. /* Greater or Equal */
  879. if ((condt & MIPS_COND_GE) ||
  880. (condt & MIPS_COND_GT)) {
  881. if (condt & MIPS_COND_K) { /* K */
  882. ctx->flags |= SEEN_A;
  883. emit_sltiu(r_s0, r_A, k, ctx);
  884. } else { /* X */
  885. ctx->flags |= SEEN_A |
  886. SEEN_X;
  887. emit_sltu(r_s0, r_A, r_X, ctx);
  888. }
  889. /* A < (K|X) ? r_scratch = 1 */
  890. b_off = b_imm(i + inst->jf + 1, ctx);
  891. emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
  892. ctx);
  893. emit_nop(ctx);
  894. /* A > (K|X) ? scratch = 0 */
  895. if (condt & MIPS_COND_GT) {
  896. /* Checking for equality */
  897. ctx->flags |= SEEN_A | SEEN_X;
  898. if (condt & MIPS_COND_K)
  899. emit_load_imm(r_s0, k, ctx);
  900. else
  901. emit_jit_reg_move(r_s0, r_X,
  902. ctx);
  903. b_off = b_imm(i + inst->jf + 1, ctx);
  904. emit_bcond(MIPS_COND_EQ, r_A, r_s0,
  905. b_off, ctx);
  906. emit_nop(ctx);
  907. /* Finally, A > K|X */
  908. b_off = b_imm(i + inst->jt + 1, ctx);
  909. emit_b(b_off, ctx);
  910. emit_nop(ctx);
  911. } else {
  912. /* A >= (K|X) so jump */
  913. b_off = b_imm(i + inst->jt + 1, ctx);
  914. emit_b(b_off, ctx);
  915. emit_nop(ctx);
  916. }
  917. } else {
  918. /* A == K|X */
  919. if (condt & MIPS_COND_K) { /* K */
  920. ctx->flags |= SEEN_A;
  921. emit_load_imm(r_s0, k, ctx);
  922. /* jump true */
  923. b_off = b_imm(i + inst->jt + 1, ctx);
  924. emit_bcond(MIPS_COND_EQ, r_A, r_s0,
  925. b_off, ctx);
  926. emit_nop(ctx);
  927. /* jump false */
  928. b_off = b_imm(i + inst->jf + 1,
  929. ctx);
  930. emit_bcond(MIPS_COND_NE, r_A, r_s0,
  931. b_off, ctx);
  932. emit_nop(ctx);
  933. } else { /* X */
  934. /* jump true */
  935. ctx->flags |= SEEN_A | SEEN_X;
  936. b_off = b_imm(i + inst->jt + 1,
  937. ctx);
  938. emit_bcond(MIPS_COND_EQ, r_A, r_X,
  939. b_off, ctx);
  940. emit_nop(ctx);
  941. /* jump false */
  942. b_off = b_imm(i + inst->jf + 1, ctx);
  943. emit_bcond(MIPS_COND_NE, r_A, r_X,
  944. b_off, ctx);
  945. emit_nop(ctx);
  946. }
  947. }
  948. break;
  949. case BPF_JMP | BPF_JSET | BPF_K:
  950. ctx->flags |= SEEN_A;
  951. /* pc += (A & K) ? pc -> jt : pc -> jf */
  952. emit_load_imm(r_s1, k, ctx);
  953. emit_and(r_s0, r_A, r_s1, ctx);
  954. /* jump true */
  955. b_off = b_imm(i + inst->jt + 1, ctx);
  956. emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
  957. emit_nop(ctx);
  958. /* jump false */
  959. b_off = b_imm(i + inst->jf + 1, ctx);
  960. emit_b(b_off, ctx);
  961. emit_nop(ctx);
  962. break;
  963. case BPF_JMP | BPF_JSET | BPF_X:
  964. ctx->flags |= SEEN_X | SEEN_A;
  965. /* pc += (A & X) ? pc -> jt : pc -> jf */
  966. emit_and(r_s0, r_A, r_X, ctx);
  967. /* jump true */
  968. b_off = b_imm(i + inst->jt + 1, ctx);
  969. emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
  970. emit_nop(ctx);
  971. /* jump false */
  972. b_off = b_imm(i + inst->jf + 1, ctx);
  973. emit_b(b_off, ctx);
  974. emit_nop(ctx);
  975. break;
  976. case BPF_RET | BPF_A:
  977. ctx->flags |= SEEN_A;
  978. if (i != prog->len - 1) {
  979. /*
  980. * If this is not the last instruction
  981. * then jump to the epilogue
  982. */
  983. b_off = b_imm(prog->len, ctx);
  984. if (is_bad_offset(b_off))
  985. return -E2BIG;
  986. emit_b(b_off, ctx);
  987. }
  988. emit_reg_move(r_ret, r_A, ctx); /* delay slot */
  989. break;
  990. case BPF_RET | BPF_K:
  991. /*
  992. * It can emit two instructions so it does not fit on
  993. * the delay slot.
  994. */
  995. emit_load_imm(r_ret, k, ctx);
  996. if (i != prog->len - 1) {
  997. /*
  998. * If this is not the last instruction
  999. * then jump to the epilogue
  1000. */
  1001. b_off = b_imm(prog->len, ctx);
  1002. if (is_bad_offset(b_off))
  1003. return -E2BIG;
  1004. emit_b(b_off, ctx);
  1005. emit_nop(ctx);
  1006. }
  1007. break;
  1008. case BPF_MISC | BPF_TAX:
  1009. /* X = A */
  1010. ctx->flags |= SEEN_X | SEEN_A;
  1011. emit_jit_reg_move(r_X, r_A, ctx);
  1012. break;
  1013. case BPF_MISC | BPF_TXA:
  1014. /* A = X */
  1015. ctx->flags |= SEEN_A | SEEN_X;
  1016. emit_jit_reg_move(r_A, r_X, ctx);
  1017. break;
  1018. /* AUX */
  1019. case BPF_ANC | SKF_AD_PROTOCOL:
  1020. /* A = ntohs(skb->protocol) */
  1021. ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
  1022. BUILD_BUG_ON(sizeof_field(struct sk_buff,
  1023. protocol) != 2);
  1024. off = offsetof(struct sk_buff, protocol);
  1025. emit_half_load(r_A, r_skb, off, ctx);
  1026. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  1027. /* This needs little endian fixup */
  1028. if (cpu_has_wsbh) {
  1029. /* R2 and later have the wsbh instruction */
  1030. emit_wsbh(r_A, r_A, ctx);
  1031. } else {
  1032. /* Get first byte */
  1033. emit_andi(r_tmp_imm, r_A, 0xff, ctx);
  1034. /* Shift it */
  1035. emit_sll(r_tmp, r_tmp_imm, 8, ctx);
  1036. /* Get second byte */
  1037. emit_srl(r_tmp_imm, r_A, 8, ctx);
  1038. emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
  1039. /* Put everything together in r_A */
  1040. emit_or(r_A, r_tmp, r_tmp_imm, ctx);
  1041. }
  1042. #endif
  1043. break;
  1044. case BPF_ANC | SKF_AD_CPU:
  1045. ctx->flags |= SEEN_A | SEEN_OFF;
  1046. /* A = current_thread_info()->cpu */
  1047. BUILD_BUG_ON(sizeof_field(struct thread_info,
  1048. cpu) != 4);
  1049. off = offsetof(struct thread_info, cpu);
  1050. /* $28/gp points to the thread_info struct */
  1051. emit_load(r_A, 28, off, ctx);
  1052. break;
  1053. case BPF_ANC | SKF_AD_IFINDEX:
  1054. /* A = skb->dev->ifindex */
  1055. case BPF_ANC | SKF_AD_HATYPE:
  1056. /* A = skb->dev->type */
  1057. ctx->flags |= SEEN_SKB | SEEN_A;
  1058. off = offsetof(struct sk_buff, dev);
  1059. /* Load *dev pointer */
  1060. emit_load_ptr(r_s0, r_skb, off, ctx);
  1061. /* error (0) in the delay slot */
  1062. b_off = b_imm(prog->len, ctx);
  1063. if (is_bad_offset(b_off))
  1064. return -E2BIG;
  1065. emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
  1066. emit_reg_move(r_ret, r_zero, ctx);
  1067. if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
  1068. BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
  1069. off = offsetof(struct net_device, ifindex);
  1070. emit_load(r_A, r_s0, off, ctx);
  1071. } else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
  1072. BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
  1073. off = offsetof(struct net_device, type);
  1074. emit_half_load_unsigned(r_A, r_s0, off, ctx);
  1075. }
  1076. break;
  1077. case BPF_ANC | SKF_AD_MARK:
  1078. ctx->flags |= SEEN_SKB | SEEN_A;
  1079. BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
  1080. off = offsetof(struct sk_buff, mark);
  1081. emit_load(r_A, r_skb, off, ctx);
  1082. break;
  1083. case BPF_ANC | SKF_AD_RXHASH:
  1084. ctx->flags |= SEEN_SKB | SEEN_A;
  1085. BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
  1086. off = offsetof(struct sk_buff, hash);
  1087. emit_load(r_A, r_skb, off, ctx);
  1088. break;
  1089. case BPF_ANC | SKF_AD_VLAN_TAG:
  1090. ctx->flags |= SEEN_SKB | SEEN_A;
  1091. BUILD_BUG_ON(sizeof_field(struct sk_buff,
  1092. vlan_tci) != 2);
  1093. off = offsetof(struct sk_buff, vlan_tci);
  1094. emit_half_load_unsigned(r_A, r_skb, off, ctx);
  1095. break;
  1096. case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
  1097. ctx->flags |= SEEN_SKB | SEEN_A;
  1098. emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
  1099. if (PKT_VLAN_PRESENT_BIT)
  1100. emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
  1101. if (PKT_VLAN_PRESENT_BIT < 7)
  1102. emit_andi(r_A, r_A, 1, ctx);
  1103. break;
  1104. case BPF_ANC | SKF_AD_PKTTYPE:
  1105. ctx->flags |= SEEN_SKB;
  1106. emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
  1107. /* Keep only the last 3 bits */
  1108. emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
  1109. #ifdef __BIG_ENDIAN_BITFIELD
  1110. /* Get the actual packet type to the lower 3 bits */
  1111. emit_srl(r_A, r_A, 5, ctx);
  1112. #endif
  1113. break;
  1114. case BPF_ANC | SKF_AD_QUEUE:
  1115. ctx->flags |= SEEN_SKB | SEEN_A;
  1116. BUILD_BUG_ON(sizeof_field(struct sk_buff,
  1117. queue_mapping) != 2);
  1118. BUILD_BUG_ON(offsetof(struct sk_buff,
  1119. queue_mapping) > 0xff);
  1120. off = offsetof(struct sk_buff, queue_mapping);
  1121. emit_half_load_unsigned(r_A, r_skb, off, ctx);
  1122. break;
  1123. default:
  1124. pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
  1125. inst->code);
  1126. return -1;
  1127. }
  1128. }
  1129. /* compute offsets only during the first pass */
  1130. if (ctx->target == NULL)
  1131. ctx->offsets[i] = ctx->idx * 4;
  1132. return 0;
  1133. }
  1134. void bpf_jit_compile(struct bpf_prog *fp)
  1135. {
  1136. struct jit_ctx ctx;
  1137. unsigned int alloc_size, tmp_idx;
  1138. if (!bpf_jit_enable)
  1139. return;
  1140. memset(&ctx, 0, sizeof(ctx));
  1141. ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
  1142. if (ctx.offsets == NULL)
  1143. return;
  1144. ctx.skf = fp;
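/*
 * First pass: ctx.target is still NULL, so build_body(),
 * build_prologue() and build_epilogue() only advance ctx.idx and
 * record per-instruction offsets.  That tells us how much memory to
 * allocate before the second pass below emits the real instructions.
 */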
  1145. if (build_body(&ctx))
  1146. goto out;
  1147. tmp_idx = ctx.idx;
  1148. build_prologue(&ctx);
  1149. ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
  1150. /* just to complete the ctx.idx count */
  1151. build_epilogue(&ctx);
  1152. alloc_size = 4 * ctx.idx;
  1153. ctx.target = module_alloc(alloc_size);
  1154. if (ctx.target == NULL)
  1155. goto out;
  1156. /* Clean it */
  1157. memset(ctx.target, 0, alloc_size);
  1158. ctx.idx = 0;
  1159. /* Generate the actual JIT code */
  1160. build_prologue(&ctx);
  1161. if (build_body(&ctx)) {
  1162. module_memfree(ctx.target);
  1163. goto out;
  1164. }
  1165. build_epilogue(&ctx);
  1166. /* Update the icache */
  1167. flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
  1168. if (bpf_jit_enable > 1)
  1169. /* Dump JIT code */
  1170. bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
  1171. fp->bpf_func = (void *)ctx.target;
  1172. fp->jited = 1;
  1173. out:
  1174. kfree(ctx.offsets);
  1175. }
  1176. void bpf_jit_free(struct bpf_prog *fp)
  1177. {
  1178. if (fp->jited)
  1179. module_memfree(fp->bpf_func);
  1180. bpf_prog_unlock_free(fp);
  1181. }