nft_payload.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>
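
/* Rebuild the tagged ethernet header from skb metadata: when hardware has
 * stripped the VLAN tag, the TCI is held in skb->vlan_tci and the tag
 * protocol in skb->vlan_proto, so reconstruct the on-wire vlan_ethhdr that
 * payload expressions expect to see.
 */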
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
                                         struct vlan_ethhdr *veth)
{
        if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
                return false;

        veth->h_vlan_proto = skb->vlan_proto;
        veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
        veth->h_vlan_encapsulated_proto = skb->protocol;

        return true;
}

/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
        u8 vlan_hlen = 0;

        if ((skb->protocol == htons(ETH_P_8021AD) ||
             skb->protocol == htons(ETH_P_8021Q)) &&
            offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
                vlan_hlen += VLAN_HLEN;

        vlanh = (u8 *) &veth;
        if (offset < VLAN_ETH_HLEN + vlan_hlen) {
                u8 ethlen = len;

                if (vlan_hlen &&
                    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
                        return false;
                else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                        return false;

                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
                        ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

                len -= ethlen;
                if (len == 0)
                        return true;

                dst_u8 += ethlen;
                offset = ETH_HLEN + vlan_hlen;
        } else {
                offset -= VLAN_HLEN + vlan_hlen;
        }

        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
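
/* Copy priv->len bytes at priv->offset from the selected header base into
 * the destination register. The trailing partial register word is zeroed
 * up front so short loads leave no stale data; missing headers or an
 * out-of-range copy end rule evaluation with NFT_BREAK.
 */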
void nft_payload_eval(const struct nft_expr *expr,
                      struct nft_regs *regs,
                      const struct nft_pktinfo *pkt)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
        u32 *dest = &regs->data[priv->dreg];
        int offset;

        if (priv->len % NFT_REG32_SIZE)
                dest[priv->len / NFT_REG32_SIZE] = 0;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;

                if (skb_vlan_tag_present(skb)) {
                        if (!nft_payload_copy_vlan(dest, skb,
                                                   priv->offset, priv->len))
                                goto err;
                        return;
                }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }
        offset += priv->offset;

        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
err:
        regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_SREG]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_DREG]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_BASE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_OFFSET]      = { .type = NLA_U32 },
        [NFTA_PAYLOAD_LEN]         = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]   = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_FLAGS]  = { .type = NLA_U32 },
};
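
/* Parse the netlink attributes for a payload load and check that the
 * destination register can store priv->len bytes of data.
 */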
static int nft_payload_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
{
        struct nft_payload *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

        return nft_validate_register_store(ctx, priv->dreg, NULL,
                                           NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
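
/* Build the mask for an offloaded payload match: all-ones when the match
 * spans the whole field, a truncated mask when it covers only the leading
 * bytes, and refusal when the match is wider than the field.
 */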
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
                                     u32 priv_len, u32 field_len)
{
        unsigned int remainder, delta, k;
        struct nft_data mask = {};
        __be32 remainder_mask;

        if (priv_len == field_len) {
                memset(&reg->mask, 0xff, priv_len);
                return true;
        } else if (priv_len > field_len) {
                return false;
        }

        memset(&mask, 0xff, field_len);

        remainder = priv_len % sizeof(u32);
        if (remainder) {
                k = priv_len / sizeof(u32);
                delta = field_len - priv_len;
                remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
                mask.data[k] = (__force u32)remainder_mask;
        }

        memcpy(&reg->mask, &mask, field_len);

        return true;
}
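
/* Translate link-layer payload matches into flow dissector keys: ethernet
 * source, destination and ethertype, plus outer (VLAN) and inner (CVLAN)
 * TCI and encapsulated protocol for tagged frames.
 */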
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
                if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
                if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_proto):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
                                  n_proto, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                        vlan_tci, sizeof(__be16), reg,
                                        NFT_OFFLOAD_F_NETWORK2HOST);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tpid, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
                                        vlan_tci, sizeof(__be16), reg,
                                        NFT_OFFLOAD_F_NETWORK2HOST);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
             sizeof(struct vlan_hdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
                                  vlan_tpid, sizeof(__be16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, daddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, protocol):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, daddr):
                if (!nft_payload_offload_mask(reg, priv->len,
                                              sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.l3num) {
        case htons(ETH_P_IP):
                err = nft_payload_offload_ip(ctx, flow, priv);
                break;
        case htons(ETH_P_IPV6):
                err = nft_payload_offload_ip6(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
                                   struct nft_flow_rule *flow,
                                   const struct nft_payload *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

        switch (priv->offset) {
        case offsetof(struct udphdr, source):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
                if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;

                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
{
        int err;

        switch (ctx->dep.protonum) {
        case IPPROTO_TCP:
                err = nft_payload_offload_tcp(ctx, flow, priv);
                break;
        case IPPROTO_UDP:
                err = nft_payload_offload_udp(ctx, flow, priv);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
                               struct nft_flow_rule *flow,
                               const struct nft_expr *expr)
{
        const struct nft_payload *priv = nft_expr_priv(expr);
        int err;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                err = nft_payload_offload_ll(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                err = nft_payload_offload_nh(ctx, flow, priv);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                err = nft_payload_offload_th(ctx, flow, priv);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static const struct nft_expr_ops nft_payload_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
        .eval           = nft_payload_eval,
        .init           = nft_payload_init,
        .dump           = nft_payload_dump,
        .offload        = nft_payload_offload,
};
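
/* Fold the checksum delta (old data fsum replaced by new data tsum) into
 * *sum, mapping a zero result to CSUM_MANGLED_0 so a computed checksum is
 * never emitted as the "no checksum" value.
 */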
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
        *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
        if (*sum == 0)
                *sum = CSUM_MANGLED_0;
}
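
/* A UDP checksum field of zero means the sender did not compute one, so
 * there is nothing to update; report whether the field is non-zero.
 */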
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
        struct udphdr *uh, _uh;

        uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
        if (!uh)
                return false;

        return (__force bool)uh->check;
}
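
/* Find the layer 4 checksum field offset, relative to the start of the
 * packet, for the current transport protocol. Fragments and protocols
 * without a known checksum field cannot be handled.
 */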
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     unsigned int *l4csum_offset)
{
        if (pkt->xt.fragoff)
                return -1;

        switch (pkt->tprot) {
        case IPPROTO_TCP:
                *l4csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
                        return -1;
                fallthrough;
        case IPPROTO_UDPLITE:
                *l4csum_offset = offsetof(struct udphdr, check);
                break;
        case IPPROTO_ICMPV6:
                *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
                break;
        default:
                return -1;
        }

        *l4csum_offset += pkt->xt.thoff;
        return 0;
}
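
/* SCTP carries a CRC32c rather than an inet checksum, so recompute it over
 * the whole SCTP packet instead of applying a differential update.
 */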
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
        struct sctphdr *sh;

        if (skb_ensure_writable(skb, offset + sizeof(*sh)))
                return -1;

        sh = (struct sctphdr *)(skb->data + offset);
        sh->checksum = sctp_compute_cksum(skb, offset);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        return 0;
}
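
/* Apply the checksum delta to the layer 4 checksum field, honouring
 * skb->ip_summed: with CHECKSUM_COMPLETE the skb-level checksum is updated
 * alongside the header field, while with CHECKSUM_PARTIAL only the stored
 * pseudo-header checksum is adjusted.
 */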
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
                                     struct sk_buff *skb,
                                     __wsum fsum, __wsum tsum)
{
        int l4csum_offset;
        __sum16 sum;

        /* If we cannot determine layer 4 checksum offset or this packet doesn't
         * require layer 4 checksum recalculation, skip this packet.
         */
        if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
                return 0;

        if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        /* Checksum mangling for an arbitrary amount of bytes, based on
         * inet_proto_csum_replace*() functions.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                nft_csum_replace(&sum, fsum, tsum);
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
                                              tsum);
                }
        } else {
                sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
                                          tsum));
        }

        if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}
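
/* Differentially update an internet checksum field at csum_offset after
 * the payload bytes covered by fsum have been replaced by those in tsum.
 */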
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
                                 __wsum fsum, __wsum tsum, int csum_offset)
{
        __sum16 sum;

        if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        nft_csum_replace(&sum, fsum, tsum);
        if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
            skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
                return -1;

        return 0;
}
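
/* Store priv->len bytes from the source register into the packet at the
 * chosen base plus priv->offset, fixing up the configured checksum first
 * so the delta is computed against the old payload bytes.
 */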
static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;

        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                if (!pkt->tprot_set)
                        goto err;
                offset = pkt->xt.thoff;
                break;
        default:
                BUG();
        }

        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;

        if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);

                if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
                    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;

                if (priv->csum_flags &&
                    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
                        goto err;
        }

        if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
            skb_store_bits(skb, offset, src, priv->len) < 0)
                goto err;

        if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
            pkt->tprot == IPPROTO_SCTP &&
            skb->ip_summed != CHECKSUM_PARTIAL) {
                if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
                        goto err;
        }

        return;
err:
        regs->verdict.code = NFT_BREAK;
}
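
/* Parse the netlink attributes for a payload store, including the optional
 * checksum type, offset and flags; SCTP checksumming is only valid at the
 * transport header with the offset of the sctphdr checksum field.
 */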
static int nft_payload_set_init(const struct nft_ctx *ctx,
                                const struct nft_expr *expr,
                                const struct nlattr * const tb[])
{
        struct nft_payload_set *priv = nft_expr_priv(expr);

        priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
        priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

        if (tb[NFTA_PAYLOAD_CSUM_TYPE])
                priv->csum_type =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
        if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
                priv->csum_offset =
                        ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
        if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
                u32 flags;

                flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
                if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
                        return -EINVAL;

                priv->csum_flags = flags;
        }

        switch (priv->csum_type) {
        case NFT_PAYLOAD_CSUM_NONE:
        case NFT_PAYLOAD_CSUM_INET:
                break;
        case NFT_PAYLOAD_CSUM_SCTP:
                if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
                        return -EINVAL;

                if (priv->csum_offset != offsetof(struct sctphdr, checksum))
                        return -EINVAL;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_payload_set *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
            nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
                         htonl(priv->csum_offset)) ||
            nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
        .type           = &nft_payload_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
        .eval           = nft_payload_set_eval,
        .init           = nft_payload_set_init,
        .dump           = nft_payload_set_dump,
};
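
/* Choose the expression ops: a store if a source register is given, a
 * load otherwise. Small, aligned loads of up to four bytes from the
 * network or transport header use the fast ops variant.
 */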
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
                       const struct nlattr * const tb[])
{
        enum nft_payload_bases base;
        unsigned int offset, len;

        if (tb[NFTA_PAYLOAD_BASE] == NULL ||
            tb[NFTA_PAYLOAD_OFFSET] == NULL ||
            tb[NFTA_PAYLOAD_LEN] == NULL)
                return ERR_PTR(-EINVAL);

        base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
        switch (base) {
        case NFT_PAYLOAD_LL_HEADER:
        case NFT_PAYLOAD_NETWORK_HEADER:
        case NFT_PAYLOAD_TRANSPORT_HEADER:
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (tb[NFTA_PAYLOAD_SREG] != NULL) {
                if (tb[NFTA_PAYLOAD_DREG] != NULL)
                        return ERR_PTR(-EINVAL);
                return &nft_payload_set_ops;
        }

        if (tb[NFTA_PAYLOAD_DREG] == NULL)
                return ERR_PTR(-EINVAL);

        offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
        len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

        if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
            base != NFT_PAYLOAD_LL_HEADER)
                return &nft_payload_fast_ops;
        else
                return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
        .name           = "payload",
        .select_ops     = nft_payload_select_ops,
        .policy         = nft_payload_policy,
        .maxattr        = NFTA_PAYLOAD_MAX,
        .owner          = THIS_MODULE,
};