nft_cmp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

struct nft_cmp_expr {
        struct nft_data         data;
        enum nft_registers      sreg:8;
        u8                      len;
        enum nft_cmp_ops        op:8;
};
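
/*
 * Generic comparison: memcmp() the source register against the
 * configured constant and map the sign of the result onto the
 * requested operator.  NFT_CMP_LT and NFT_CMP_GT first rule out
 * equality and then fall through to the LTE/GTE checks.  On a
 * mismatch the verdict is set to NFT_BREAK, which stops evaluation
 * of the current rule.
 */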
void nft_cmp_eval(const struct nft_expr *expr,
                  struct nft_regs *regs,
                  const struct nft_pktinfo *pkt)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);
        int d;

        d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);

        switch (priv->op) {
        case NFT_CMP_EQ:
                if (d != 0)
                        goto mismatch;
                break;
        case NFT_CMP_NEQ:
                if (d == 0)
                        goto mismatch;
                break;
        case NFT_CMP_LT:
                if (d == 0)
                        goto mismatch;
                fallthrough;
        case NFT_CMP_LTE:
                if (d > 0)
                        goto mismatch;
                break;
        case NFT_CMP_GT:
                if (d == 0)
                        goto mismatch;
                fallthrough;
        case NFT_CMP_GTE:
                if (d < 0)
                        goto mismatch;
                break;
        }
        return;

mismatch:
        regs->verdict.code = NFT_BREAK;
}
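
/* Netlink attribute policy for the NFTA_CMP_* attributes. */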
static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
        [NFTA_CMP_SREG]         = { .type = NLA_U32 },
        [NFTA_CMP_OP]           = { .type = NLA_U32 },
        [NFTA_CMP_DATA]         = { .type = NLA_NESTED },
};
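
/*
 * Parse and validate the comparison attributes: the data must be a
 * plain value (not a verdict), and the source register must be able
 * to provide desc.len bytes.  Operator, length and constant are then
 * stored in the expression's private area.
 */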
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                        const struct nlattr * const tb[])
{
        struct nft_cmp_expr *priv = nft_expr_priv(expr);
        struct nft_data_desc desc;
        int err;

        err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
                            tb[NFTA_CMP_DATA]);
        if (err < 0)
                return err;

        if (desc.type != NFT_DATA_VALUE) {
                err = -EINVAL;
                nft_data_release(&priv->data, desc.type);
                return err;
        }

        priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
        err = nft_validate_register_load(priv->sreg, desc.len);
        if (err < 0)
                return err;

        priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
        priv->len = desc.len;
        return 0;
}

static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
                goto nla_put_failure;

        if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
                          NFT_DATA_VALUE, priv->len) < 0)
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
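
/*
 * Scratch storage used when an offloaded match value has to be
 * converted from network to host byte order before it is copied
 * into the flow dissector key/mask (NFT_OFFLOAD_F_NETWORK2HOST).
 */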
union nft_cmp_offload_data {
        u16     val16;
        u32     val32;
        u64     val64;
};

static void nft_payload_n2h(union nft_cmp_offload_data *data,
                            const u8 *val, u32 len)
{
        switch (len) {
        case 2:
                data->val16 = ntohs(*((u16 *)val));
                break;
        case 4:
                data->val32 = ntohl(*((u32 *)val));
                break;
        case 8:
                data->val64 = be64_to_cpu(*((u64 *)val));
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
}
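
/*
 * Translate an equality comparison into a flow dissector key/mask
 * pair for hardware offload.  Only NFT_CMP_EQ is offloadable and the
 * compared length may not exceed what the tracked register covers;
 * anything else is rejected with -EOPNOTSUPP.  Matches on the meta
 * ingress interface type are only accepted for ARPHRD_ETHER.
 */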
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
                             struct nft_flow_rule *flow,
                             const struct nft_cmp_expr *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
        union nft_cmp_offload_data _data, _datamask;
        u8 *mask = (u8 *)&flow->match.mask;
        u8 *key = (u8 *)&flow->match.key;
        u8 *data, *datamask;

        if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
                return -EOPNOTSUPP;

        if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
                nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
                nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
                data = (u8 *)&_data;
                datamask = (u8 *)&_datamask;
        } else {
                data = (u8 *)&priv->data;
                datamask = (u8 *)&reg->mask;
        }

        memcpy(key + reg->offset, data, reg->len);
        memcpy(mask + reg->offset, datamask, reg->len);

        flow->match.dissector.used_keys |= BIT(reg->key);
        flow->match.dissector.offset[reg->key] = reg->base_offset;

        if (reg->key == FLOW_DISSECTOR_KEY_META &&
            reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
            nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
                return -EOPNOTSUPP;

        nft_offload_update_dependency(ctx, &priv->data, reg->len);

        return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
                           struct nft_flow_rule *flow,
                           const struct nft_expr *expr)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);

        return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
        .type           = &nft_cmp_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
        .eval           = nft_cmp_eval,
        .init           = nft_cmp_init,
        .dump           = nft_cmp_dump,
        .offload        = nft_cmp_offload,
};
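
/*
 * Fast path: EQ/NEQ comparisons on values of up to 32 bits are
 * represented by nft_cmp_fast_expr and evaluated inline in the main
 * expression loop (hence .eval = NULL in nft_cmp_fast_ops below).
 * The constant is reduced to a single masked u32 and priv->len is
 * kept in bits.
 *
 * Illustrative example (not part of this file): a rule like
 * "ip protocol tcp" compares a single payload byte for equality, so
 * nft_cmp_select_ops() would normally pick this fast variant for it.
 */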
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
                             const struct nft_expr *expr,
                             const struct nlattr * const tb[])
{
        struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        struct nft_data_desc desc;
        struct nft_data data;
        int err;

        err = nft_data_init(NULL, &data, sizeof(data), &desc,
                            tb[NFTA_CMP_DATA]);
        if (err < 0)
                return err;

        priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
        err = nft_validate_register_load(priv->sreg, desc.len);
        if (err < 0)
                return err;

        desc.len *= BITS_PER_BYTE;

        priv->mask = nft_cmp_fast_mask(desc.len);
        priv->data = data.data[0] & priv->mask;
        priv->len  = desc.len;
        priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
        return 0;
}
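
/*
 * Offload for the fast path rebuilds an equivalent nft_cmp_expr on
 * the stack and reuses __nft_cmp_offload(), converting the bit
 * length back to bytes and mapping the inverted flag onto
 * NFT_CMP_NEQ.
 */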
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
                                struct nft_flow_rule *flow,
                                const struct nft_expr *expr)
{
        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        struct nft_cmp_expr cmp = {
                .data   = {
                        .data   = {
                                [0] = priv->data,
                        },
                },
                .sreg   = priv->sreg,
                .len    = priv->len / BITS_PER_BYTE,
                .op     = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
        };

        return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
        struct nft_data data;

        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
                goto nla_put_failure;

        data.data[0] = priv->data;
        if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
                          NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
        .type           = &nft_cmp_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
        .eval           = NULL, /* inlined */
        .init           = nft_cmp_fast_init,
        .dump           = nft_cmp_fast_dump,
        .offload        = nft_cmp_fast_offload,
};
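
/*
 * Choose between the generic and the fast implementation.  The
 * attributes are validated up front; EQ/NEQ comparisons on values of
 * at most 32 bits (sizeof(u32)) get nft_cmp_fast_ops, everything
 * else falls back to nft_cmp_ops.
 */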
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
        struct nft_data_desc desc;
        struct nft_data data;
        enum nft_cmp_ops op;
        int err;

        if (tb[NFTA_CMP_SREG] == NULL ||
            tb[NFTA_CMP_OP] == NULL ||
            tb[NFTA_CMP_DATA] == NULL)
                return ERR_PTR(-EINVAL);

        op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
        switch (op) {
        case NFT_CMP_EQ:
        case NFT_CMP_NEQ:
        case NFT_CMP_LT:
        case NFT_CMP_LTE:
        case NFT_CMP_GT:
        case NFT_CMP_GTE:
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        err = nft_data_init(NULL, &data, sizeof(data), &desc,
                            tb[NFTA_CMP_DATA]);
        if (err < 0)
                return ERR_PTR(err);

        if (desc.type != NFT_DATA_VALUE) {
                err = -EINVAL;
                goto err1;
        }

        if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ))
                return &nft_cmp_fast_ops;

        return &nft_cmp_ops;
err1:
        nft_data_release(&data, desc.type);
        return ERR_PTR(-EINVAL);
}

struct nft_expr_type nft_cmp_type __read_mostly = {
        .name           = "cmp",
        .select_ops     = nft_cmp_select_ops,
        .policy         = nft_cmp_policy,
        .maxattr        = NFTA_CMP_MAX,
        .owner          = THIS_MODULE,
};