nft_tunnel.c

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

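/* Private data for the "tunnel" expression: which tunnel key to fetch,
 * the destination register, and whether to match RX, TX or any direction.
 */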
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;
	enum nft_registers	dreg:8;
	enum nft_tunnel_mode	mode:8;
};

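/* Evaluate the tunnel expression: look up the skb's tunnel metadata and store
 * either a boolean (NFT_TUNNEL_PATH) or the tunnel id (NFT_TUNNEL_ID) in the
 * destination register, honouring the configured direction mode. NFT_BREAK
 * stops evaluation of this rule when no matching metadata is present.
 */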
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= { .type = NLA_U32 },
};

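/* Parse the expression's netlink attributes: validate the requested key, the
 * destination register width (one byte for PATH, four bytes for ID) and the
 * optional direction mode, which defaults to NFT_TUNNEL_MODE_NONE.
 */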
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};

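/* The remainder of this file implements the "tunnel" stateful object, which
 * builds an ip_tunnel_info metadata dst at object creation time so that it
 * can later be attached to packets to request tunnel encapsulation.
 */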
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];
	} u;
	u32	len;
	__be16	flags;
};

struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
};

static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

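/* Parse VXLAN group-based policy (GBP) tunnel options into the object's
 * option buffer.
 */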
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	opts->flags	= TUNNEL_VXLAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

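/* Parse ERSPAN tunnel options: version 1 carries an index, version 2 carries
 * a direction and a hardware id.
 */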
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	opts->flags	= TUNNEL_ERSPAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};

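/* Parse one GENEVE option and append it to the option buffer. opts->len is a
 * byte count, so each new option is placed at that byte offset within
 * opts->u.data; several GENEVE options may be concatenated this way.
 */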
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	/* opts->len counts bytes: offset the u8 buffer before casting so that
	 * successive options land back to back, not at multiples of
	 * sizeof(struct geneve_opt).
	 */
	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
	int err, data_len;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
			       nft_tunnel_opts_geneve_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
		return -EINVAL;

	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	opts->len += sizeof(*opt) + data_len;
	if (opts->len > IP_TUNNEL_OPTS_MAX)
		return -EINVAL;

	memcpy(opt->opt_data, nla_data(attr), data_len);
	opt->length = data_len / 4;
	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
	opts->flags = TUNNEL_GENEVE_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};

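/* Walk the nested NFTA_TUNNEL_KEY_OPTS attributes. Only one option type may
 * be used per tunnel object, although several GENEVE options can be supplied.
 */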
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	int err, rem, type = 0;
	struct nlattr *nla;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_VXLAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_ERSPAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_GENEVE_OPT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};

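/* Create a tunnel object: a tunnel id plus either an IPv4 or an IPv6
 * destination is mandatory. The parsed parameters are stored in a
 * metadata_dst that the eval path can attach to packets.
 */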
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode	= IP_TUNNEL_INFO_TX;
	info.key.tun_id	= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

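/* Attach the prebuilt tunnel metadata to the packet: any existing dst is
 * dropped and a reference on the object's metadata_dst is taken instead.
 */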
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

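/* Dump the configured tunnel options back to userspace, mirroring the
 * formats accepted by the init helpers above.
 */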
static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_GENEVE_OPT) {
		struct geneve_opt *opt;
		int offset = 0;

		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
		if (!inner)
			goto failure;
		while (opts->len > offset) {
			/* offset is a byte count into the u8 option buffer. */
			opt = (struct geneve_opt *)(opts->u.data + offset);
			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
					 opt->opt_class) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
				       opt->type) ||
			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
				    opt->length * 4, opt->opt_data))
				goto inner_failure;
			offset += sizeof(*opt) + opt->length * 4;
		}
		nla_nest_end(skb, inner);
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");