act_vlan.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;

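/* Datapath handler: invoked for every packet that hits a filter using this
 * action. Runs in the packet fast path with BH disabled and reads the
 * current parameter set via rcu_dereference_bh(), so it must not sleep.
 */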
static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	tcf_action_update_bstats(&v->common, skb);

	/* Ensure 'data' points at mac_header prior to calling vlan
	 * manipulating functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio_exists) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	case TCA_VLAN_ACT_POP_ETH:
		err = skb_eth_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH_ETH:
		err = skb_eth_push(skb, p->tcfv_push_dst, p->tcfv_push_src);
		if (err)
			goto drop;
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	tcf_action_inc_drop_qstats(&v->common);
	return TC_ACT_SHOT;
}

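/* Netlink policy for the TCA_VLAN_* attributes supplied by user space;
 * attributes from TCA_VLAN_PUSH_ETH_DST onwards are validated strictly.
 */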
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_UNSPEC] = { .strict_start_type = TCA_VLAN_PUSH_ETH_DST },
	[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
	[TCA_VLAN_PUSH_ETH_DST] = NLA_POLICY_ETH_ADDR,
	[TCA_VLAN_PUSH_ETH_SRC] = NLA_POLICY_ETH_ADDR,
};

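/* Control path: parse and validate the netlink configuration, create or
 * update the action instance, and publish the new parameter block via
 * rcu_replace_pointer() so concurrent readers in tcf_vlan_act() are not
 * disturbed.
 */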
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool push_prio_exists = false;
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
		if (push_prio_exists)
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	case TCA_VLAN_ACT_POP_ETH:
		break;
	case TCA_VLAN_ACT_PUSH_ETH:
		if (!tb[TCA_VLAN_PUSH_ETH_DST] || !tb[TCA_VLAN_PUSH_ETH_SRC]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_vlan_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
	p->tcfv_push_proto = push_proto;

	if (action == TCA_VLAN_ACT_PUSH_ETH) {
		nla_memcpy(&p->tcfv_push_dst, tb[TCA_VLAN_PUSH_ETH_DST],
			   ETH_ALEN);
		nla_memcpy(&p->tcfv_push_src, tb[TCA_VLAN_PUSH_ETH_SRC],
			   ETH_ALEN);
	}

	spin_lock_bh(&v->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

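/* Release the parameter block when the action is destroyed; kfree_rcu()
 * defers the free until any in-flight readers have finished.
 */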
static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

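/* Serialize the current configuration back to user space, taking tcf_lock
 * so the dump is consistent with concurrent updates.
 */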
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index = v->tcf_index,
		.refcnt = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			 p->tcfv_push_prio))))
		goto nla_put_failure;

	if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
		if (nla_put(skb, TCA_VLAN_PUSH_ETH_DST, ETH_ALEN,
			    p->tcfv_push_dst))
			goto nla_put_failure;
		if (nla_put(skb, TCA_VLAN_PUSH_ETH_SRC, ETH_ALEN,
			    p->tcfv_push_src))
			goto nla_put_failure;
	}

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

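/* Enumerate all vlan action instances in this netns for a dump request. */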
static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

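/* Fold statistics reported by offloading drivers into the software
 * counters.
 */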
static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_t *tm = &v->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}

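/* Upper bound on the netlink attribute space one instance needs when
 * dumped; used by the action core to size notification messages.
 */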
static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_vlan))
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
		+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}

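/* Callback table that wires this module into the generic TC action core. */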
static struct tc_action_ops act_vlan_ops = {
	.kind = "vlan",
	.id = TCA_ID_VLAN,
	.owner = THIS_MODULE,
	.act = tcf_vlan_act,
	.dump = tcf_vlan_dump,
	.init = tcf_vlan_init,
	.cleanup = tcf_vlan_cleanup,
	.walk = tcf_vlan_walker,
	.stats_update = tcf_vlan_stats_update,
	.get_fill_size = tcf_vlan_get_fill_size,
	.lookup = tcf_vlan_search,
	.size = sizeof(struct tcf_vlan),
};

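/* Per-network-namespace setup and teardown of the action index registry. */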
static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(net, tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");