br_mdb.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
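
/* Dump the bridge's multicast router port list as an MDBA_ROUTER nest:
 * one MDBA_ROUTER_PORT attribute per router port, annotated with the
 * remaining router timer and the configured router type.
 */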
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
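
/* Translate internal MDB_PG_FLAGS_* port group flags into the MDB_FLAGS_*
 * values exposed over netlink; the PERMANENT bit is reported via e->state
 * rather than e->flags.
 */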
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}
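
/* Build the internal br_ip lookup key from a userspace br_mdb_entry and
 * the optional MDBE_ATTR_SOURCE attribute.
 */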
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	}
}
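
/* Dump a port group's source list as a nested MDBA_MDB_EATTR_SRC_LIST
 * attribute: one MDBA_MDB_SRCLIST_ENTRY per source, carrying its address
 * and timer. Runs from the RCU dump path or with the bridge
 * multicast_lock held, hence the lockdep annotation on the list walk.
 */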
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
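
/* Fill one MDBA_MDB_ENTRY_INFO nest. With a non-NULL port group @p this
 * describes that port's membership; with p == NULL it describes the
 * host-joined entry on the bridge itself. Source lists and filter mode
 * are only included for IGMPv3/MLDv2.
 */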
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
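
/* Dump all MDB entries of one bridge into an MDBA_MDB nest, resuming
 * from the entry (cb->args[1]) and port group (cb->args[2]) where the
 * previous dump stopped.
 */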
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}
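
/* RTM_GETMDB dump handler: walk all bridge devices in the namespace
 * under RCU and emit one NLM_F_MULTI message per bridge with its MDB
 * entries and router port list.
 */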
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
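
/* Build a complete MDB notification: br_port_msg header plus
 * MDBA_MDB -> MDBA_MDB_ENTRY nests around a single entry.
 */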
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
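
/* Estimate the notification size for one entry, accounting for the
 * optional source address, the group mode and the per-source nested
 * attributes emitted for IGMPv3/MLDv2.
 */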
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
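
/* Deferred switchdev completion context: if the offload succeeded, the
 * matching port group is marked with MDB_PG_FLAGS_OFFLOAD.
 */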
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
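
/* Replicate a host-joined group to one lower device as a
 * SWITCHDEV_OBJ_ID_HOST_MDB object.
 */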
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}
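
/* Notify switchdev and userspace (RTNLGRP_MDB) of an MDB change. Port
 * group changes are offloaded as SWITCHDEV_OBJ_ID_PORT_MDB objects with
 * a completion callback; host-join changes are propagated to all lower
 * devices instead.
 */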
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else
			ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
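
/* Build a router port notification: br_port_msg header plus an
 * MDBA_ROUTER nest holding the port ifindex.
 */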
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}
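
/* Notify userspace on RTNLGRP_MDB about a multicast router port change;
 * a NULL port is encoded as ifindex 0.
 */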
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
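
/* Validate a userspace br_mdb_entry: ifindex, group address class,
 * entry state and VLAN id.
 */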
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};
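
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request, returning the
 * bridge device, the embedded br_mdb_entry and any nested entry
 * attributes (currently only MDBE_ATTR_SOURCE).
 */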
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}
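
/* Add a host join or a new static port group; called with
 * br->multicast_lock held. The ports list is kept ordered by port
 * pointer value, and (*,G)/(S,G) cross-replication is set up according
 * to the resulting filter mode.
 */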
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}
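
/* RTM_NEWMDB handler. If VLAN filtering is enabled and no VLAN is
 * specified, the entry is installed on every VLAN configured on the
 * target port (or on the bridge itself).
 */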
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}
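
/* Remove the host join or the port group matching @entry, under
 * br->multicast_lock.
 */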
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
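
/* RTM_DELMDB handler. Mirrors br_mdb_add(): with VLAN filtering enabled
 * and no VLAN specified, the entry is deleted on every configured VLAN.
 */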
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
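
/* Register the PF_BRIDGE RTM_{GET,NEW,DEL}MDB rtnetlink handlers. */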
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}