br_fdb.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding database
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"

static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *, int, bool);

int __init br_fdb_init(void)
{
	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
					 sizeof(struct net_bridge_fdb_entry),
					 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!br_fdb_cache)
		return -ENOMEM;

	return 0;
}

void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}

int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}

void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}

/* if topology_changing then use forward_delay (default 15 sec)
 * otherwise keep longer (default 5 minutes)
 */
static inline unsigned long hold_time(const struct net_bridge *br)
{
	return br->topology_change ? br->forward_delay : br->ageing_time;
}

static inline int has_expired(const struct net_bridge *br,
			      const struct net_bridge_fdb_entry *fdb)
{
	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
	       time_before_eq(fdb->updated + hold_time(br), jiffies);
}

static void fdb_rcu_free(struct rcu_head *head)
{
	struct net_bridge_fdb_entry *ent
		= container_of(head, struct net_bridge_fdb_entry, rcu);
	kmem_cache_free(br_fdb_cache, ent);
}

static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_key key;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key.vlan_id = vid;
	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));

	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}

/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}

struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f && f->dst)
		dev = f->dst->dev;
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);

struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}

/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}

/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and all ports are
 * updated with the needed information.
 * Called under RTNL.
 */
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}

static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	call_rcu(&f->rcu, fdb_rcu_free);
}

/* Delete a local entry if no other port had the same address. */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			f->dst = op;
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		f->dst = NULL;
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	fdb_delete(br, f, true);
}

void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}
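
/* Called when a bridge port's MAC address changes: drop the old local
 * entries that pointed at this port (unless added by the user) and insert
 * local entries for newaddr on the default VLAN and on every VLAN
 * configured on the port. Runs under RTNL.
 */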
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information
			 * configured, we can safely be done at
			 * this point.
			 */
			if (!vg || !vg->num_vlans)
				goto insert;
		}
	}

insert:
	/* insert new address, may fail if invalid address or dup. */
	fdb_insert(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;
	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_insert(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
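
/* Same idea, but for the bridge device itself: when its MAC address
 * changes, delete the old local entries that are not tied to any port and
 * insert entries for newaddr on the default VLAN and every VLAN in use on
 * the bridge.
 */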
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	fdb_insert(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_insert(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
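
/* Garbage-collection work item: walk the FDB under RCU, expire aged-out
 * dynamic entries, emit inactivity notifications for entries that track
 * activity, and re-arm the delayed work for the next soonest expiry
 * (never sooner than 10 ms from now).
 */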
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer = f->updated + delay;

		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
				if (time_after(this_timer, now))
					work_delay = min(work_delay,
							 this_timer - now);
				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
							   &f->flags))
					fdb_notify(br, f, RTM_NEWNEIGH, false);
			}
			continue;
		}

		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}

/* Completely flush all dynamic entries in forwarding database. */
void br_fdb_flush(struct net_bridge *br)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}

/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (f->dst != p)
			continue;

		if (!do_all)
			if (test_bit(BR_FDB_STATIC, &f->flags) ||
			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
			    (vid && f->key.vlan_id != vid))
				continue;

		if (test_bit(BR_FDB_LOCAL, &f->flags))
			fdb_delete_local(br, p, f);
		else
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}

#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
 * if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (!port)
		ret = 0;
	else {
		fdb = br_fdb_find_rcu(port->br, addr, 0);
		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
			fdb->dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_ATM_LANE */

/*
 * Fill buffer with forwarding table records in
 * the API format.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	int num = 0;

	memset(buf, 0, maxnum * sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		if (!f->dst)
			continue;

		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = f->dst->port_no;
		fe->port_hi = f->dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}
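
/* Allocate a new FDB entry keyed by (addr, vid) and link it into both the
 * rhashtable and the bridge's fdb_list. Returns NULL on allocation failure
 * or if an entry with the same key is already present.
 */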
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (fdb) {
		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
		fdb->dst = source;
		fdb->key.vlan_id = vid;
		fdb->flags = flags;
		fdb->updated = fdb->used = jiffies;
		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
						  &fdb->rhnode,
						  br_fdb_rht_params)) {
			kmem_cache_free(br_fdb_cache, fdb);
			fdb = NULL;
		} else {
			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
		}
	}
	return fdb;
}
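
/* Add a local + static entry for addr/vid; the caller must hold hash_lock.
 * A conflicting learned entry for the same address is deleted first.
 */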
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}

/* Add entry for local address of interface */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		  const unsigned char *addr, u16 vid)
{
	int ret;

	spin_lock_bh(&br->hash_lock);
	ret = fdb_insert(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}

/* returns true if the fdb was modified */
static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
{
	return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
}
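
/* Learning path: called in RCU context (the forwarding fast path, or the
 * NTF_USE handling in __br_fdb_add() below) to refresh or create the entry
 * for a source address seen on @source. hash_lock is taken only when a new
 * entry has to be created.
 */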
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;
			bool fdb_modified = false;

			if (now != fdb->updated) {
				fdb->updated = now;
				fdb_modified = __fdb_mark_active(fdb);
			}

			/* fastpath: update of existing entry */
			if (unlikely(source != fdb->dst &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				fdb->dst = source;
				fdb_modified = true;
				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else we lost the race and someone else inserted it first;
		 * don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
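
/* Map internal FDB flags onto the neighbour (NUD_*) state reported over
 * netlink: local entries are permanent, static ones are NOARP, aged-out
 * dynamic entries are stale, everything else is reachable.
 */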
static int fdb_to_nud(const struct net_bridge *br,
		      const struct net_bridge_fdb_entry *fdb)
{
	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
		return NUD_PERMANENT;
	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
		return NUD_NOARP;
	else if (has_expired(br, fdb))
		return NUD_STALE;
	else
		return NUD_REACHABLE;
}

static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = 0;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state = fdb_to_nud(br, fdb);

	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
		u8 notify_bits = FDB_NOTIFY_BIT;

		if (!nest)
			goto nla_put_failure;
		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;

		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}

		nla_nest_end(skb, nest);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}
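
/* Send an RTM_NEWNEIGH/RTM_DELNEIGH notification for @fdb to the
 * RTNLGRP_NEIGH netlink group and, when @swdev_notify is set, to switchdev
 * listeners as well.
 */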
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(fdb, type);

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	if (!(dev->priv_flags & IFF_EBRIDGE))
		return err;

	if (!filter_dev) {
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (*idx < cb->args[2])
			goto skip;
		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (f->dst)
				goto skip;
		}
		if (!filter_dev && f->dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}
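
/* Look up a single entry for an RTM_GETNEIGH request and fill the reply
 * message; returns -ENOENT if no entry matches addr/vid.
 */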
int br_fdb_get(struct sk_buff *skb,
	       struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr,
	       u16 vid, u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (!f) {
		NL_SET_ERR_MSG(extack, "Fdb entry not found");
		err = -ENOENT;
		goto errout;
	}

	err = fdb_fill_info(skb, br, f, portid, seq,
			    RTM_NEWNEIGH, 0);
errout:
	rcu_read_unlock();
	return err;
}

/* returns true if the fdb is modified */
static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
{
	bool modified = false;

	/* allow to mark an entry as inactive, usually done on creation */
	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
		modified = true;

	if ((notify & FDB_NOTIFY_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* enabled activity tracking */
		modified = true;
	} else if (!(notify & FDB_NOTIFY_BIT) &&
		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* disabled activity tracking, clear notify state */
		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
		modified = true;
	}

	return modified;
}

/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
			 struct nlattr *nfea_tb[])
{
	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
	struct net_bridge_fdb_entry *fdb;
	u16 state = ndm->ndm_state;
	bool modified = false;
	u8 notify = 0;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
			return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid, 0);
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (fdb->dst != source) {
			fdb->dst = source;
			modified = true;
		}
	}

	if (fdb_to_nud(br, fdb) != state) {
		if (state & NUD_PERMANENT) {
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	if (fdb_handle_notify(fdb, notify))
		modified = true;

	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

	fdb->used = jiffies;
	if (modified) {
		if (refresh)
			fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}

static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
			struct netlink_ext_ack *extack)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FDB entry towards bridge must be permanent");
			return -EINVAL;
		}
		err = br_fdb_external_learn_add(br, p, addr, vid, true);
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}

static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
};
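
/* br_fdb_add() below is reached through an AF_BRIDGE RTM_NEWNEIGH request,
 * typically sent by iproute2, e.g.
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 master static
 * (the MAC address and device name above are only illustrative).
 */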
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
				       br_nda_fdb_pol, extack);
		if (err)
			return err;
	} else {
		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
				   extack);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
				   extack);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN. To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
					   nfea_tb, extack);
			if (err)
				goto out;
		}
	}

out:
	return err;
}

static int fdb_delete_by_addr_and_port(struct net_bridge *br,
				       const struct net_bridge_port *p,
				       const u8 *addr, u16 vlan)
{
	struct net_bridge_fdb_entry *fdb;

	fdb = br_fdb_find(br, addr, vlan);
	if (!fdb || fdb->dst != p)
		return -ENOENT;

	fdb_delete(br, fdb, true);

	return 0;
}

static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid)
{
	int err;

	spin_lock_bh(&br->hash_lock);
	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return err;
}
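
/* Counterpart of br_fdb_add(): handles AF_BRIDGE RTM_DELNEIGH, e.g.
 * iproute2's "bridge fdb del <mac> dev <port> master" (command shown only
 * for illustration). When no VLAN is given, the entry is removed from the
 * default VLAN and from every VLAN configured on the port.
 */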
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}
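
/* Add every static FDB address to @p's unicast address filter so a
 * non-promiscuous port still receives traffic for those addresses; on
 * failure, remove the addresses added so far and return the error.
 */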
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();
	return err;

rollback:
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}

	goto done;
}

void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f;

	ASSERT_RTNL();

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;

		dev_uc_del(p->dev, f->key.addr.addr);
	}
	rcu_read_unlock();
}
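
/* Add or refresh an entry learned externally (e.g. reported by a switchdev
 * driver for a hardware-learned address). Such entries carry
 * BR_FDB_ADDED_BY_EXT_LEARN and are not aged out by br_fdb_cleanup();
 * br_fdb_external_learn_del() below removes them again.
 */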
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);

		if (!p)
			flags |= BIT(BR_FDB_LOCAL);

		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		fdb->updated = jiffies;

		if (fdb->dst != p) {
			fdb->dst = p;
			modified = true;
		}

		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			fdb->used = jiffies;
		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
			/* Take over SW learned entry */
			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (!p)
			set_bit(BR_FDB_LOCAL, &fdb->flags);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}

int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	int err = 0;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		fdb_delete(br, fdb, swdev_notify);
	else
		err = -ENOENT;

	spin_unlock_bh(&br->hash_lock);

	return err;
}
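
/* Track whether entries are offloaded to hardware: set or clear
 * BR_FDB_OFFLOADED for a single entry here, and clear it for every entry
 * of a port/VLAN pair in br_fdb_clear_offload() below.
 */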
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
			  const unsigned char *addr, u16 vid, bool offloaded)
{
	struct net_bridge_fdb_entry *fdb;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		change_bit(BR_FDB_OFFLOADED, &fdb->flags);

	spin_unlock_bh(&br->hash_lock);
}

void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	p = br_port_get_rtnl(dev);
	if (!p)
		return;

	spin_lock_bh(&p->br->hash_lock);
	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
		if (f->dst == p && f->key.vlan_id == vid)
			clear_bit(BR_FDB_OFFLOADED, &f->flags);
	}
	spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);