br_mrp.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"
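
/* Destination MACs for MRP_Test and MRP_InTest frames. These are the MRP
 * multicast addresses defined by IEC 62439-2 (MC_TEST and MC_INTEST).
 */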
static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}
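
/* Advance and return the per-instance sequence number carried in the
 * MRP_Common TLV. Note that br_mrp_skb_common() truncates it to 16 bits via
 * cpu_to_be16(), so it wraps around as the 16-bit MRP SequenceID expects.
 */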
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}
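
/* Allocate an skb with the prologue common to all MRP PDUs: an Ethernet
 * header with ethertype ETH_P_MRP, followed by the 16-bit MRP version field.
 * Callers then append the PDU-specific TLVs with br_mrp_skb_tlv() and
 * friends.
 */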
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}
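
/* The MRP_Test PDU built below ends up laid out as:
 *
 *	eth hdr | version | RING_TEST TLV | COMMON TLV | END TLV
 *
 * with the COMMON TLV carrying the sequence number and an all-ones domain
 * UUID (the default MRP domain).
 */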
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

/* This function is continuously called in the following cases:
 * - when node role is MRM, in this case test_monitor is always set to false
 *   because it needs to notify the userspace that the ring is open and needs
 *   to send MRP_Test frames
 * - when node role is MRA, there are 2 subcases:
 *     - when MRA behaves as MRM, in which case it is similar to the MRM role
 *     - when MRA behaves as MRC, in which case test_monitor is set to true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * role MRA and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * the MRM role.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}
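
/* Note: test_count_miss is reset to zero on the receive path, by
 * br_mrp_mrm_process() and br_mrp_mra_process() below, whenever a valid
 * MRP_Test frame is seen. The counter therefore only reaches test_max_miss
 * when MRP_Test frames stop arriving.
 */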

/* This function is continuously called when the node has the interconnect
 * role MIM. It generates interconnect test frames and sends them on all 3
 * ports, but it also checks whether it has stopped receiving interconnect
 * test frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}
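
/* Likewise, in_test_count_miss is reset by br_mrp_mim_process() below when
 * the node receives one of its own MRP_InTest frames back.
 */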

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}
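
/* br_mrp_add() is driven from userspace via the IFLA_BRIDGE_MRP netlink
 * attribute (parsed in br_mrp_netlink.c). As a rough sketch, assuming the
 * uapi struct br_mrp_instance layout from the bridge uapi headers, an MRP
 * daemon would fill it along these lines:
 *
 *	struct br_mrp_instance inst = {
 *		.ring_id   = 1,
 *		.p_ifindex = if_nametoindex("eth0"),
 *		.s_ifindex = if_nametoindex("eth1"),
 *		.prio      = 0x8000,	(default MRM priority)
 *	};
 *
 * and send it as IFLA_BRIDGE_MRP_INSTANCE, nested under
 * IFLA_AF_SPEC / IFLA_BRIDGE_MRP, in an RTM_SETLINK message for the bridge.
 */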

/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of a MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
 * MRC(Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW
	 * will notify the SW when the ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate or monitor MRP test frames, the frames are generated by
 * HW and if it fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period,
					     test->monitor))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}
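
/* Note on the SW fallback above: the delayed work re-queues itself every
 * test->interval microseconds, but br_mrp_test_work_expired() returns without
 * re-queuing once jiffies passes test_end. So the caller must re-issue
 * br_mrp_start_test() within test->period to keep the SW generation or
 * monitoring alive.
 */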

/* Set in state, in state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

/* Set in role, in role can be only MIM(Media Interconnection Manager) or
 * MIC(Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	struct net_bridge_port *p;
	int err;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance already has one. First it needs to be disabled and after
	 * that set the new port
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					   role->ring_id, role->in_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MIM then the HW will
	 * notify the SW when the interconnect ring is open, but if the role
	 * is not pushed to the HW the SW will need to detect when the
	 * interconnect ring is open.
	 */
	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate MRP_InTest frames, the frames are generated by
 * HW and if it fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
					   in_test->max_miss, in_test->period))
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}

/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}

/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify userspace that the ring is closed only when the ring is not
	 * already closed, otherwise it would notify on every received frame.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}

/* Determine if the test hdr has a better priority than the node: a lower
 * prio value wins, and the source MAC address breaks ties.
 */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	     ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than the node will
	 * clear the miss counter, because otherwise the node needs to behave
	 * as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}

/* Process only MRP InTest frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for the InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames. */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify userspace that the interconnect ring is closed only when it
	 * is not already closed, otherwise it would notify on every received
	 * frame.
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}

/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}

static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}
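
/* To summarize the two helpers above: an MRA node acts as an MRM (sends
 * MRP_Test frames, reacts to ring open/close) until it sees MRP_Test frames
 * from a better MRM, at which point userspace sets test_monitor via
 * br_mrp_start_test() and the node only monitors the better MRM's MRP_Test
 * frames, like an MRC would.
 */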

/* This will just forward the frame to the other mrp ring ports, depending on
 * the frame type, ring role and interconnect role
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as MRM node
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in block state it should not forward
		 * In_Test frames
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* A node that behaves as MRM needs to stop forwarding the
		 * frames when the ring is closed, otherwise there would be a
		 * loop. In this case the frame is not forwarded between the
		 * ring ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an interconnect
		 * role should forward all frames between the ring ports,
		 * because it doesn't have an interconnect port
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange and
				 * IntTopoChange between ring ports but MIM
				 * should not forward IntLinkChange and
				 * IntTopoChange if the frame was received at
				 * the interconnect port
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the received port
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames only if they
			 * are received on ring ports to all the ports
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* Should forward the InTopo frames only between the
			 * ring ports
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}

/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has an MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
		return br_mrp_rcv(p, skb, p->dev);

out:
	return 0;
}
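
/* A bridge is considered MRP-enabled as long as at least one MRP instance
 * exists on it.
 */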
bool br_mrp_enabled(struct net_bridge *br)
{
	return !list_empty(&br->mrp_list);
}