br_mrp_netlink.c

// SPDX-License-Identifier: GPL-2.0-or-later
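/*
 * Netlink configuration interface for the bridge Media Redundancy Protocol
 * (MRP) implementation: parses the nested IFLA_BRIDGE_MRP attributes used to
 * configure MRP instances and fills the corresponding state for dumps.
 */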
#include <net/genetlink.h>

#include <uapi/linux/mrp_bridge.h>
#include "br_private.h"
#include "br_private_mrp.h"

static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = {
	[IFLA_BRIDGE_MRP_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_INSTANCE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_PORT_STATE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_PORT_ROLE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_RING_STATE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_RING_ROLE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_START_TEST] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_IN_ROLE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_IN_STATE] = { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_START_IN_TEST] = { .type = NLA_NESTED },
};

static const struct nla_policy
br_mrp_instance_policy[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_INSTANCE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_PRIO] = { .type = NLA_U16 },
};
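
/* Parse the nested IFLA_BRIDGE_MRP_INSTANCE attribute. RING_ID, P_IFINDEX and
 * S_IFINDEX are mandatory, PRIO defaults to MRP_DEFAULT_PRIO. On RTM_SETLINK
 * the MRP instance is created, otherwise it is deleted.
 */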
static int br_mrp_instance_parse(struct net_bridge *br, struct nlattr *attr,
				 int cmd, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1];
	struct br_mrp_instance inst;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_INSTANCE_MAX, attr,
			       br_mrp_instance_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] ||
	    !tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or P_IFINDEX or S_IFINDEX");
		return -EINVAL;
	}

	memset(&inst, 0, sizeof(inst));

	inst.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]);
	inst.p_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]);
	inst.s_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]);
	inst.prio = MRP_DEFAULT_PRIO;

	if (tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO])
		inst.prio = nla_get_u16(tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]);

	if (cmd == RTM_SETLINK)
		return br_mrp_add(br, &inst);
	else
		return br_mrp_del(br, &inst);

	return 0;
}

static const struct nla_policy
br_mrp_port_state_policy[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_PORT_STATE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_PORT_STATE_STATE] = { .type = NLA_U32 },
};
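
/* Parse the nested IFLA_BRIDGE_MRP_PORT_STATE attribute and set the MRP state
 * of the given bridge port.
 */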
static int br_mrp_port_state_parse(struct net_bridge_port *p,
				   struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1];
	enum br_mrp_port_state_type state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_STATE_MAX, attr,
			       br_mrp_port_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing attribute: STATE");
		return -EINVAL;
	}

	state = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]);

	return br_mrp_set_port_state(p, state);
}

static const struct nla_policy
br_mrp_port_role_policy[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_PORT_ROLE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE] = { .type = NLA_U32 },
};
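
/* Parse the nested IFLA_BRIDGE_MRP_PORT_ROLE attribute and set the MRP role
 * (primary/secondary) of the given bridge port.
 */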
static int br_mrp_port_role_parse(struct net_bridge_port *p,
				  struct nlattr *attr,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1];
	enum br_mrp_port_role_type role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_ROLE_MAX, attr,
			       br_mrp_port_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing attribute: ROLE");
		return -EINVAL;
	}

	role = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]);

	return br_mrp_set_port_role(p, role);
}

static const struct nla_policy
br_mrp_ring_state_policy[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_RING_STATE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_RING_STATE_RING_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_RING_STATE_STATE] = { .type = NLA_U32 },
};
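
/* Parse the nested IFLA_BRIDGE_MRP_RING_STATE attribute and set the ring
 * state (open/closed) of the MRP instance identified by RING_ID.
 */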
static int br_mrp_ring_state_parse(struct net_bridge *br, struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1];
	struct br_mrp_ring_state state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_STATE_MAX, attr,
			       br_mrp_ring_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or STATE");
		return -EINVAL;
	}

	memset(&state, 0x0, sizeof(state));

	state.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID]);
	state.ring_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]);

	return br_mrp_set_ring_state(br, &state);
}

static const struct nla_policy
br_mrp_ring_role_policy[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_RING_ROLE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_RING_ROLE_ROLE] = { .type = NLA_U32 },
};
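
/* Parse the nested IFLA_BRIDGE_MRP_RING_ROLE attribute and set the ring role
 * (e.g. manager or client) of the MRP instance identified by RING_ID.
 */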
static int br_mrp_ring_role_parse(struct net_bridge *br, struct nlattr *attr,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1];
	struct br_mrp_ring_role role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_ROLE_MAX, attr,
			       br_mrp_ring_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or ROLE");
		return -EINVAL;
	}

	memset(&role, 0x0, sizeof(role));

	role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID]);
	role.ring_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]);

	return br_mrp_set_ring_role(br, &role);
}

static const struct nla_policy
br_mrp_start_test_policy[IFLA_BRIDGE_MRP_START_TEST_MAX + 1] = {
	[IFLA_BRIDGE_MRP_START_TEST_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_START_TEST_RING_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_INTERVAL] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_PERIOD] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_MONITOR] = { .type = NLA_U32 },
};
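
/* Parse the nested IFLA_BRIDGE_MRP_START_TEST attribute and start the
 * generation of MRP_Test frames for the MRP instance identified by RING_ID,
 * using the given interval, maximum miss count and period. The optional
 * MONITOR flag is passed through as well.
 */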
static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_START_TEST_MAX + 1];
	struct br_mrp_start_test test;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_TEST_MAX, attr,
			       br_mrp_start_test_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or INTERVAL or MAX_MISS or PERIOD");
		return -EINVAL;
	}

	memset(&test, 0x0, sizeof(test));

	test.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID]);
	test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL]);
	test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS]);
	test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]);
	test.monitor = false;

	if (tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR])
		test.monitor =
			nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR]);

	return br_mrp_start_test(br, &test);
}

static const struct nla_policy
br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_IN_STATE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_STATE_STATE] = { .type = NLA_U32 },
};
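
/* Parse the nested IFLA_BRIDGE_MRP_IN_STATE attribute and set the
 * interconnect state (open/closed) of the interconnect instance identified
 * by IN_ID.
 */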
static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1];
	struct br_mrp_in_state state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr,
			       br_mrp_in_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: IN_ID or STATE");
		return -EINVAL;
	}

	memset(&state, 0x0, sizeof(state));

	state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]);
	state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]);

	return br_mrp_set_in_state(br, &state);
}

static const struct nla_policy
br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] = { .type = NLA_U16 },
	[IFLA_BRIDGE_MRP_IN_ROLE_ROLE] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] = { .type = NLA_U32 },
};
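
/* Parse the nested IFLA_BRIDGE_MRP_IN_ROLE attribute and set the interconnect
 * role of the MRP instance identified by RING_ID, binding the interconnect
 * port I_IFINDEX and the interconnect ID IN_ID to it.
 */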
static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1];
	struct br_mrp_in_role role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr,
			       br_mrp_in_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX");
		return -EINVAL;
	}

	memset(&role, 0x0, sizeof(role));

	role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]);
	role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]);
	role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]);
	role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]);

	return br_mrp_set_in_role(br, &role);
}

static const struct nla_policy
br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = {
	[IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC] = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD] = { .type = NLA_U32 },
};
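
/* Parse the nested IFLA_BRIDGE_MRP_START_IN_TEST attribute and start the
 * generation of MRP_InTest frames for the interconnect instance identified by
 * IN_ID, using the given interval, maximum miss count and period.
 */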
static int br_mrp_start_in_test_parse(struct net_bridge *br,
				      struct nlattr *attr,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1];
	struct br_mrp_start_in_test test;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr,
			       br_mrp_start_in_test_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: IN_ID or INTERVAL or MAX_MISS or PERIOD");
		return -EINVAL;
	}

	memset(&test, 0x0, sizeof(test));

	test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]);
	test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]);
	test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]);
	test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]);

	return br_mrp_start_in_test(br, &test);
}
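
/* br_mrp_parse - entry point for MRP netlink configuration
 *
 * Called from the bridge netlink code for either the bridge itself or one of
 * its ports (in which case @p is set and @br is taken from the port). MRP
 * cannot be configured while STP is enabled; otherwise each nested
 * IFLA_BRIDGE_MRP_* attribute is dispatched to its parser above.
 */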
int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
		 struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_MAX + 1];
	int err;

	/* When this function is called for a port, the br pointer is invalid,
	 * therefore make it point to the port's bridge.
	 */
	if (p)
		br = p->br;

	if (br->stp_enabled != BR_NO_STP) {
		NL_SET_ERR_MSG_MOD(extack, "MRP can't be enabled if STP is already enabled");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_MAX, attr,
			       br_mrp_policy, extack);
	if (err)
		return err;

	if (tb[IFLA_BRIDGE_MRP_INSTANCE]) {
		err = br_mrp_instance_parse(br, tb[IFLA_BRIDGE_MRP_INSTANCE],
					    cmd, extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_PORT_STATE]) {
		err = br_mrp_port_state_parse(p, tb[IFLA_BRIDGE_MRP_PORT_STATE],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_PORT_ROLE]) {
		err = br_mrp_port_role_parse(p, tb[IFLA_BRIDGE_MRP_PORT_ROLE],
					     extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_RING_STATE]) {
		err = br_mrp_ring_state_parse(br,
					      tb[IFLA_BRIDGE_MRP_RING_STATE],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_RING_ROLE]) {
		err = br_mrp_ring_role_parse(br, tb[IFLA_BRIDGE_MRP_RING_ROLE],
					     extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_START_TEST]) {
		err = br_mrp_start_test_parse(br,
					      tb[IFLA_BRIDGE_MRP_START_TEST],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_IN_STATE]) {
		err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE],
					    extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) {
		err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE],
					   extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) {
		err = br_mrp_start_in_test_parse(br,
						 tb[IFLA_BRIDGE_MRP_START_IN_TEST],
						 extack);
		if (err)
			return err;
	}

	return 0;
}
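
/* br_mrp_fill_info - dump MRP state
 *
 * Fill the IFLA_BRIDGE_MRP attribute with one IFLA_BRIDGE_MRP_INFO nest per
 * MRP instance of the bridge. The instance list is walked under RCU.
 */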
int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
{
	struct nlattr *tb, *mrp_tb;
	struct br_mrp *mrp;

	mrp_tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP);
	if (!mrp_tb)
		return -EMSGSIZE;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list) {
		struct net_bridge_port *p;

		tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO);
		if (!tb)
			goto nla_info_failure;

		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ID,
				mrp->ring_id))
			goto nla_put_failure;

		p = rcu_dereference(mrp->p_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		p = rcu_dereference(mrp->s_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		p = rcu_dereference(mrp->i_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO,
				mrp->prio))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_STATE,
				mrp->ring_state))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ROLE,
				mrp->ring_role))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
				mrp->test_interval))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
				mrp->test_max_miss))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
				mrp->test_monitor))
			goto nla_put_failure;

		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE,
				mrp->in_state))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE,
				mrp->in_role))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
				mrp->in_test_interval))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
				mrp->in_test_max_miss))
			goto nla_put_failure;

		nla_nest_end(skb, tb);
	}
	nla_nest_end(skb, mrp_tb);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, tb);

nla_info_failure:
	nla_nest_cancel(skb, mrp_tb);

	return -EMSGSIZE;
}
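
/* br_mrp_ring_port_open - notify loss/restore of continuity on a ring port
 *
 * Set or clear BR_MRP_LOST_CONT on the bridge port behind @dev (@loc is
 * non-zero when continuity was lost) and notify userspace via RTM_NEWLINK.
 */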
int br_mrp_ring_port_open(struct net_device *dev, u8 loc)
{
	struct net_bridge_port *p;
	int err = 0;

	p = br_port_get_rcu(dev);
	if (!p) {
		err = -EINVAL;
		goto out;
	}

	if (loc)
		p->flags |= BR_MRP_LOST_CONT;
	else
		p->flags &= ~BR_MRP_LOST_CONT;

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

out:
	return err;
}
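
/* br_mrp_in_port_open - notify loss/restore of continuity on the interconnect
 * port
 *
 * Same as br_mrp_ring_port_open(), but for the interconnect port: it updates
 * BR_MRP_LOST_IN_CONT instead.
 */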
int br_mrp_in_port_open(struct net_device *dev, u8 loc)
{
	struct net_bridge_port *p;
	int err = 0;

	p = br_port_get_rcu(dev);
	if (!p) {
		err = -EINVAL;
		goto out;
	}

	if (loc)
		p->flags |= BR_MRP_LOST_IN_CONT;
	else
		p->flags &= ~BR_MRP_LOST_IN_CONT;

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

out:
	return err;
}