roce_gid_mgmt.c

/*
 * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ 3
struct netdev_event_work_cmd {
        roce_netdev_callback cb;
        roce_netdev_filter filter;
        struct net_device *ndev;
        struct net_device *filter_ndev;
};

struct netdev_event_work {
        struct work_struct work;
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
};

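/* Map of port capability checks to the RoCE GID type each one enables. */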
static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

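/* Add or delete @gid once for every GID type the port supports. */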
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
                              BONDING_SLAVE_STATE_NA)

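/*
 * Return true when addresses of the event netdev (cookie) belong in this
 * port's GID table: the event netdev's real device is the rdma netdev
 * itself, or the event netdev is stacked above the rdma netdev while the
 * rdma netdev is the bond's active slave (or is not a bond slave at all).
 */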
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

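/*
 * Return true when the rdma netdev is currently an inactive slave of a
 * bond master device.
 */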
static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
                                  struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
              BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev:     IB device to check
 * @port:       Port to consider for adding default GID
 * @rdma_ndev:  rdma netdevice pointer
 * @cookie:     Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving default RoCE GID, returns false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();

        /*
         * When the rdma netdevice is used in bonding, the bonding master
         * netdevice should be considered for default GIDs. Therefore, ignore
         * slave rdma netdevices when bonding is considered.
         * Additionally, when the event (cookie) netdevice is the bond master
         * device, make sure that it is the upper netdevice of the rdma
         * netdevice.
         */
        res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
               (netif_is_bond_master(cookie_ndev) &&
                rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

        rcu_read_unlock();
        return res;
}

static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
                            struct net_device *rdma_ndev, void *cookie)
{
        return true;
}

static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
                                struct net_device *rdma_ndev, void *cookie)
{
        bool res;

        if (!rdma_ndev)
                return false;

        if (rdma_ndev == cookie)
                return true;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is the bond master device of the netdevice of the RDMA device port.
 * @ib_dev:     IB device to check
 * @port:       Port to consider for adding default GID
 * @rdma_ndev:  Pointer to rdma netdevice
 * @cookie:     Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if the cookie netdevice
 * is a bond master device and rdma_ndev is its lower netdevice. It might
 * not have been established as slave device yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev,
                                 void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool match = false;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        if (netif_is_bond_master(cookie_ndev) &&
            rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
                match = true;

        rcu_read_unlock();
        return match;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

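/*
 * Delete the port's default GIDs when the bond (event) netdev is the rdma
 * netdev or stacked above it; skip the deletion when the rdma netdev is an
 * inactive bond slave or is unrelated to the event netdev.
 */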
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *rdma_ndev,
                                            struct net_device *event_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
        unsigned long gid_type_mask;

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }

        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                     gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

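/*
 * Snapshot @ndev's IPv4 addresses under RCU, then add a GID for each
 * address outside the RCU read-side critical section.
 */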
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        const struct in_ifaddr *ifa;
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        in_dev_for_each_ifa_rcu(ifa, in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }

        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

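/*
 * IPv6 counterpart of enum_netdev_ipv4_ips(): snapshot the address list
 * under the inet6_dev lock, then add a GID per address.
 */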
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev:     RDMA device pointer
 * @port:       Port of the RDMA device whose GID table to consider
 * @rdma_ndev:  Unused rdma netdevice
 * @cookie:     Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void add_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        down_read(&net_rwsem);
        for_each_net(net)
                for_each_netdev(net, ndev) {
                        /*
                         * Filter and add default GIDs of the primary netdevice
                         * when not in bonding mode, or add default GIDs
                         * of bond master device, when in bonding mode.
                         */
                        if (is_ndev_for_default_gid_filter(ib_dev, port,
                                                           rdma_ndev, ndev))
                                add_default_gids(ib_dev, port, rdma_ndev, ndev);

                        if (is_eth_port_of_netdev_filter(ib_dev, port,
                                                         rdma_ndev, ndev))
                                _add_netdev_ips(ib_dev, port, ndev);
                }
        up_read(&net_rwsem);
        rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        return update_gid(parsed->gid_op, device,
                          port, &parsed->gid,
                          &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper,
                             struct netdev_nested_priv *priv)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = (struct list_head *)priv->data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

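/*
 * Collect every netdev stacked above the cookie netdev (holding a
 * reference to each), then invoke @handle_netdev on the cookie netdev and
 * on each collected upper device.
 */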
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct netdev_nested_priv priv;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        priv.data = &upper_list;
        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
                                                master_ndev);
                dev_put(master_ndev);
        }
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

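/*
 * Copy the command array into a freshly allocated work item, take a
 * reference on each netdev it touches and queue it on gid_cache_wq.
 */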
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb     = add_netdev_ips,
        .filter = is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb     = add_netdev_upper_ips,
        .filter = is_eth_port_of_netdev_filter
};

static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
                  struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb     = del_netdev_upper_ips,
                .filter = upper_device_filter
        };

        cmds[0] = upper_ips_del_cmd;
        cmds[0].ndev = changeupper_info->upper_dev;
        cmds[1] = add_cmd;
}

static const struct netdev_event_work_cmd bonding_default_add_cmd = {
        .cb     = add_default_gids,
        .filter = is_upper_ndev_bond_master_filter
};

static void
ndev_event_link(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb     = del_default_gids,
                .filter = is_upper_ndev_bond_master_filter
        };

        /*
         * When a lower netdev is linked to its upper bonding
         * netdev, delete lower slave netdev's default GIDs.
         */
        cmds[0] = bonding_default_del_cmd;
        cmds[0].ndev = event_ndev;
        cmds[0].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device default GIDs */
        cmds[1] = bonding_default_add_cmd;
        cmds[1].ndev = changeupper_info->upper_dev;
        cmds[1].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device IP based GIDs */
        cmds[2] = add_cmd_upper_ips;
        cmds[2].ndev = changeupper_info->upper_dev;
        cmds[2].filter_ndev = changeupper_info->upper_dev;
}

static void netdevice_event_changeupper(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        if (changeupper_info->linking)
                ndev_event_link(event_ndev, changeupper_info, cmds);
        else
                ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
        .cb     = add_default_gids,
        .filter = is_ndev_for_default_gid_filter,
};

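/*
 * Netdevice notifier: translate each event into up to
 * ROCE_NETDEV_CALLBACK_SZ filter/callback commands and queue them for
 * execution against every RoCE port.
 */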
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb     = del_netdev_default_ips_join,
                .filter = is_eth_port_inactive_slave_filter
        };
        static const struct netdev_event_work_cmd netdev_del_cmd = {
                .cb     = del_netdev_ips,
                .filter = is_eth_port_of_netdev_filter
        };
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_default_gid_cmd;
                cmds[2] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = netdev_del_cmd;
                if (ndev->reg_state == NETREG_REGISTERED) {
                        cmds[1] = add_default_gid_cmd;
                        cmds[2] = add_cmd;
                }
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(ndev,
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                /* Add default GIDs of the bond device */
                cmds[1] = bonding_default_add_cmd;
                /* Add IP based GIDs of the bond device */
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

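/*
 * Worker for inet/inet6 address events: apply the queued GID add/del on
 * every RoCE port whose netdev matches the address' netdev.
 */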
static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
                                 work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

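/*
 * Common helper for the inetaddr/inet6addr notifiers: map NETDEV_UP/DOWN
 * to a GID add/del and queue the update on gid_cache_wq.
 */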
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in in;
        struct net_device *ndev;
        struct in_ifaddr *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6 in6;
        struct net_device *ndev;
        struct inet6_ifaddr *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register to this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);

        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}