udp_tunnel_nic.c

// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
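
/* Lifecycle of a table entry, as implemented below: ADD and DEL mark an
 * operation queued towards the device, OP_FAIL records that the last
 * operation on the entry failed (so the entry state may not match the
 * device), and FROZEN pins the entry while a replay re-walks the
 * tunnel drivers' state.
 */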
enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD	= BIT(0),
	UDP_TUNNEL_NIC_ENTRY_DEL	= BIT(1),
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL	= BIT(2),
	UDP_TUNNEL_NIC_ENTRY_FROZEN	= BIT(3),
};

struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 flags;
	u16 use_cnt;
#define UDP_TUNNEL_NIC_USE_CNT_MAX	U16_MAX
	u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which have overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};

/* All work structs are guaranteed to be done using the driver state by
 * the time the driver unregisters, but not done running this module's
 * code. We need a workqueue we can flush before the module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		return "vxlan-gpe";
	default:
		return "unknown";
	}
}
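
/* Entry state helpers: an entry is "free" when it has no users and no
 * pending flags, and "present" when it is in use and not waiting on a
 * device operation (only FROZEN may be set). Freezing is used during
 * replay to keep use counts stable while ports are re-announced.
 */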
static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
	if (!udp_tunnel_nic_entry_is_free(entry))
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
			       UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}

static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return false;
	return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	if (!utn->missed)
		return false;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!test_bit(i, &utn->missed))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return true;
	}

	return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			       unsigned int idx, u8 priv)
{
	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}
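
/* Clear the queued ADD/DEL flags once the device has acted on an entry.
 * For entries marked OP_FAIL, -EEXIST on add and -ENOENT on delete are
 * treated as success: the device is already in the requested state.
 */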
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}
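
/* Two sync strategies, depending on which callbacks a driver provides:
 * drivers with set_port/unset_port get one call per changed entry,
 * while drivers with sync_table get one call per table with queued
 * changes and are expected to re-read the whole table.
 */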
static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
				   struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
				struct udp_tunnel_info *ti)
{
	return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
			  struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i;

	/* Special case IPv4-only NICs */
	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
	    ti->sa_family != AF_INET)
		return false;

	for (i = 0; i < utn->n_tables; i++)
		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
			return true;
	return false;
}

static int
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
			     struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_table_entry *entry;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			entry = &utn->entries[i][j];

			if (!udp_tunnel_nic_entry_is_free(entry) &&
			    entry->port == ti->port &&
			    entry->type != ti->type) {
				__set_bit(i, &utn->missed);
				return true;
			}
		}
	return false;
}
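
/* Adjust an entry's use count and queue a device op when the entry goes
 * from unused to used or back. An op still queued in the opposite
 * direction can simply be cancelled, unless a previous op failed
 * (OP_FAIL), in which case the entry must be re-synced regardless.
 */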
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}

/* Try to find existing matching entry and adjust its use count, instead of
 * adding a new one. Returns true if entry was found. In case of delete the
 * entry may have gotten removed in the process, in which case it will be
 * queued for removal.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti, int use_cnt_adj)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
							 use_cnt_adj))
				return true;
	}

	return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* A different table may still fit this port in, but there
		 * are no devices currently which have multiple tables accepting
		 * the same tunnel type, and false positives are okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}
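
/* Entry points invoked through the ops table at the bottom of this file
 * when a tunnel driver starts or stops listening on a UDP port. All of
 * this state is serialized by RTNL (see ASSERT_RTNL() in the reset
 * handler and rtnl_lock() in the sync work).
 */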
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
	    ti->port == htons(IANA_VXLAN_UDP_PORT)) {
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
		return;
	}

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and a different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	udp_tunnel_nic_del_existing(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}
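
/* Reset notification: the device lost its port table (e.g. across a
 * reset or reconfiguration), so forget pending deletes and failures and
 * queue an ADD for every port that is still in use.
 */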
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}

static size_t
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int j;
	size_t size;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	size = 0;
	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}

static int
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			    struct sk_buff *skb)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	struct nlattr *nest;
	unsigned int j;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!nest)
			return -EMSGSIZE;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 utn->entries[table][j].port) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(utn->entries[table][j].type)))
			goto err_cancel;

		nla_nest_end(skb, nest);
	}

	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
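
/* The ops below are exposed to the rest of the stack through the
 * udp_tunnel_nic_ops pointer, which is set at module init.
 */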
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port	= __udp_tunnel_nic_get_port,
	.set_port_priv	= __udp_tunnel_nic_set_port_priv,
	.add_port	= __udp_tunnel_nic_add_port,
	.del_port	= __udp_tunnel_nic_del_port,
	.reset_ntf	= __udp_tunnel_nic_reset_ntf,
	.dump_size	= __udp_tunnel_nic_dump_size,
	.dump_write	= __udp_tunnel_nic_dump_write,
};
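
/* Drop all ports from the device: walk every entry, subtract its whole
 * use count, sync the resulting deletes, then clear the tables.
 */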
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}

static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the refcount.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	if (!info->shared) {
		udp_tunnel_get_rx_info(dev);
	} else {
		list_for_each_entry(node, &info->shared->devices, list)
			udp_tunnel_get_rx_info(node->dev);
	}

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);
	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
		     unsigned int n_tables)
{
	struct udp_tunnel_nic *utn;
	unsigned int i;

	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
	if (!utn)
		return NULL;
	utn->n_tables = n_tables;
	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
	if (!utn->entries)
		goto err_free_utn;

	for (i = 0; i < n_tables; i++) {
		utn->entries[i] = kcalloc(info->tables[i].n_entries,
					  sizeof(*utn->entries[i]), GFP_KERNEL);
		if (!utn->entries[i])
			goto err_free_prev_entries;
	}

	return utn;

err_free_prev_entries:
	while (i--)
		kfree(utn->entries[i]);
	kfree(utn->entries);
err_free_utn:
	kfree(utn);
	return NULL;
}

static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
{
	unsigned int i;

	for (i = 0; i < utn->n_tables; i++)
		kfree(utn->entries[i]);
	kfree(utn->entries);
	kfree(utn);
}
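
/* For reference, the shape of the driver-side info this code consumes:
 * a driver declares a static struct udp_tunnel_nic_info and points
 * dev->udp_tunnel_nic_info at it before register_netdev(). A minimal
 * sketch (callback names are illustrative, not a real driver):
 *
 *	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *		.set_port	= foo_udp_tunnel_set_port,
 *		.unset_port	= foo_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{
 *				.n_entries	= 4,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
 *			},
 *		},
 *	};
 *
 * Drivers which prefer table-at-a-time updates set .sync_table instead
 * of .set_port/.unset_port; udp_tunnel_nic_register() below rejects
 * infos which provide both or neither.
 */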
static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node = NULL;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);
	/* Expect use count of at most 2 (IPv4, IPv6) per device */
	BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
		     UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);

	/* Check that the driver info is sane */
	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	if (WARN_ON(info->shared &&
		    info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return -EINVAL;

	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	/* Create UDP tunnel state structures */
	if (info->shared) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->dev = dev;
	}

	if (info->shared && info->shared->udp_tunnel_nic_info) {
		utn = info->shared->udp_tunnel_nic_info;
	} else {
		utn = udp_tunnel_nic_alloc(info, n_tables);
		if (!utn) {
			kfree(node);
			return -ENOMEM;
		}
	}

	if (info->shared) {
		if (!info->shared->udp_tunnel_nic_info) {
			INIT_LIST_HEAD(&info->shared->devices);
			info->shared->udp_tunnel_nic_info = utn;
		}

		list_add_tail(&node->list, &info->shared->devices);
	}

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}

static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* For a shared table remove this dev from the list of sharing devices
	 * and if there are other devices just detach.
	 */
	if (info->shared) {
		struct udp_tunnel_nic_shared_node *node, *first;

		list_for_each_entry(node, &info->shared->devices, list)
			if (node->dev == dev)
				break;
		if (list_entry_is_head(node, &info->shared->devices, list))
			return;

		list_del(&node->list);
		kfree(node);

		first = list_first_entry_or_null(&info->shared->devices,
						 typeof(*first), list);
		if (first) {
			udp_tunnel_drop_rx_info(dev);
			utn->dev = first->dev;
			goto release_dev;
		}

		info->shared->udp_tunnel_nic_info = NULL;
	}

	/* Flush before we check work, so we don't waste time adding entries
	 * from the work which we will boot immediately.
	 */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	udp_tunnel_nic_free(utn);
release_dev:
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}
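
/* All state transitions are driven off netdev notifier events:
 * REGISTER allocates the state, UNREGISTER tears it down, and for
 * OPEN_ONLY devices UP/GOING_DOWN program and flush the ports.
 */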
static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events will need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* All other events only matter if NIC has to be programmed open */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};

static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
late_initcall(udp_tunnel_nic_init_module);

static void __exit udp_tunnel_nic_cleanup_module(void)
{
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");