/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
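
/* Lock-ordering sketch: because RTNL cannot be taken while bpf_devs_lock is
 * held, paths that need both locks acquire RTNL first, as
 * bpf_map_offload_map_alloc() and bpf_map_offload_map_free() below do:
 *
 *	rtnl_lock();
 *	down_write(&bpf_devs_lock);
 *	...			(touch offdevs / per-netdev offload state)
 *	up_write(&bpf_devs_lock);
 *	rtnl_unlock();
 */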

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
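
/* Minimal driver-side usage sketch for the exported API above. The callback
 * table my_bpf_dev_ops and the my_probe()/my_remove()/struct my_device names
 * are hypothetical placeholders for illustration only; the callback members
 * (prepare, insn_hook, finalize, translate, destroy) are the ones this file
 * invokes through offload->offdev->ops.
 *
 *	static const struct bpf_prog_offload_ops my_bpf_dev_ops = {
 *		.prepare	= my_bpf_verifier_prep,
 *		.insn_hook	= my_bpf_verify_insn,
 *		.finalize	= my_bpf_finalize,
 *		.translate	= my_bpf_translate,
 *		.destroy	= my_bpf_destroy,
 *	};
 *
 *	int my_probe(struct my_device *dev)
 *	{
 *		dev->bpf_dev = bpf_offload_dev_create(&my_bpf_dev_ops, dev);
 *		if (IS_ERR(dev->bpf_dev))
 *			return PTR_ERR(dev->bpf_dev);
 *		// One offload device may back several netdevs; register each.
 *		return bpf_offload_dev_netdev_register(dev->bpf_dev,
 *						       dev->netdev);
 *	}
 *
 *	void my_remove(struct my_device *dev)
 *	{
 *		// Unregister requires RTNL (see ASSERT_RTNL() above).
 *		rtnl_lock();
 *		bpf_offload_dev_netdev_unregister(dev->bpf_dev, dev->netdev);
 *		rtnl_unlock();
 *		bpf_offload_dev_destroy(dev->bpf_dev);
 *	}
 */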