net_namespace.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/net_namespace.h>

/*
 * Functions to manage BPF programs attached to netns
 */

struct bpf_netns_link {
	struct bpf_link link;
	enum bpf_attach_type type;
	enum netns_bpf_attach_type netns_type;

	/* We don't hold a ref to net in order to auto-detach the link
	 * when netns is going away. Instead we rely on pernet
	 * pre_exit callback to clear this pointer. Must be accessed
	 * with netns_bpf_mutex held.
	 */
	struct net *net;
	struct list_head node; /* node in list of links attached to net */
};

/* Protects updates to netns_bpf */
DEFINE_MUTEX(netns_bpf_mutex);

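/* Enable/disable the static key that gates BPF sk_lookup dispatch whenever
 * programs are attached to or detached from the SK_LOOKUP attach point.
 */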
static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type)
{
	switch (type) {
#ifdef CONFIG_INET
	case NETNS_BPF_SK_LOOKUP:
		static_branch_dec(&bpf_sk_lookup_enabled);
		break;
#endif
	default:
		break;
	}
}

static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type)
{
	switch (type) {
#ifdef CONFIG_INET
	case NETNS_BPF_SK_LOOKUP:
		static_branch_inc(&bpf_sk_lookup_enabled);
		break;
#endif
	default:
		break;
	}
}

/* Must be called with netns_bpf_mutex held. */
static void netns_bpf_run_array_detach(struct net *net,
				       enum netns_bpf_attach_type type)
{
	struct bpf_prog_array *run_array;

	run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
					lockdep_is_held(&netns_bpf_mutex));
	bpf_prog_array_free(run_array);
}

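/* Helpers for walking the per-netns list of attached links. Callers hold
 * netns_bpf_mutex.
 */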
static int link_index(struct net *net, enum netns_bpf_attach_type type,
		      struct bpf_netns_link *link)
{
	struct bpf_netns_link *pos;
	int i = 0;

	list_for_each_entry(pos, &net->bpf.links[type], node) {
		if (pos == link)
			return i;
		i++;
	}
	return -ENOENT;
}

static int link_count(struct net *net, enum netns_bpf_attach_type type)
{
	struct list_head *pos;
	int i = 0;

	list_for_each(pos, &net->bpf.links[type])
		i++;
	return i;
}

static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type,
			    struct bpf_prog_array *prog_array)
{
	struct bpf_netns_link *pos;
	unsigned int i = 0;

	list_for_each_entry(pos, &net->bpf.links[type], node) {
		prog_array->items[i].prog = pos->link.prog;
		i++;
	}
}

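/* Link release (last reference dropped or explicit detach): remove the link
 * from the netns list and rebuild the run array without its program. If the
 * new array cannot be allocated, fall back to deleting the entry in place.
 */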
static void bpf_netns_link_release(struct bpf_link *link)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	enum netns_bpf_attach_type type = net_link->netns_type;
	struct bpf_prog_array *old_array, *new_array;
	struct net *net;
	int cnt, idx;

	mutex_lock(&netns_bpf_mutex);

	/* We can race with cleanup_net, but if we see a non-NULL
	 * struct net pointer, pre_exit has not run yet and will wait
	 * for netns_bpf_mutex.
	 */
	net = net_link->net;
	if (!net)
		goto out_unlock;

	/* Mark attach point as unused */
	netns_bpf_attach_type_unneed(type);

	/* Remember link position in case of safe delete */
	idx = link_index(net, type, net_link);
	list_del(&net_link->node);

	cnt = link_count(net, type);
	if (!cnt) {
		netns_bpf_run_array_detach(net, type);
		goto out_unlock;
	}

	old_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!new_array) {
		WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx));
		goto out_unlock;
	}
	fill_prog_array(net, type, new_array);
	rcu_assign_pointer(net->bpf.run_array[type], new_array);
	bpf_prog_array_free(old_array);

out_unlock:
	net_link->net = NULL;
	mutex_unlock(&netns_bpf_mutex);
}

static int bpf_netns_link_detach(struct bpf_link *link)
{
	bpf_netns_link_release(link);
	return 0;
}

static void bpf_netns_link_dealloc(struct bpf_link *link)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);

	kfree(net_link);
}

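/* Replace the program backing the link (BPF_LINK_UPDATE). Fails with
 * -ENOLINK once the link has been auto-detached or the netns is dying.
 */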
static int bpf_netns_link_update_prog(struct bpf_link *link,
				      struct bpf_prog *new_prog,
				      struct bpf_prog *old_prog)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	enum netns_bpf_attach_type type = net_link->netns_type;
	struct bpf_prog_array *run_array;
	struct net *net;
	int idx, ret;

	if (old_prog && old_prog != link->prog)
		return -EPERM;
	if (new_prog->type != link->prog->type)
		return -EINVAL;

	mutex_lock(&netns_bpf_mutex);

	net = net_link->net;
	if (!net || !check_net(net)) {
		/* Link auto-detached or netns dying */
		ret = -ENOLINK;
		goto out_unlock;
	}

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	idx = link_index(net, type, net_link);
	ret = bpf_prog_array_update_at(run_array, idx, new_prog);
	if (ret)
		goto out_unlock;

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	mutex_unlock(&netns_bpf_mutex);
	return ret;
}

static int bpf_netns_link_fill_info(const struct bpf_link *link,
				    struct bpf_link_info *info)
{
	const struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	unsigned int inum = 0;
	struct net *net;

	mutex_lock(&netns_bpf_mutex);
	net = net_link->net;
	if (net && check_net(net))
		inum = net->ns.inum;
	mutex_unlock(&netns_bpf_mutex);

	info->netns.netns_ino = inum;
	info->netns.attach_type = net_link->type;
	return 0;
}

static void bpf_netns_link_show_fdinfo(const struct bpf_link *link,
				       struct seq_file *seq)
{
	struct bpf_link_info info = {};

	bpf_netns_link_fill_info(link, &info);
	seq_printf(seq,
		   "netns_ino:\t%u\n"
		   "attach_type:\t%u\n",
		   info.netns.netns_ino,
		   info.netns.attach_type);
}

static const struct bpf_link_ops bpf_netns_link_ops = {
	.release = bpf_netns_link_release,
	.dealloc = bpf_netns_link_dealloc,
	.detach = bpf_netns_link_detach,
	.update_prog = bpf_netns_link_update_prog,
	.fill_link_info = bpf_netns_link_fill_info,
	.show_fdinfo = bpf_netns_link_show_fdinfo,
};

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  struct net *net,
				  enum netns_bpf_attach_type type)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	struct bpf_prog_array *run_array;
	u32 prog_cnt = 0, flags = 0;

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	if (run_array)
		prog_cnt = bpf_prog_array_length(run_array);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		return -EFAULT;
	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		return 0;

	return bpf_prog_array_copy_to_user(run_array, prog_ids,
					   attr->query.prog_cnt);
}

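/* BPF_PROG_QUERY: report the programs attached to the netns referenced by
 * query.target_fd.
 */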
int netns_bpf_prog_query(const union bpf_attr *attr,
			 union bpf_attr __user *uattr)
{
	enum netns_bpf_attach_type type;
	struct net *net;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->query.attach_type);
	if (type < 0)
		return -EINVAL;

	net = get_net_ns_by_fd(attr->query.target_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	mutex_lock(&netns_bpf_mutex);
	ret = __netns_bpf_prog_query(attr, uattr, net, type);
	mutex_unlock(&netns_bpf_mutex);

	put_net(net);
	return ret;
}

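/* BPF_PROG_ATTACH: attach a single program directly to the current netns.
 * Only the flow dissector hook supports direct attachment, and it is
 * mutually exclusive with bpf_link attachments.
 */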
int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_prog_array *run_array;
	enum netns_bpf_attach_type type;
	struct bpf_prog *attached;
	struct net *net;
	int ret;

	if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->attach_type);
	if (type < 0)
		return -EINVAL;

	net = current->nsproxy->net_ns;
	mutex_lock(&netns_bpf_mutex);

	/* Attaching prog directly is not compatible with links */
	if (!list_empty(&net->bpf.links[type])) {
		ret = -EEXIST;
		goto out_unlock;
	}

	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		ret = flow_dissector_bpf_prog_attach_check(net, prog);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out_unlock;

	attached = net->bpf.progs[type];
	if (attached == prog) {
		/* The same program cannot be attached twice */
		ret = -EINVAL;
		goto out_unlock;
	}

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	if (run_array) {
		WRITE_ONCE(run_array->items[0].prog, prog);
	} else {
		run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
		if (!run_array) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		run_array->items[0].prog = prog;
		rcu_assign_pointer(net->bpf.run_array[type], run_array);
	}

	net->bpf.progs[type] = prog;
	if (attached)
		bpf_prog_put(attached);
out_unlock:
	mutex_unlock(&netns_bpf_mutex);
	return ret;
}

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_detach(struct net *net,
				   enum netns_bpf_attach_type type,
				   struct bpf_prog *old)
{
	struct bpf_prog *attached;

	/* Progs attached via links cannot be detached */
	if (!list_empty(&net->bpf.links[type]))
		return -EINVAL;

	attached = net->bpf.progs[type];
	if (!attached || attached != old)
		return -ENOENT;
	netns_bpf_run_array_detach(net, type);
	net->bpf.progs[type] = NULL;
	bpf_prog_put(attached);
	return 0;
}

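/* BPF_PROG_DETACH: detach the directly-attached program from the current
 * netns. The program identified by attach_bpf_fd must match the one that
 * is attached.
 */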
int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	enum netns_bpf_attach_type type;
	struct bpf_prog *prog;
	int ret;

	if (attr->target_fd)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->attach_type);
	if (type < 0)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	mutex_lock(&netns_bpf_mutex);
	ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
	mutex_unlock(&netns_bpf_mutex);

	bpf_prog_put(prog);
	return ret;
}

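/* Upper bound on link-attached programs per attach type: a single flow
 * dissector program, up to 64 sk_lookup programs.
 */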
static int netns_bpf_max_progs(enum netns_bpf_attach_type type)
{
	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		return 1;
	case NETNS_BPF_SK_LOOKUP:
		return 64;
	default:
		return 0;
	}
}

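/* Append the new link to the per-netns list and publish a freshly built
 * run array containing the programs of all attached links.
 */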
static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
				 enum netns_bpf_attach_type type)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	struct bpf_prog_array *run_array;
	int cnt, err;

	mutex_lock(&netns_bpf_mutex);

	cnt = link_count(net, type);
	if (cnt >= netns_bpf_max_progs(type)) {
		err = -E2BIG;
		goto out_unlock;
	}
	/* Links are not compatible with attaching prog directly */
	if (net->bpf.progs[type]) {
		err = -EEXIST;
		goto out_unlock;
	}

	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		err = flow_dissector_bpf_prog_attach_check(net, link->prog);
		break;
	case NETNS_BPF_SK_LOOKUP:
		err = 0; /* nothing to check */
		break;
	default:
		err = -EINVAL;
		break;
	}
	if (err)
		goto out_unlock;

	run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL);
	if (!run_array) {
		err = -ENOMEM;
		goto out_unlock;
	}

	list_add_tail(&net_link->node, &net->bpf.links[type]);

	fill_prog_array(net, type, run_array);
	run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array,
					lockdep_is_held(&netns_bpf_mutex));
	bpf_prog_array_free(run_array);

	/* Mark attach point as used */
	netns_bpf_attach_type_need(type);

out_unlock:
	mutex_unlock(&netns_bpf_mutex);
	return err;
}

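/* BPF_LINK_CREATE handler for netns-attached links (BPF_LINK_TYPE_NETNS).
 * The target netns is referenced by link_create.target_fd.
 */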
int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
	enum netns_bpf_attach_type netns_type;
	struct bpf_link_primer link_primer;
	struct bpf_netns_link *net_link;
	enum bpf_attach_type type;
	struct net *net;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	type = attr->link_create.attach_type;
	netns_type = to_netns_bpf_attach_type(type);
	if (netns_type < 0)
		return -EINVAL;

	net = get_net_ns_by_fd(attr->link_create.target_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	net_link = kzalloc(sizeof(*net_link), GFP_USER);
	if (!net_link) {
		err = -ENOMEM;
		goto out_put_net;
	}
	bpf_link_init(&net_link->link, BPF_LINK_TYPE_NETNS,
		      &bpf_netns_link_ops, prog);
	net_link->net = net;
	net_link->type = type;
	net_link->netns_type = netns_type;

	err = bpf_link_prime(&net_link->link, &link_primer);
	if (err) {
		kfree(net_link);
		goto out_put_net;
	}

	err = netns_bpf_link_attach(net, &net_link->link, netns_type);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_net;
	}

	put_net(net);
	return bpf_link_settle(&link_primer);

out_put_net:
	put_net(net);
	return err;
}

static int __net_init netns_bpf_pernet_init(struct net *net)
{
	int type;

	for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
		INIT_LIST_HEAD(&net->bpf.links[type]);

	return 0;
}

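/* netns teardown: drop the run arrays, auto-detach any remaining links
 * (they hold no reference to the netns), and release a directly-attached
 * program if one is present.
 */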
static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
{
	enum netns_bpf_attach_type type;
	struct bpf_netns_link *net_link;

	mutex_lock(&netns_bpf_mutex);
	for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
		netns_bpf_run_array_detach(net, type);
		list_for_each_entry(net_link, &net->bpf.links[type], node) {
			net_link->net = NULL; /* auto-detach link */
			netns_bpf_attach_type_unneed(type);
		}
		if (net->bpf.progs[type])
			bpf_prog_put(net->bpf.progs[type]);
	}
	mutex_unlock(&netns_bpf_mutex);
}

static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
	.init = netns_bpf_pernet_init,
	.pre_exit = netns_bpf_pernet_pre_exit,
};

static int __init netns_bpf_init(void)
{
	return register_pernet_subsys(&netns_bpf_pernet_ops);
}

subsys_initcall(netns_bpf_init);