/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

#define BPF_CGROUP_STORAGE_NEST_MAX	8

struct bpf_cgroup_storage_info {
	struct task_struct *task;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
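
/* Illustrative sketch (not part of the upstream header): a kernel-side caller
 * that already holds references to a cgroup and a loaded program could attach
 * it in multi-prog mode roughly as below. The function name is hypothetical;
 * user space reaches these wrappers through the bpf() syscall helpers
 * (cgroup_bpf_prog_attach() etc.) declared later in this file.
 *
 *	static int example_attach_egress(struct cgroup *cgrp,
 *					 struct bpf_prog *prog)
 *	{
 *		return cgroup_bpf_attach(cgrp, prog, NULL, NULL,
 *					 BPF_CGROUP_INET_EGRESS,
 *					 BPF_F_ALLOW_MULTI);
 *	}
 */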
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
					 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;
	int i, err = 0;

	preempt_disable();
	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, current);
		for_each_cgroup_storage_type(stype)
			this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
				       storage[stype]);
		goto out;
	}
	err = -EBUSY;
	WARN_ON_ONCE(1);

out:
	preempt_enable();
	return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
		return;
	}
}
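
/* Illustrative sketch (not part of the upstream header) of how the two
 * helpers above pair around a program invocation: the caller claims a per-cpu
 * nesting slot for current, runs the program, then releases the slot. If
 * bpf_cgroup_storage_set() returns -EBUSY (all NEST_MAX slots taken), the run
 * is skipped. The surrounding function and the way the storage array and
 * program are obtained are hypothetical.
 *
 *	static u32 example_run_with_storage(const struct bpf_prog *prog,
 *					    const void *ctx,
 *					    struct bpf_cgroup_storage **storage)
 *	{
 *		u32 ret = 1;
 *
 *		if (unlikely(bpf_cgroup_storage_set(storage)))
 *			return ret;
 *		ret = BPF_PROG_RUN(prog, ctx);
 *		bpf_cgroup_storage_unset();
 *		return ret;
 *	}
 */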
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \
 \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
						      BPF_CGROUP_INET_EGRESS); \
	} \
	__ret; \
})
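
/* Illustrative sketch (not part of the upstream header): a delivery path that
 * has resolved the receiving socket would consult the cgroup ingress programs
 * like this and drop the packet on a non-zero return (typically -EPERM when a
 * program rejects the skb). The function is hypothetical; the real call sites
 * live in the IPv4/IPv6 input and output paths.
 *
 *	static int example_deliver(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */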
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL); \
	__ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx); \
		release_sock(sk); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
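
/* Illustrative sketch (not part of the upstream header): a connect()
 * implementation that is not already holding the socket lock would use the
 * _LOCK variant, since that macro takes and releases the lock itself so the
 * program sees a stable socket. The function name is hypothetical.
 *
 *	static int example_connect(struct sock *sk, struct sockaddr *uaddr)
 *	{
 *		int err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */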
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk such that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
							 sock_ops, \
							 BPF_CGROUP_SOCK_OPS); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 BPF_CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access, \
							  BPF_CGROUP_DEVICE); \
 \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, \
						       BPF_CGROUP_SYSCTL); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \
							   kernel_optval); \
	__ret; \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		get_user(__ret, optlen); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
							   optname, optval, \
							   optlen, max_optlen, \
							   retval); \
	__ret; \
})
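
/* Illustrative sketch (not part of the upstream header) of how the two
 * getsockopt wrappers above combine: the caller first snapshots the
 * user-supplied buffer size, performs the protocol-level getsockopt, then
 * gives the cgroup programs a chance to inspect or rewrite the result. The
 * function and the do_native_getsockopt() helper are hypothetical.
 *
 *	static int example_getsockopt(struct sock *sk, int level, int optname,
 *				      char __user *optval, int __user *optlen)
 *	{
 *		int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *		int err = do_native_getsockopt(sk, level, optname,
 *					       optval, optlen);
 *
 *		return BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname,
 *						      optval, optlen,
 *						      max_optlen, err);
 *	}
 */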
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */