cgroup.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Functions to manage eBPF programs attached to cgroups
  4. *
  5. * Copyright (c) 2016 Daniel Mack
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/atomic.h>
  9. #include <linux/cgroup.h>
  10. #include <linux/filter.h>
  11. #include <linux/slab.h>
  12. #include <linux/sysctl.h>
  13. #include <linux/string.h>
  14. #include <linux/bpf.h>
  15. #include <linux/bpf-cgroup.h>
  16. #include <net/sock.h>
  17. #include <net/bpf_sk_storage.h>
  18. #include "../cgroup/cgroup-internal.h"
  19. DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
  20. EXPORT_SYMBOL(cgroup_bpf_enabled_key);
  21. void cgroup_bpf_offline(struct cgroup *cgrp)
  22. {
  23. cgroup_get(cgrp);
  24. percpu_ref_kill(&cgrp->bpf.refcnt);
  25. }
  26. static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
  27. {
  28. enum bpf_cgroup_storage_type stype;
  29. for_each_cgroup_storage_type(stype)
  30. bpf_cgroup_storage_free(storages[stype]);
  31. }
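/* Look up or allocate cgroup storage for every storage type used by @prog.
 * Storages that had to be freshly allocated are also recorded in
 * @new_storages so they can be freed if the attach operation fails later.
 */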
  32. static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
  33. struct bpf_cgroup_storage *new_storages[],
  34. enum bpf_attach_type type,
  35. struct bpf_prog *prog,
  36. struct cgroup *cgrp)
  37. {
  38. enum bpf_cgroup_storage_type stype;
  39. struct bpf_cgroup_storage_key key;
  40. struct bpf_map *map;
  41. key.cgroup_inode_id = cgroup_id(cgrp);
  42. key.attach_type = type;
  43. for_each_cgroup_storage_type(stype) {
  44. map = prog->aux->cgroup_storage[stype];
  45. if (!map)
  46. continue;
  47. storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
  48. if (storages[stype])
  49. continue;
  50. storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
  51. if (IS_ERR(storages[stype])) {
  52. bpf_cgroup_storages_free(new_storages);
  53. return -ENOMEM;
  54. }
  55. new_storages[stype] = storages[stype];
  56. }
  57. return 0;
  58. }
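/* Copy the per-type storage pointers from @src to @dst. */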
  59. static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
  60. struct bpf_cgroup_storage *src[])
  61. {
  62. enum bpf_cgroup_storage_type stype;
  63. for_each_cgroup_storage_type(stype)
  64. dst[stype] = src[stype];
  65. }
  66. static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
  67. struct cgroup *cgrp,
  68. enum bpf_attach_type attach_type)
  69. {
  70. enum bpf_cgroup_storage_type stype;
  71. for_each_cgroup_storage_type(stype)
  72. bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
  73. }
  74. /* Called when bpf_cgroup_link is auto-detached from dying cgroup.
  75. * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
  76. * doesn't free link memory, which will eventually be done by bpf_link's
  77. * release() callback, when its last FD is closed.
  78. */
  79. static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
  80. {
  81. cgroup_put(link->cgroup);
  82. link->cgroup = NULL;
  83. }
  84. /**
  85. * cgroup_bpf_release() - put references of all bpf programs and
  86. * release all cgroup bpf data
  87. * @work: work structure embedded into the cgroup to modify
  88. */
  89. static void cgroup_bpf_release(struct work_struct *work)
  90. {
  91. struct cgroup *p, *cgrp = container_of(work, struct cgroup,
  92. bpf.release_work);
  93. struct bpf_prog_array *old_array;
  94. struct list_head *storages = &cgrp->bpf.storages;
  95. struct bpf_cgroup_storage *storage, *stmp;
  96. unsigned int type;
  97. mutex_lock(&cgroup_mutex);
  98. for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
  99. struct list_head *progs = &cgrp->bpf.progs[type];
  100. struct bpf_prog_list *pl, *pltmp;
  101. list_for_each_entry_safe(pl, pltmp, progs, node) {
  102. list_del(&pl->node);
  103. if (pl->prog)
  104. bpf_prog_put(pl->prog);
  105. if (pl->link)
  106. bpf_cgroup_link_auto_detach(pl->link);
  107. kfree(pl);
  108. static_branch_dec(&cgroup_bpf_enabled_key);
  109. }
  110. old_array = rcu_dereference_protected(
  111. cgrp->bpf.effective[type],
  112. lockdep_is_held(&cgroup_mutex));
  113. bpf_prog_array_free(old_array);
  114. }
  115. list_for_each_entry_safe(storage, stmp, storages, list_cg) {
  116. bpf_cgroup_storage_unlink(storage);
  117. bpf_cgroup_storage_free(storage);
  118. }
  119. mutex_unlock(&cgroup_mutex);
  120. for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
  121. cgroup_bpf_put(p);
  122. percpu_ref_exit(&cgrp->bpf.refcnt);
  123. cgroup_put(cgrp);
  124. }
  125. /**
  126. * cgroup_bpf_release_fn() - callback used to schedule releasing
  127. * of bpf cgroup data
  128. * @ref: percpu ref counter structure
  129. */
  130. static void cgroup_bpf_release_fn(struct percpu_ref *ref)
  131. {
  132. struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
  133. INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
  134. queue_work(system_wq, &cgrp->bpf.release_work);
  135. }
  136. /* Get the underlying bpf_prog of a bpf_prog_list entry, regardless of
  137. * whether it is attached through a link or directly as a prog.
  138. */
  139. static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
  140. {
  141. if (pl->prog)
  142. return pl->prog;
  143. if (pl->link)
  144. return pl->link->link.prog;
  145. return NULL;
  146. }
  147. /* count number of elements in the list.
  148. * it's slow but the list cannot be long
  149. */
  150. static u32 prog_list_length(struct list_head *head)
  151. {
  152. struct bpf_prog_list *pl;
  153. u32 cnt = 0;
  154. list_for_each_entry(pl, head, node) {
  155. if (!prog_list_prog(pl))
  156. continue;
  157. cnt++;
  158. }
  159. return cnt;
  160. }
  161. /* If the parent has a non-overridable prog attached,
  162. * disallow attaching new programs to the descendant cgroup.
  163. * If the parent has an overridable or multi-prog, allow attaching.
  164. */
  165. static bool hierarchy_allows_attach(struct cgroup *cgrp,
  166. enum bpf_attach_type type)
  167. {
  168. struct cgroup *p;
  169. p = cgroup_parent(cgrp);
  170. if (!p)
  171. return true;
  172. do {
  173. u32 flags = p->bpf.flags[type];
  174. u32 cnt;
  175. if (flags & BPF_F_ALLOW_MULTI)
  176. return true;
  177. cnt = prog_list_length(&p->bpf.progs[type]);
  178. WARN_ON_ONCE(cnt > 1);
  179. if (cnt == 1)
  180. return !!(flags & BPF_F_ALLOW_OVERRIDE);
  181. p = cgroup_parent(p);
  182. } while (p);
  183. return true;
  184. }
  185. /* compute a chain of effective programs for a given cgroup:
  186. * start from the list of programs in this cgroup and add
  187. * all parent programs.
  188. * Note that a parent's F_ALLOW_OVERRIDE-type program yields
  189. * to programs in this cgroup.
  190. */
  191. static int compute_effective_progs(struct cgroup *cgrp,
  192. enum bpf_attach_type type,
  193. struct bpf_prog_array **array)
  194. {
  195. struct bpf_prog_array_item *item;
  196. struct bpf_prog_array *progs;
  197. struct bpf_prog_list *pl;
  198. struct cgroup *p = cgrp;
  199. int cnt = 0;
  200. /* count number of effective programs by walking parents */
  201. do {
  202. if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
  203. cnt += prog_list_length(&p->bpf.progs[type]);
  204. p = cgroup_parent(p);
  205. } while (p);
  206. progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
  207. if (!progs)
  208. return -ENOMEM;
  209. /* populate the array with effective progs */
  210. cnt = 0;
  211. p = cgrp;
  212. do {
  213. if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
  214. continue;
  215. list_for_each_entry(pl, &p->bpf.progs[type], node) {
  216. if (!prog_list_prog(pl))
  217. continue;
  218. item = &progs->items[cnt];
  219. item->prog = prog_list_prog(pl);
  220. bpf_cgroup_storages_assign(item->cgroup_storage,
  221. pl->storage);
  222. cnt++;
  223. }
  224. } while ((p = cgroup_parent(p)));
  225. *array = progs;
  226. return 0;
  227. }
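/* Install the newly computed prog array as the effective one for @type.
 * On entry @old_array holds the new array; after rcu_replace_pointer() it
 * holds the previously active array, which is then freed.
 */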
  228. static void activate_effective_progs(struct cgroup *cgrp,
  229. enum bpf_attach_type type,
  230. struct bpf_prog_array *old_array)
  231. {
  232. old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
  233. lockdep_is_held(&cgroup_mutex));
  234. /* free prog array after grace period, since __cgroup_bpf_run_*()
  235. * might be still walking the array
  236. */
  237. bpf_prog_array_free(old_array);
  238. }
  239. /**
  240. * cgroup_bpf_inherit() - inherit effective programs from parent
  241. * @cgrp: the cgroup to modify
  242. */
  243. int cgroup_bpf_inherit(struct cgroup *cgrp)
  244. {
  245. /* has to use macro instead of const int, since the compiler thinks
  246. * that the array below is variable length
  247. */
  248. #define NR ARRAY_SIZE(cgrp->bpf.effective)
  249. struct bpf_prog_array *arrays[NR] = {};
  250. struct cgroup *p;
  251. int ret, i;
  252. ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
  253. GFP_KERNEL);
  254. if (ret)
  255. return ret;
  256. for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
  257. cgroup_bpf_get(p);
  258. for (i = 0; i < NR; i++)
  259. INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
  260. INIT_LIST_HEAD(&cgrp->bpf.storages);
  261. for (i = 0; i < NR; i++)
  262. if (compute_effective_progs(cgrp, i, &arrays[i]))
  263. goto cleanup;
  264. for (i = 0; i < NR; i++)
  265. activate_effective_progs(cgrp, i, arrays[i]);
  266. return 0;
  267. cleanup:
  268. for (i = 0; i < NR; i++)
  269. bpf_prog_array_free(arrays[i]);
  270. for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
  271. cgroup_bpf_put(p);
  272. percpu_ref_exit(&cgrp->bpf.refcnt);
  273. return -ENOMEM;
  274. }
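/* Recompute the effective prog arrays for @cgrp and all of its live
 * descendants, then activate them all at once. If any allocation fails,
 * nothing is activated and the already computed arrays are freed.
 */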
  275. static int update_effective_progs(struct cgroup *cgrp,
  276. enum bpf_attach_type type)
  277. {
  278. struct cgroup_subsys_state *css;
  279. int err;
  280. /* allocate and recompute effective prog arrays */
  281. css_for_each_descendant_pre(css, &cgrp->self) {
  282. struct cgroup *desc = container_of(css, struct cgroup, self);
  283. if (percpu_ref_is_zero(&desc->bpf.refcnt))
  284. continue;
  285. err = compute_effective_progs(desc, type, &desc->bpf.inactive);
  286. if (err)
  287. goto cleanup;
  288. }
  289. /* all allocations were successful. Activate all prog arrays */
  290. css_for_each_descendant_pre(css, &cgrp->self) {
  291. struct cgroup *desc = container_of(css, struct cgroup, self);
  292. if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
  293. if (unlikely(desc->bpf.inactive)) {
  294. bpf_prog_array_free(desc->bpf.inactive);
  295. desc->bpf.inactive = NULL;
  296. }
  297. continue;
  298. }
  299. activate_effective_progs(desc, type, desc->bpf.inactive);
  300. desc->bpf.inactive = NULL;
  301. }
  302. return 0;
  303. cleanup:
  304. /* oom while computing effective. Free all computed effective arrays
  305. * since they were not activated
  306. */
  307. css_for_each_descendant_pre(css, &cgrp->self) {
  308. struct cgroup *desc = container_of(css, struct cgroup, self);
  309. bpf_prog_array_free(desc->bpf.inactive);
  310. desc->bpf.inactive = NULL;
  311. }
  312. return err;
  313. }
  314. #define BPF_CGROUP_MAX_PROGS 64
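/* Find the bpf_prog_list entry that an attach operation should reuse or
 * replace. Returns NULL when a new entry must be allocated, or an ERR_PTR
 * when the requested attachment is invalid (duplicate prog/link, or
 * @replace_prog not found).
 */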
  315. static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
  316. struct bpf_prog *prog,
  317. struct bpf_cgroup_link *link,
  318. struct bpf_prog *replace_prog,
  319. bool allow_multi)
  320. {
  321. struct bpf_prog_list *pl;
  322. /* single-attach case */
  323. if (!allow_multi) {
  324. if (list_empty(progs))
  325. return NULL;
  326. return list_first_entry(progs, typeof(*pl), node);
  327. }
  328. list_for_each_entry(pl, progs, node) {
  329. if (prog && pl->prog == prog && prog != replace_prog)
  330. /* disallow attaching the same prog twice */
  331. return ERR_PTR(-EINVAL);
  332. if (link && pl->link == link)
  333. /* disallow attaching the same link twice */
  334. return ERR_PTR(-EINVAL);
  335. }
  336. /* direct prog multi-attach w/ replacement case */
  337. if (replace_prog) {
  338. list_for_each_entry(pl, progs, node) {
  339. if (pl->prog == replace_prog)
  340. /* a match found */
  341. return pl;
  342. }
  343. /* prog to replace not found for cgroup */
  344. return ERR_PTR(-ENOENT);
  345. }
  346. return NULL;
  347. }
  348. /**
  349. * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
  350. * propagate the change to descendants
  351. * @cgrp: The cgroup whose descendants to traverse
  352. * @prog: A program to attach
  353. * @link: A link to attach
  354. * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
  355. * @type: Type of attach operation
  356. * @flags: Option flags
  357. *
  358. * Exactly one of @prog or @link can be non-null.
  359. * Must be called with cgroup_mutex held.
  360. */
  361. int __cgroup_bpf_attach(struct cgroup *cgrp,
  362. struct bpf_prog *prog, struct bpf_prog *replace_prog,
  363. struct bpf_cgroup_link *link,
  364. enum bpf_attach_type type, u32 flags)
  365. {
  366. u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
  367. struct list_head *progs = &cgrp->bpf.progs[type];
  368. struct bpf_prog *old_prog = NULL;
  369. struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
  370. struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
  371. struct bpf_prog_list *pl;
  372. int err;
  373. if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
  374. ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
  375. /* invalid combination */
  376. return -EINVAL;
  377. if (link && (prog || replace_prog))
  378. /* only either link or prog/replace_prog can be specified */
  379. return -EINVAL;
  380. if (!!replace_prog != !!(flags & BPF_F_REPLACE))
  381. /* replace_prog implies BPF_F_REPLACE, and vice versa */
  382. return -EINVAL;
  383. if (!hierarchy_allows_attach(cgrp, type))
  384. return -EPERM;
  385. if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
  386. /* Disallow attaching non-overridable on top
  387. * of existing overridable in this cgroup.
  388. * Disallow attaching multi-prog if overridable or none
  389. */
  390. return -EPERM;
  391. if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
  392. return -E2BIG;
  393. pl = find_attach_entry(progs, prog, link, replace_prog,
  394. flags & BPF_F_ALLOW_MULTI);
  395. if (IS_ERR(pl))
  396. return PTR_ERR(pl);
  397. if (bpf_cgroup_storages_alloc(storage, new_storage, type,
  398. prog ? : link->link.prog, cgrp))
  399. return -ENOMEM;
  400. if (pl) {
  401. old_prog = pl->prog;
  402. } else {
  403. pl = kmalloc(sizeof(*pl), GFP_KERNEL);
  404. if (!pl) {
  405. bpf_cgroup_storages_free(new_storage);
  406. return -ENOMEM;
  407. }
  408. list_add_tail(&pl->node, progs);
  409. }
  410. pl->prog = prog;
  411. pl->link = link;
  412. bpf_cgroup_storages_assign(pl->storage, storage);
  413. cgrp->bpf.flags[type] = saved_flags;
  414. err = update_effective_progs(cgrp, type);
  415. if (err)
  416. goto cleanup;
  417. if (old_prog)
  418. bpf_prog_put(old_prog);
  419. else
  420. static_branch_inc(&cgroup_bpf_enabled_key);
  421. bpf_cgroup_storages_link(new_storage, cgrp, type);
  422. return 0;
  423. cleanup:
  424. if (old_prog) {
  425. pl->prog = old_prog;
  426. pl->link = NULL;
  427. }
  428. bpf_cgroup_storages_free(new_storage);
  429. if (!old_prog) {
  430. list_del(&pl->node);
  431. kfree(pl);
  432. }
  433. return err;
  434. }
  435. /* Swap updated BPF program for given link in effective program arrays across
  436. * all descendant cgroups. This function is guaranteed to succeed.
  437. */
  438. static void replace_effective_prog(struct cgroup *cgrp,
  439. enum bpf_attach_type type,
  440. struct bpf_cgroup_link *link)
  441. {
  442. struct bpf_prog_array_item *item;
  443. struct cgroup_subsys_state *css;
  444. struct bpf_prog_array *progs;
  445. struct bpf_prog_list *pl;
  446. struct list_head *head;
  447. struct cgroup *cg;
  448. int pos;
  449. css_for_each_descendant_pre(css, &cgrp->self) {
  450. struct cgroup *desc = container_of(css, struct cgroup, self);
  451. if (percpu_ref_is_zero(&desc->bpf.refcnt))
  452. continue;
  453. /* find position of link in effective progs array */
  454. for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
  455. if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
  456. continue;
  457. head = &cg->bpf.progs[type];
  458. list_for_each_entry(pl, head, node) {
  459. if (!prog_list_prog(pl))
  460. continue;
  461. if (pl->link == link)
  462. goto found;
  463. pos++;
  464. }
  465. }
  466. found:
  467. BUG_ON(!cg);
  468. progs = rcu_dereference_protected(
  469. desc->bpf.effective[type],
  470. lockdep_is_held(&cgroup_mutex));
  471. item = &progs->items[pos];
  472. WRITE_ONCE(item->prog, link->link.prog);
  473. }
  474. }
  475. /**
  476. * __cgroup_bpf_replace() - Replace link's program and propagate the change
  477. * to descendants
  478. * @cgrp: The cgroup whose descendants to traverse
  479. * @link: A link for which to replace the BPF program
  480. * @new_prog: The new program to attach in place of the link's current one
  481. *
  482. * Must be called with cgroup_mutex held.
  483. */
  484. static int __cgroup_bpf_replace(struct cgroup *cgrp,
  485. struct bpf_cgroup_link *link,
  486. struct bpf_prog *new_prog)
  487. {
  488. struct list_head *progs = &cgrp->bpf.progs[link->type];
  489. struct bpf_prog *old_prog;
  490. struct bpf_prog_list *pl;
  491. bool found = false;
  492. if (link->link.prog->type != new_prog->type)
  493. return -EINVAL;
  494. list_for_each_entry(pl, progs, node) {
  495. if (pl->link == link) {
  496. found = true;
  497. break;
  498. }
  499. }
  500. if (!found)
  501. return -ENOENT;
  502. old_prog = xchg(&link->link.prog, new_prog);
  503. replace_effective_prog(cgrp, link->type, link);
  504. bpf_prog_put(old_prog);
  505. return 0;
  506. }
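/* bpf_link_ops->update_prog callback: swap the program backing a cgroup
 * link, failing if the link was already auto-detached from a dying cgroup
 * or if @old_prog does not match the currently attached program.
 */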
  507. static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
  508. struct bpf_prog *old_prog)
  509. {
  510. struct bpf_cgroup_link *cg_link;
  511. int ret;
  512. cg_link = container_of(link, struct bpf_cgroup_link, link);
  513. mutex_lock(&cgroup_mutex);
  514. /* link might have been auto-released by dying cgroup, so fail */
  515. if (!cg_link->cgroup) {
  516. ret = -ENOLINK;
  517. goto out_unlock;
  518. }
  519. if (old_prog && link->prog != old_prog) {
  520. ret = -EPERM;
  521. goto out_unlock;
  522. }
  523. ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
  524. out_unlock:
  525. mutex_unlock(&cgroup_mutex);
  526. return ret;
  527. }
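/* Find the bpf_prog_list entry that a detach operation should remove, or
 * an ERR_PTR if nothing suitable is attached.
 */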
  528. static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
  529. struct bpf_prog *prog,
  530. struct bpf_cgroup_link *link,
  531. bool allow_multi)
  532. {
  533. struct bpf_prog_list *pl;
  534. if (!allow_multi) {
  535. if (list_empty(progs))
  536. /* report error when trying to detach and nothing is attached */
  537. return ERR_PTR(-ENOENT);
  538. /* to maintain backward compatibility NONE and OVERRIDE cgroups
  539. * allow detaching with invalid FD (prog==NULL) in legacy mode
  540. */
  541. return list_first_entry(progs, typeof(*pl), node);
  542. }
  543. if (!prog && !link)
  544. /* to detach MULTI prog the user has to specify valid FD
  545. * of the program or link to be detached
  546. */
  547. return ERR_PTR(-EINVAL);
  548. /* find the prog or link and detach it */
  549. list_for_each_entry(pl, progs, node) {
  550. if (pl->prog == prog && pl->link == link)
  551. return pl;
  552. }
  553. return ERR_PTR(-ENOENT);
  554. }
  555. /**
  556. * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
  557. * propagate the change to descendants
  558. * @cgrp: The cgroup whose descendants to traverse
  559. * @prog: A program to detach or NULL
  560. * @link: A link to detach or NULL
  561. * @type: Type of detach operation
  562. *
  563. * At most one of @prog or @link can be non-NULL.
  564. * Must be called with cgroup_mutex held.
  565. */
  566. int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
  567. struct bpf_cgroup_link *link, enum bpf_attach_type type)
  568. {
  569. struct list_head *progs = &cgrp->bpf.progs[type];
  570. u32 flags = cgrp->bpf.flags[type];
  571. struct bpf_prog_list *pl;
  572. struct bpf_prog *old_prog;
  573. int err;
  574. if (prog && link)
  575. /* only one of prog or link can be specified */
  576. return -EINVAL;
  577. pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
  578. if (IS_ERR(pl))
  579. return PTR_ERR(pl);
  580. /* mark it deleted, so it's ignored while recomputing effective */
  581. old_prog = pl->prog;
  582. pl->prog = NULL;
  583. pl->link = NULL;
  584. err = update_effective_progs(cgrp, type);
  585. if (err)
  586. goto cleanup;
  587. /* now can actually delete it from this cgroup list */
  588. list_del(&pl->node);
  589. kfree(pl);
  590. if (list_empty(progs))
  591. /* last program was detached, reset flags to zero */
  592. cgrp->bpf.flags[type] = 0;
  593. if (old_prog)
  594. bpf_prog_put(old_prog);
  595. static_branch_dec(&cgroup_bpf_enabled_key);
  596. return 0;
  597. cleanup:
  598. /* restore back prog or link */
  599. pl->prog = old_prog;
  600. pl->link = link;
  601. return err;
  602. }
  603. /* Must be called with cgroup_mutex held to avoid races. */
  604. int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
  605. union bpf_attr __user *uattr)
  606. {
  607. __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
  608. enum bpf_attach_type type = attr->query.attach_type;
  609. struct list_head *progs = &cgrp->bpf.progs[type];
  610. u32 flags = cgrp->bpf.flags[type];
  611. struct bpf_prog_array *effective;
  612. struct bpf_prog *prog;
  613. int cnt, ret = 0, i;
  614. effective = rcu_dereference_protected(cgrp->bpf.effective[type],
  615. lockdep_is_held(&cgroup_mutex));
  616. if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
  617. cnt = bpf_prog_array_length(effective);
  618. else
  619. cnt = prog_list_length(progs);
  620. if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
  621. return -EFAULT;
  622. if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
  623. return -EFAULT;
  624. if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
  625. /* return early if user requested only program count + flags */
  626. return 0;
  627. if (attr->query.prog_cnt < cnt) {
  628. cnt = attr->query.prog_cnt;
  629. ret = -ENOSPC;
  630. }
  631. if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
  632. return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
  633. } else {
  634. struct bpf_prog_list *pl;
  635. u32 id;
  636. i = 0;
  637. list_for_each_entry(pl, progs, node) {
  638. prog = prog_list_prog(pl);
  639. id = prog->aux->id;
  640. if (copy_to_user(prog_ids + i, &id, sizeof(id)))
  641. return -EFAULT;
  642. if (++i == cnt)
  643. break;
  644. }
  645. }
  646. return ret;
  647. }
  648. int cgroup_bpf_prog_attach(const union bpf_attr *attr,
  649. enum bpf_prog_type ptype, struct bpf_prog *prog)
  650. {
  651. struct bpf_prog *replace_prog = NULL;
  652. struct cgroup *cgrp;
  653. int ret;
  654. cgrp = cgroup_get_from_fd(attr->target_fd);
  655. if (IS_ERR(cgrp))
  656. return PTR_ERR(cgrp);
  657. if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
  658. (attr->attach_flags & BPF_F_REPLACE)) {
  659. replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
  660. if (IS_ERR(replace_prog)) {
  661. cgroup_put(cgrp);
  662. return PTR_ERR(replace_prog);
  663. }
  664. }
  665. ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
  666. attr->attach_type, attr->attach_flags);
  667. if (replace_prog)
  668. bpf_prog_put(replace_prog);
  669. cgroup_put(cgrp);
  670. return ret;
  671. }
  672. int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
  673. {
  674. struct bpf_prog *prog;
  675. struct cgroup *cgrp;
  676. int ret;
  677. cgrp = cgroup_get_from_fd(attr->target_fd);
  678. if (IS_ERR(cgrp))
  679. return PTR_ERR(cgrp);
  680. prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
  681. if (IS_ERR(prog))
  682. prog = NULL;
  683. ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
  684. if (prog)
  685. bpf_prog_put(prog);
  686. cgroup_put(cgrp);
  687. return ret;
  688. }
  689. static void bpf_cgroup_link_release(struct bpf_link *link)
  690. {
  691. struct bpf_cgroup_link *cg_link =
  692. container_of(link, struct bpf_cgroup_link, link);
  693. struct cgroup *cg;
  694. /* link might have been auto-detached by dying cgroup already,
  695. * in that case our work is done here
  696. */
  697. if (!cg_link->cgroup)
  698. return;
  699. mutex_lock(&cgroup_mutex);
  700. /* re-check cgroup under lock again */
  701. if (!cg_link->cgroup) {
  702. mutex_unlock(&cgroup_mutex);
  703. return;
  704. }
  705. WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
  706. cg_link->type));
  707. cg = cg_link->cgroup;
  708. cg_link->cgroup = NULL;
  709. mutex_unlock(&cgroup_mutex);
  710. cgroup_put(cg);
  711. }
  712. static void bpf_cgroup_link_dealloc(struct bpf_link *link)
  713. {
  714. struct bpf_cgroup_link *cg_link =
  715. container_of(link, struct bpf_cgroup_link, link);
  716. kfree(cg_link);
  717. }
  718. static int bpf_cgroup_link_detach(struct bpf_link *link)
  719. {
  720. bpf_cgroup_link_release(link);
  721. return 0;
  722. }
  723. static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
  724. struct seq_file *seq)
  725. {
  726. struct bpf_cgroup_link *cg_link =
  727. container_of(link, struct bpf_cgroup_link, link);
  728. u64 cg_id = 0;
  729. mutex_lock(&cgroup_mutex);
  730. if (cg_link->cgroup)
  731. cg_id = cgroup_id(cg_link->cgroup);
  732. mutex_unlock(&cgroup_mutex);
  733. seq_printf(seq,
  734. "cgroup_id:\t%llu\n"
  735. "attach_type:\t%d\n",
  736. cg_id,
  737. cg_link->type);
  738. }
  739. static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
  740. struct bpf_link_info *info)
  741. {
  742. struct bpf_cgroup_link *cg_link =
  743. container_of(link, struct bpf_cgroup_link, link);
  744. u64 cg_id = 0;
  745. mutex_lock(&cgroup_mutex);
  746. if (cg_link->cgroup)
  747. cg_id = cgroup_id(cg_link->cgroup);
  748. mutex_unlock(&cgroup_mutex);
  749. info->cgroup.cgroup_id = cg_id;
  750. info->cgroup.attach_type = cg_link->type;
  751. return 0;
  752. }
  753. static const struct bpf_link_ops bpf_cgroup_link_lops = {
  754. .release = bpf_cgroup_link_release,
  755. .dealloc = bpf_cgroup_link_dealloc,
  756. .detach = bpf_cgroup_link_detach,
  757. .update_prog = cgroup_bpf_replace,
  758. .show_fdinfo = bpf_cgroup_link_show_fdinfo,
  759. .fill_link_info = bpf_cgroup_link_fill_link_info,
  760. };
  761. int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
  762. {
  763. struct bpf_link_primer link_primer;
  764. struct bpf_cgroup_link *link;
  765. struct cgroup *cgrp;
  766. int err;
  767. if (attr->link_create.flags)
  768. return -EINVAL;
  769. cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
  770. if (IS_ERR(cgrp))
  771. return PTR_ERR(cgrp);
  772. link = kzalloc(sizeof(*link), GFP_USER);
  773. if (!link) {
  774. err = -ENOMEM;
  775. goto out_put_cgroup;
  776. }
  777. bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
  778. prog);
  779. link->cgroup = cgrp;
  780. link->type = attr->link_create.attach_type;
  781. err = bpf_link_prime(&link->link, &link_primer);
  782. if (err) {
  783. kfree(link);
  784. goto out_put_cgroup;
  785. }
  786. err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
  787. BPF_F_ALLOW_MULTI);
  788. if (err) {
  789. bpf_link_cleanup(&link_primer);
  790. goto out_put_cgroup;
  791. }
  792. return bpf_link_settle(&link_primer);
  793. out_put_cgroup:
  794. cgroup_put(cgrp);
  795. return err;
  796. }
  797. int cgroup_bpf_prog_query(const union bpf_attr *attr,
  798. union bpf_attr __user *uattr)
  799. {
  800. struct cgroup *cgrp;
  801. int ret;
  802. cgrp = cgroup_get_from_fd(attr->query.target_fd);
  803. if (IS_ERR(cgrp))
  804. return PTR_ERR(cgrp);
  805. ret = cgroup_bpf_query(cgrp, attr, uattr);
  806. cgroup_put(cgrp);
  807. return ret;
  808. }
  809. /**
  810. * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  811. * @sk: The socket sending or receiving traffic
  812. * @skb: The skb that is being sent or received
  813. * @type: The type of program to be executed
  814. *
  815. * If no socket is passed, or the socket is not of type INET or INET6,
  816. * this function does nothing and returns 0.
  817. *
  818. * The program type passed in via @type must be suitable for network
  819. * filtering. No further check is performed to assert that.
  820. *
  821. * For egress packets, this function can return:
  822. * NET_XMIT_SUCCESS (0) - continue with packet output
  823. * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr
  824. * NET_XMIT_CN (2) - continue with packet output and notify TCP
  825. * to call cwr
  826. * -EPERM - drop packet
  827. *
  828. * For ingress packets, this function will return -EPERM if any
  829. * attached program was found and if it returned != 1 during execution.
  830. * Otherwise 0 is returned.
  831. */
  832. int __cgroup_bpf_run_filter_skb(struct sock *sk,
  833. struct sk_buff *skb,
  834. enum bpf_attach_type type)
  835. {
  836. unsigned int offset = skb->data - skb_network_header(skb);
  837. struct sock *save_sk;
  838. void *saved_data_end;
  839. struct cgroup *cgrp;
  840. int ret;
  841. if (!sk || !sk_fullsock(sk))
  842. return 0;
  843. if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
  844. return 0;
  845. cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  846. save_sk = skb->sk;
  847. skb->sk = sk;
  848. __skb_push(skb, offset);
  849. /* compute pointers for the bpf prog */
  850. bpf_compute_and_save_data_end(skb, &saved_data_end);
  851. if (type == BPF_CGROUP_INET_EGRESS) {
  852. ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
  853. cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
  854. } else {
  855. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
  856. __bpf_prog_run_save_cb);
  857. ret = (ret == 1 ? 0 : -EPERM);
  858. }
  859. bpf_restore_data_end(skb, saved_data_end);
  860. __skb_pull(skb, offset);
  861. skb->sk = save_sk;
  862. return ret;
  863. }
  864. EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
  865. /**
  866. * __cgroup_bpf_run_filter_sk() - Run a program on a sock
  867. * @sk: sock structure to manipulate
  868. * @type: The type of program to be executed
  869. *
  870. * The socket passed is expected to be of type INET or INET6.
  871. *
  872. * The program type passed in via @type must be suitable for sock
  873. * filtering. No further check is performed to assert that.
  874. *
  875. * This function will return %-EPERM if an attached program was found
  876. * and it returned != 1 during execution. In all other cases, 0 is returned.
  877. */
  878. int __cgroup_bpf_run_filter_sk(struct sock *sk,
  879. enum bpf_attach_type type)
  880. {
  881. struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  882. int ret;
  883. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
  884. return ret == 1 ? 0 : -EPERM;
  885. }
  886. EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
  887. /**
  888. * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
  889. * provided by user sockaddr
  890. * @sk: sock struct that will use sockaddr
  891. * @uaddr: sockaddr struct provided by user
  892. * @type: The type of program to be executed
  893. * @t_ctx: Pointer to attach type specific context
  894. *
  895. * The socket is expected to be of type INET or INET6.
  896. *
  897. * This function will return %-EPERM if an attached program is found and
  898. * its returned value != 1 during execution. In all other cases, 0 is returned.
  899. */
  900. int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
  901. struct sockaddr *uaddr,
  902. enum bpf_attach_type type,
  903. void *t_ctx)
  904. {
  905. struct bpf_sock_addr_kern ctx = {
  906. .sk = sk,
  907. .uaddr = uaddr,
  908. .t_ctx = t_ctx,
  909. };
  910. struct sockaddr_storage unspec;
  911. struct cgroup *cgrp;
  912. int ret;
  913. /* Check socket family since not all sockets represent a network
  914. * endpoint (e.g. AF_UNIX).
  915. */
  916. if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
  917. return 0;
  918. if (!ctx.uaddr) {
  919. memset(&unspec, 0, sizeof(unspec));
  920. ctx.uaddr = (struct sockaddr *)&unspec;
  921. }
  922. cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  923. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
  924. return ret == 1 ? 0 : -EPERM;
  925. }
  926. EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
  927. /**
  928. * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
  929. * @sk: socket to get cgroup from
  930. * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
  931. * sk with connection information (IP addresses, etc.). May not contain
  932. * cgroup info if it is a req sock.
  933. * @type: The type of program to be executed
  934. *
  935. * The socket passed is expected to be of type INET or INET6.
  936. *
  937. * The program type passed in via @type must be suitable for sock_ops
  938. * filtering. No further check is performed to assert that.
  939. *
  940. * This function will return %-EPERM if an attached program was found
  941. * and it returned != 1 during execution. In all other cases, 0 is returned.
  942. */
  943. int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
  944. struct bpf_sock_ops_kern *sock_ops,
  945. enum bpf_attach_type type)
  946. {
  947. struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  948. int ret;
  949. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
  950. BPF_PROG_RUN);
  951. return ret == 1 ? 0 : -EPERM;
  952. }
  953. EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
  954. int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
  955. short access, enum bpf_attach_type type)
  956. {
  957. struct cgroup *cgrp;
  958. struct bpf_cgroup_dev_ctx ctx = {
  959. .access_type = (access << 16) | dev_type,
  960. .major = major,
  961. .minor = minor,
  962. };
  963. int allow = 1;
  964. rcu_read_lock();
  965. cgrp = task_dfl_cgroup(current);
  966. allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
  967. BPF_PROG_RUN);
  968. rcu_read_unlock();
  969. return !allow;
  970. }
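/* Helpers available to all cgroup BPF program types handled in this file. */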
  971. static const struct bpf_func_proto *
  972. cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  973. {
  974. switch (func_id) {
  975. case BPF_FUNC_get_current_uid_gid:
  976. return &bpf_get_current_uid_gid_proto;
  977. case BPF_FUNC_get_local_storage:
  978. return &bpf_get_local_storage_proto;
  979. case BPF_FUNC_get_current_cgroup_id:
  980. return &bpf_get_current_cgroup_id_proto;
  981. case BPF_FUNC_perf_event_output:
  982. return &bpf_event_output_data_proto;
  983. default:
  984. return bpf_base_func_proto(func_id);
  985. }
  986. }
  987. static const struct bpf_func_proto *
  988. cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  989. {
  990. return cgroup_base_func_proto(func_id, prog);
  991. }
  992. static bool cgroup_dev_is_valid_access(int off, int size,
  993. enum bpf_access_type type,
  994. const struct bpf_prog *prog,
  995. struct bpf_insn_access_aux *info)
  996. {
  997. const int size_default = sizeof(__u32);
  998. if (type == BPF_WRITE)
  999. return false;
  1000. if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
  1001. return false;
  1002. /* The verifier guarantees that size > 0. */
  1003. if (off % size != 0)
  1004. return false;
  1005. switch (off) {
  1006. case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
  1007. bpf_ctx_record_field_size(info, size_default);
  1008. if (!bpf_ctx_narrow_access_ok(off, size, size_default))
  1009. return false;
  1010. break;
  1011. default:
  1012. if (size != size_default)
  1013. return false;
  1014. }
  1015. return true;
  1016. }
  1017. const struct bpf_prog_ops cg_dev_prog_ops = {
  1018. };
  1019. const struct bpf_verifier_ops cg_dev_verifier_ops = {
  1020. .get_func_proto = cgroup_dev_func_proto,
  1021. .is_valid_access = cgroup_dev_is_valid_access,
  1022. };
  1023. /**
  1024. * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
  1025. *
  1026. * @head: sysctl table header
  1027. * @table: sysctl table
  1028. * @write: sysctl is being read (= 0) or written (= 1)
  1029. * @buf: pointer to buffer (in and out)
  1030. * @pcount: value-result argument: value is size of buffer pointed to by @buf,
  1031. * result is size of the new value if the program set one, initial value
  1032. * otherwise
  1033. * @ppos: value-result argument: value is position at which read from or write
  1034. * to sysctl is happening, result is new position if program overrode it,
  1035. * initial value otherwise
  1036. * @type: type of program to be executed
  1037. *
  1038. * Program is run when sysctl is being accessed, either read or written, and
  1039. * can allow or deny such access.
  1040. *
  1041. * This function will return %-EPERM if an attached program is found and
  1042. * its returned value != 1 during execution. In all other cases 0 is returned.
  1043. */
  1044. int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
  1045. struct ctl_table *table, int write,
  1046. char **buf, size_t *pcount, loff_t *ppos,
  1047. enum bpf_attach_type type)
  1048. {
  1049. struct bpf_sysctl_kern ctx = {
  1050. .head = head,
  1051. .table = table,
  1052. .write = write,
  1053. .ppos = ppos,
  1054. .cur_val = NULL,
  1055. .cur_len = PAGE_SIZE,
  1056. .new_val = NULL,
  1057. .new_len = 0,
  1058. .new_updated = 0,
  1059. };
  1060. struct cgroup *cgrp;
  1061. loff_t pos = 0;
  1062. int ret;
  1063. ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
  1064. if (!ctx.cur_val ||
  1065. table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
  1066. /* Let BPF program decide how to proceed. */
  1067. ctx.cur_len = 0;
  1068. }
  1069. if (write && *buf && *pcount) {
  1070. /* BPF program should be able to override new value with a
  1071. * buffer bigger than provided by user.
  1072. */
  1073. ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
  1074. ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
  1075. if (ctx.new_val) {
  1076. memcpy(ctx.new_val, *buf, ctx.new_len);
  1077. } else {
  1078. /* Let BPF program decide how to proceed. */
  1079. ctx.new_len = 0;
  1080. }
  1081. }
  1082. rcu_read_lock();
  1083. cgrp = task_dfl_cgroup(current);
  1084. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
  1085. rcu_read_unlock();
  1086. kfree(ctx.cur_val);
  1087. if (ret == 1 && ctx.new_updated) {
  1088. kfree(*buf);
  1089. *buf = ctx.new_val;
  1090. *pcount = ctx.new_len;
  1091. } else {
  1092. kfree(ctx.new_val);
  1093. }
  1094. return ret == 1 ? 0 : -EPERM;
  1095. }
  1096. #ifdef CONFIG_NET
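/* Check under RCU whether the cgroup has no effective programs for the
 * given attach type.
 */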
  1097. static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
  1098. enum bpf_attach_type attach_type)
  1099. {
  1100. struct bpf_prog_array *prog_array;
  1101. bool empty;
  1102. rcu_read_lock();
  1103. prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
  1104. empty = bpf_prog_array_is_empty(prog_array);
  1105. rcu_read_unlock();
  1106. return empty;
  1107. }
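/* Allocate the kernel buffer that the BPF program sees as optval, capped at
 * PAGE_SIZE. Returns the usable buffer length or a negative error.
 */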
  1108. static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
  1109. {
  1110. if (unlikely(max_optlen < 0))
  1111. return -EINVAL;
  1112. if (unlikely(max_optlen > PAGE_SIZE)) {
  1113. /* We don't expose optvals that are greater than PAGE_SIZE
  1114. * to the BPF program.
  1115. */
  1116. max_optlen = PAGE_SIZE;
  1117. }
  1118. ctx->optval = kzalloc(max_optlen, GFP_USER);
  1119. if (!ctx->optval)
  1120. return -ENOMEM;
  1121. ctx->optval_end = ctx->optval + max_optlen;
  1122. return max_optlen;
  1123. }
  1124. static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
  1125. {
  1126. kfree(ctx->optval);
  1127. }
  1128. int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
  1129. int *optname, char __user *optval,
  1130. int *optlen, char **kernel_optval)
  1131. {
  1132. struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  1133. struct bpf_sockopt_kern ctx = {
  1134. .sk = sk,
  1135. .level = *level,
  1136. .optname = *optname,
  1137. };
  1138. int ret, max_optlen;
  1139. /* Opportunistic check to see whether we have any BPF program
  1140. * attached to the hook so we don't waste time allocating
  1141. * memory and locking the socket.
  1142. */
  1143. if (!cgroup_bpf_enabled ||
  1144. __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
  1145. return 0;
  1146. /* Allocate a bit more than the initial user buffer for
  1147. * BPF program. The canonical use case is overriding
  1148. * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
  1149. */
  1150. max_optlen = max_t(int, 16, *optlen);
  1151. max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
  1152. if (max_optlen < 0)
  1153. return max_optlen;
  1154. ctx.optlen = *optlen;
  1155. if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
  1156. ret = -EFAULT;
  1157. goto out;
  1158. }
  1159. lock_sock(sk);
  1160. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
  1161. &ctx, BPF_PROG_RUN);
  1162. release_sock(sk);
  1163. if (!ret) {
  1164. ret = -EPERM;
  1165. goto out;
  1166. }
  1167. if (ctx.optlen == -1) {
  1168. /* optlen set to -1, bypass kernel */
  1169. ret = 1;
  1170. } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
  1171. /* optlen is out of bounds */
  1172. ret = -EFAULT;
  1173. } else {
  1174. /* optlen within bounds, run kernel handler */
  1175. ret = 0;
  1176. /* export any potential modifications */
  1177. *level = ctx.level;
  1178. *optname = ctx.optname;
  1179. /* optlen == 0 from BPF indicates that we should
  1180. * use original userspace data.
  1181. */
  1182. if (ctx.optlen != 0) {
  1183. *optlen = ctx.optlen;
  1184. *kernel_optval = ctx.optval;
  1185. /* export and don't free sockopt buf */
  1186. return 0;
  1187. }
  1188. }
  1189. out:
  1190. sockopt_free_buf(&ctx);
  1191. return ret;
  1192. }
  1193. int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
  1194. int optname, char __user *optval,
  1195. int __user *optlen, int max_optlen,
  1196. int retval)
  1197. {
  1198. struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
  1199. struct bpf_sockopt_kern ctx = {
  1200. .sk = sk,
  1201. .level = level,
  1202. .optname = optname,
  1203. .retval = retval,
  1204. };
  1205. int ret;
  1206. /* Opportunistic check to see whether we have any BPF program
  1207. * attached to the hook so we don't waste time allocating
  1208. * memory and locking the socket.
  1209. */
  1210. if (!cgroup_bpf_enabled ||
  1211. __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
  1212. return retval;
  1213. ctx.optlen = max_optlen;
  1214. max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
  1215. if (max_optlen < 0)
  1216. return max_optlen;
  1217. if (!retval) {
  1218. /* If kernel getsockopt finished successfully,
  1219. * copy whatever was returned to the user back
  1220. * into our temporary buffer. Set optlen to the
  1221. * one that kernel returned as well to let
  1222. * BPF programs inspect the value.
  1223. */
  1224. if (get_user(ctx.optlen, optlen)) {
  1225. ret = -EFAULT;
  1226. goto out;
  1227. }
  1228. if (ctx.optlen < 0) {
  1229. ret = -EFAULT;
  1230. goto out;
  1231. }
  1232. if (copy_from_user(ctx.optval, optval,
  1233. min(ctx.optlen, max_optlen)) != 0) {
  1234. ret = -EFAULT;
  1235. goto out;
  1236. }
  1237. }
  1238. lock_sock(sk);
  1239. ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
  1240. &ctx, BPF_PROG_RUN);
  1241. release_sock(sk);
  1242. if (!ret) {
  1243. ret = -EPERM;
  1244. goto out;
  1245. }
  1246. if (ctx.optlen > max_optlen || ctx.optlen < 0) {
  1247. ret = -EFAULT;
  1248. goto out;
  1249. }
  1250. /* BPF programs are only allowed to set retval to 0, not to some
  1251. * arbitrary value.
  1252. */
  1253. if (ctx.retval != 0 && ctx.retval != retval) {
  1254. ret = -EFAULT;
  1255. goto out;
  1256. }
  1257. if (ctx.optlen != 0) {
  1258. if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
  1259. put_user(ctx.optlen, optlen)) {
  1260. ret = -EFAULT;
  1261. goto out;
  1262. }
  1263. }
  1264. ret = ctx.retval;
  1265. out:
  1266. sockopt_free_buf(&ctx);
  1267. return ret;
  1268. }
  1269. #endif
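/* Recursively copy the sysctl directory path into *bufp, outermost
 * component first, appending a '/' after each component. Returns the number
 * of bytes written or a negative error.
 */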
  1270. static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
  1271. size_t *lenp)
  1272. {
  1273. ssize_t tmp_ret = 0, ret;
  1274. if (dir->header.parent) {
  1275. tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
  1276. if (tmp_ret < 0)
  1277. return tmp_ret;
  1278. }
  1279. ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
  1280. if (ret < 0)
  1281. return ret;
  1282. *bufp += ret;
  1283. *lenp -= ret;
  1284. ret += tmp_ret;
  1285. /* Avoid leading slash. */
  1286. if (!ret)
  1287. return ret;
  1288. tmp_ret = strscpy(*bufp, "/", *lenp);
  1289. if (tmp_ret < 0)
  1290. return tmp_ret;
  1291. *bufp += tmp_ret;
  1292. *lenp -= tmp_ret;
  1293. return ret + tmp_ret;
  1294. }
  1295. BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
  1296. size_t, buf_len, u64, flags)
  1297. {
  1298. ssize_t tmp_ret = 0, ret;
  1299. if (!buf)
  1300. return -EINVAL;
  1301. if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
  1302. if (!ctx->head)
  1303. return -EINVAL;
  1304. tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
  1305. if (tmp_ret < 0)
  1306. return tmp_ret;
  1307. }
  1308. ret = strscpy(buf, ctx->table->procname, buf_len);
  1309. return ret < 0 ? ret : tmp_ret + ret;
  1310. }
  1311. static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
  1312. .func = bpf_sysctl_get_name,
  1313. .gpl_only = false,
  1314. .ret_type = RET_INTEGER,
  1315. .arg1_type = ARG_PTR_TO_CTX,
  1316. .arg2_type = ARG_PTR_TO_MEM,
  1317. .arg3_type = ARG_CONST_SIZE,
  1318. .arg4_type = ARG_ANYTHING,
  1319. };
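/* Copy a sysctl value into a BPF-supplied buffer, zero-filling any unused
 * space. Returns the number of bytes copied, or -E2BIG if @dst is too small
 * to hold the full value.
 */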
  1320. static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
  1321. size_t src_len)
  1322. {
  1323. if (!dst)
  1324. return -EINVAL;
  1325. if (!dst_len)
  1326. return -E2BIG;
  1327. if (!src || !src_len) {
  1328. memset(dst, 0, dst_len);
  1329. return -EINVAL;
  1330. }
  1331. memcpy(dst, src, min(dst_len, src_len));
  1332. if (dst_len > src_len) {
  1333. memset(dst + src_len, '\0', dst_len - src_len);
  1334. return src_len;
  1335. }
  1336. dst[dst_len - 1] = '\0';
  1337. return -E2BIG;
  1338. }
  1339. BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
  1340. char *, buf, size_t, buf_len)
  1341. {
  1342. return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
  1343. }
  1344. static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
  1345. .func = bpf_sysctl_get_current_value,
  1346. .gpl_only = false,
  1347. .ret_type = RET_INTEGER,
  1348. .arg1_type = ARG_PTR_TO_CTX,
  1349. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  1350. .arg3_type = ARG_CONST_SIZE,
  1351. };
  1352. BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
  1353. size_t, buf_len)
  1354. {
  1355. if (!ctx->write) {
  1356. if (buf && buf_len)
  1357. memset(buf, '\0', buf_len);
  1358. return -EINVAL;
  1359. }
  1360. return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
  1361. }
  1362. static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
  1363. .func = bpf_sysctl_get_new_value,
  1364. .gpl_only = false,
  1365. .ret_type = RET_INTEGER,
  1366. .arg1_type = ARG_PTR_TO_CTX,
  1367. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  1368. .arg3_type = ARG_CONST_SIZE,
  1369. };
  1370. BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
  1371. const char *, buf, size_t, buf_len)
  1372. {
  1373. if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
  1374. return -EINVAL;
  1375. if (buf_len > PAGE_SIZE - 1)
  1376. return -E2BIG;
  1377. memcpy(ctx->new_val, buf, buf_len);
  1378. ctx->new_len = buf_len;
  1379. ctx->new_updated = 1;
  1380. return 0;
  1381. }
  1382. static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
  1383. .func = bpf_sysctl_set_new_value,
  1384. .gpl_only = false,
  1385. .ret_type = RET_INTEGER,
  1386. .arg1_type = ARG_PTR_TO_CTX,
  1387. .arg2_type = ARG_PTR_TO_MEM,
  1388. .arg3_type = ARG_CONST_SIZE,
  1389. };
  1390. static const struct bpf_func_proto *
  1391. sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  1392. {
  1393. switch (func_id) {
  1394. case BPF_FUNC_strtol:
  1395. return &bpf_strtol_proto;
  1396. case BPF_FUNC_strtoul:
  1397. return &bpf_strtoul_proto;
  1398. case BPF_FUNC_sysctl_get_name:
  1399. return &bpf_sysctl_get_name_proto;
  1400. case BPF_FUNC_sysctl_get_current_value:
  1401. return &bpf_sysctl_get_current_value_proto;
  1402. case BPF_FUNC_sysctl_get_new_value:
  1403. return &bpf_sysctl_get_new_value_proto;
  1404. case BPF_FUNC_sysctl_set_new_value:
  1405. return &bpf_sysctl_set_new_value_proto;
  1406. default:
  1407. return cgroup_base_func_proto(func_id, prog);
  1408. }
  1409. }
static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
                                   const struct bpf_prog *prog,
                                   struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_sysctl, write):
                if (type != BPF_READ)
                        return false;
                bpf_ctx_record_field_size(info, size_default);
                return bpf_ctx_narrow_access_ok(off, size, size_default);
        case bpf_ctx_range(struct bpf_sysctl, file_pos):
                if (type == BPF_READ) {
                        bpf_ctx_record_field_size(info, size_default);
                        return bpf_ctx_narrow_access_ok(off, size, size_default);
                } else {
                        return size == size_default;
                }
        default:
                return false;
        }
}
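
/* Rewrite loads/stores on struct bpf_sysctl into accesses to the in-kernel
 * struct bpf_sysctl_kern. "file_pos" is reached indirectly through the
 * ppos pointer.
 */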
static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
                                     const struct bpf_insn *si,
                                     struct bpf_insn *insn_buf,
                                     struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;
        u32 read_size;

        switch (si->off) {
        case offsetof(struct bpf_sysctl, write):
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct bpf_sysctl_kern, write,
                                       sizeof_field(struct bpf_sysctl_kern,
                                                    write),
                                       target_size));
                break;
        case offsetof(struct bpf_sysctl, file_pos):
                /* ppos is a pointer so it should be accessed via indirect
                 * loads and stores. Also, for stores an additional temporary
                 * register is used, since neither src_reg nor dst_reg can be
                 * overridden.
                 */
                if (type == BPF_WRITE) {
                        int treg = BPF_REG_9;

                        if (si->src_reg == treg || si->dst_reg == treg)
                                --treg;
                        if (si->src_reg == treg || si->dst_reg == treg)
                                --treg;
                        *insn++ = BPF_STX_MEM(
                                BPF_DW, si->dst_reg, treg,
                                offsetof(struct bpf_sysctl_kern, tmp_reg));
                        *insn++ = BPF_LDX_MEM(
                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
                                treg, si->dst_reg,
                                offsetof(struct bpf_sysctl_kern, ppos));
                        *insn++ = BPF_STX_MEM(
                                BPF_SIZEOF(u32), treg, si->src_reg,
                                bpf_ctx_narrow_access_offset(
                                        0, sizeof(u32), sizeof(loff_t)));
                        *insn++ = BPF_LDX_MEM(
                                BPF_DW, treg, si->dst_reg,
                                offsetof(struct bpf_sysctl_kern, tmp_reg));
                } else {
                        *insn++ = BPF_LDX_MEM(
                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
                                si->dst_reg, si->src_reg,
                                offsetof(struct bpf_sysctl_kern, ppos));
                        read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
                        *insn++ = BPF_LDX_MEM(
                                BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
                                bpf_ctx_narrow_access_offset(
                                        0, read_size, sizeof(loff_t)));
                }
                *target_size = sizeof(u32);
                break;
        }

        return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
        .get_func_proto		= sysctl_func_proto,
        .is_valid_access	= sysctl_is_valid_access,
        .convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
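
/* Helpers available to BPF_PROG_TYPE_CGROUP_SOCKOPT programs, on top of the
 * cgroup base helpers.
 */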
static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
#ifdef CONFIG_NET
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_proto;
        case BPF_FUNC_sk_storage_delete:
                return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
#endif
        default:
                return cgroup_base_func_proto(func_id, prog);
        }
}
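
/* Context access checks for struct bpf_sockopt: "retval" is writable only
 * by getsockopt programs, "level"/"optname" only by setsockopt programs,
 * and "optval"/"optval_end" are exposed as a packet-style pointer pair.
 */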
static bool cg_sockopt_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (off < 0 || off >= sizeof(struct bpf_sockopt))
                return false;

        if (off % size != 0)
                return false;

        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct bpf_sockopt, retval):
                        if (size != size_default)
                                return false;
                        return prog->expected_attach_type ==
                                BPF_CGROUP_GETSOCKOPT;
                case offsetof(struct bpf_sockopt, optname):
                        fallthrough;
                case offsetof(struct bpf_sockopt, level):
                        if (size != size_default)
                                return false;
                        return prog->expected_attach_type ==
                                BPF_CGROUP_SETSOCKOPT;
                case offsetof(struct bpf_sockopt, optlen):
                        return size == size_default;
                default:
                        return false;
                }
        }

        switch (off) {
        case offsetof(struct bpf_sockopt, sk):
                if (size != sizeof(__u64))
                        return false;
                info->reg_type = PTR_TO_SOCKET;
                break;
        case offsetof(struct bpf_sockopt, optval):
                if (size != sizeof(__u64))
                        return false;
                info->reg_type = PTR_TO_PACKET;
                break;
        case offsetof(struct bpf_sockopt, optval_end):
                if (size != sizeof(__u64))
                        return false;
                info->reg_type = PTR_TO_PACKET_END;
                break;
        case offsetof(struct bpf_sockopt, retval):
                if (size != size_default)
                        return false;
                return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
        default:
                if (size != size_default)
                        return false;
                break;
        }

        return true;
}
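
/* Emit a single load or store (T) of field F in struct bpf_sockopt_kern,
 * used by cg_sockopt_convert_ctx_access() below to remap bpf_sockopt
 * context accesses.
 */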
#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
        T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
          si->dst_reg, si->src_reg,					\
          offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
                                         const struct bpf_insn *si,
                                         struct bpf_insn *insn_buf,
                                         struct bpf_prog *prog,
                                         u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct bpf_sockopt, sk):
                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
                break;
        case offsetof(struct bpf_sockopt, level):
                if (type == BPF_WRITE)
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
                else
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
                break;
        case offsetof(struct bpf_sockopt, optname):
                if (type == BPF_WRITE)
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
                else
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
                break;
        case offsetof(struct bpf_sockopt, optlen):
                if (type == BPF_WRITE)
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
                else
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
                break;
        case offsetof(struct bpf_sockopt, retval):
                if (type == BPF_WRITE)
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
                else
                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
                break;
        case offsetof(struct bpf_sockopt, optval):
                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
                break;
        case offsetof(struct bpf_sockopt, optval_end):
                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
                break;
        }

        return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
                                   bool direct_write,
                                   const struct bpf_prog *prog)
{
        /* Nothing to do for sockopt argument. The data is kzalloc'ed. */
        return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
        .get_func_proto		= cg_sockopt_func_proto,
        .is_valid_access	= cg_sockopt_is_valid_access,
        .convert_ctx_access	= cg_sockopt_convert_ctx_access,
        .gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};