bpf_trace.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
  3. * Copyright (c) 2016 Facebook
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/types.h>
  7. #include <linux/slab.h>
  8. #include <linux/bpf.h>
  9. #include <linux/bpf_perf_event.h>
  10. #include <linux/btf.h>
  11. #include <linux/filter.h>
  12. #include <linux/uaccess.h>
  13. #include <linux/ctype.h>
  14. #include <linux/kprobes.h>
  15. #include <linux/spinlock.h>
  16. #include <linux/syscalls.h>
  17. #include <linux/error-injection.h>
  18. #include <linux/btf_ids.h>
  19. #include <uapi/linux/bpf.h>
  20. #include <uapi/linux/btf.h>
  21. #include <asm/tlb.h>
  22. #include "trace_probe.h"
  23. #include "trace.h"
  24. #define CREATE_TRACE_POINTS
  25. #include "bpf_trace.h"
  26. #define bpf_event_rcu_dereference(p) \
  27. rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
  28. #ifdef CONFIG_MODULES
  29. struct bpf_trace_module {
  30. struct module *module;
  31. struct list_head list;
  32. };
  33. static LIST_HEAD(bpf_trace_modules);
  34. static DEFINE_MUTEX(bpf_module_mutex);
  35. static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  36. {
  37. struct bpf_raw_event_map *btp, *ret = NULL;
  38. struct bpf_trace_module *btm;
  39. unsigned int i;
  40. mutex_lock(&bpf_module_mutex);
  41. list_for_each_entry(btm, &bpf_trace_modules, list) {
  42. for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
  43. btp = &btm->module->bpf_raw_events[i];
  44. if (!strcmp(btp->tp->name, name)) {
  45. if (try_module_get(btm->module))
  46. ret = btp;
  47. goto out;
  48. }
  49. }
  50. }
  51. out:
  52. mutex_unlock(&bpf_module_mutex);
  53. return ret;
  54. }
  55. #else
  56. static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
  57. {
  58. return NULL;
  59. }
  60. #endif /* CONFIG_MODULES */
  61. u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  62. u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  63. static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  64. u64 flags, const struct btf **btf,
  65. s32 *btf_id);
  66. /**
  67. * trace_call_bpf - invoke BPF program
  68. * @call: tracepoint event
  69. * @ctx: opaque context pointer
  70. *
  71. * kprobe handlers execute BPF programs via this helper.
  72. * Can be used from static tracepoints in the future.
  73. *
  74. * Return: BPF programs always return an integer which is interpreted by
  75. * kprobe handler as:
  76. * 0 - return from kprobe (event is filtered out)
  77. * 1 - store kprobe event into ring buffer
  78. * Other values are reserved and currently alias to 1
  79. */
  80. unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
  81. {
  82. unsigned int ret;
  83. cant_sleep();
  84. if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
  85. /*
  86. * since some bpf program is already running on this cpu,
  87. * don't call into another bpf program (same or different)
  88. * and don't send kprobe event into ring-buffer,
  89. * so return zero here
  90. */
  91. ret = 0;
  92. goto out;
  93. }
  94. /*
  95. * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
  96. * to all call sites, we do a bpf_prog_array_valid() check there to
  97. * see whether call->prog_array is empty or not, which is
  98. * a heuristic to speed up execution.
  99. *
  100. * If the prog_array fetched by bpf_prog_array_valid() was
  101. * non-NULL, we go into trace_call_bpf() and do the actual,
  102. * proper rcu_dereference() under the RCU lock.
  103. * If it turns out that prog_array is NULL, we bail out.
  104. * Conversely, if the pointer fetched by bpf_prog_array_valid()
  105. * was NULL, we skip the prog_array at the risk of missing
  106. * events that were added between that check and the
  107. * rcu_dereference(), which is an accepted risk.
  108. */
  109. ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
  110. out:
  111. __this_cpu_dec(bpf_prog_active);
  112. return ret;
  113. }
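/*
 * Illustrative sketch, not part of this file: a minimal kprobe BPF program
 * whose return value the kprobe handler interprets as described above
 * (0 = filter out the event, 1 = store it). Assumes libbpf's bpf_helpers.h
 * and SEC() conventions on the BPF side.
 *
 *     SEC("kprobe/do_sys_openat2")
 *     int filter_openat(struct pt_regs *ctx)
 *     {
 *             // drop events from pid 1, record everything else
 *             return (bpf_get_current_pid_tgid() >> 32) == 1 ? 0 : 1;
 *     }
 */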
  114. #ifdef CONFIG_BPF_KPROBE_OVERRIDE
  115. BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
  116. {
  117. regs_set_return_value(regs, rc);
  118. override_function_with_return(regs);
  119. return 0;
  120. }
  121. static const struct bpf_func_proto bpf_override_return_proto = {
  122. .func = bpf_override_return,
  123. .gpl_only = true,
  124. .ret_type = RET_INTEGER,
  125. .arg1_type = ARG_PTR_TO_CTX,
  126. .arg2_type = ARG_ANYTHING,
  127. };
  128. #endif
  129. static __always_inline int
  130. bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
  131. {
  132. int ret;
  133. ret = copy_from_user_nofault(dst, unsafe_ptr, size);
  134. if (unlikely(ret < 0))
  135. memset(dst, 0, size);
  136. return ret;
  137. }
  138. BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
  139. const void __user *, unsafe_ptr)
  140. {
  141. return bpf_probe_read_user_common(dst, size, unsafe_ptr);
  142. }
  143. const struct bpf_func_proto bpf_probe_read_user_proto = {
  144. .func = bpf_probe_read_user,
  145. .gpl_only = true,
  146. .ret_type = RET_INTEGER,
  147. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  148. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  149. .arg3_type = ARG_ANYTHING,
  150. };
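/*
 * Illustrative sketch, not part of this file: reading user memory from a
 * kprobe program with the helper defined above. Taking the user pointer
 * from PT_REGS_PARM2() (libbpf's bpf_tracing.h) is an assumption of the
 * example only.
 *
 *     char name[64] = {};
 *     const void *uptr = (const void *)PT_REGS_PARM2(ctx);
 *
 *     if (bpf_probe_read_user(name, sizeof(name), uptr) < 0)
 *             return 0;       // dst was zeroed on failure
 */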
  151. static __always_inline int
  152. bpf_probe_read_user_str_common(void *dst, u32 size,
  153. const void __user *unsafe_ptr)
  154. {
  155. int ret;
  156. /*
  157. * NB: We rely on strncpy_from_user() not copying junk past the NUL
  158. * terminator into `dst`.
  159. *
  160. * strncpy_from_user() does long-sized strides in the fast path. If the
  161. * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
  162. * then there could be junk after the NUL in `dst`. If the user takes `dst`
  163. * and keys a hash map with it, then semantically identical strings can
  164. * occupy multiple entries in the map.
  165. */
  166. ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
  167. if (unlikely(ret < 0))
  168. memset(dst, 0, size);
  169. return ret;
  170. }
  171. BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
  172. const void __user *, unsafe_ptr)
  173. {
  174. return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
  175. }
  176. const struct bpf_func_proto bpf_probe_read_user_str_proto = {
  177. .func = bpf_probe_read_user_str,
  178. .gpl_only = true,
  179. .ret_type = RET_INTEGER,
  180. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  181. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  182. .arg3_type = ARG_ANYTHING,
  183. };
  184. static __always_inline int
  185. bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
  186. {
  187. int ret;
  188. ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
  189. if (unlikely(ret < 0))
  190. memset(dst, 0, size);
  191. return ret;
  192. }
  193. BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
  194. const void *, unsafe_ptr)
  195. {
  196. return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
  197. }
  198. const struct bpf_func_proto bpf_probe_read_kernel_proto = {
  199. .func = bpf_probe_read_kernel,
  200. .gpl_only = true,
  201. .ret_type = RET_INTEGER,
  202. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  203. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  204. .arg3_type = ARG_ANYTHING,
  205. };
  206. static __always_inline int
  207. bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
  208. {
  209. int ret;
  210. /*
  211. * The strncpy_from_kernel_nofault() call will likely not fill the
  212. * entire buffer, but that's okay in this circumstance as we're probing
  213. * arbitrary memory anyway similar to bpf_probe_read_*() and might
  214. * as well probe the stack. Thus, memory is explicitly cleared
  215. * only in the error case, so that improper users who ignore the
  216. * return code altogether don't copy garbage; otherwise the length of
  217. * the string is returned and can be used for bpf_perf_event_output() et al.
  218. */
  219. ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
  220. if (unlikely(ret < 0))
  221. memset(dst, 0, size);
  222. return ret;
  223. }
  224. BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
  225. const void *, unsafe_ptr)
  226. {
  227. return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
  228. }
  229. const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
  230. .func = bpf_probe_read_kernel_str,
  231. .gpl_only = true,
  232. .ret_type = RET_INTEGER,
  233. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  234. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  235. .arg3_type = ARG_ANYTHING,
  236. };
  237. #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
  238. BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
  239. const void *, unsafe_ptr)
  240. {
  241. if ((unsigned long)unsafe_ptr < TASK_SIZE) {
  242. return bpf_probe_read_user_common(dst, size,
  243. (__force void __user *)unsafe_ptr);
  244. }
  245. return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
  246. }
  247. static const struct bpf_func_proto bpf_probe_read_compat_proto = {
  248. .func = bpf_probe_read_compat,
  249. .gpl_only = true,
  250. .ret_type = RET_INTEGER,
  251. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  252. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  253. .arg3_type = ARG_ANYTHING,
  254. };
  255. BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
  256. const void *, unsafe_ptr)
  257. {
  258. if ((unsigned long)unsafe_ptr < TASK_SIZE) {
  259. return bpf_probe_read_user_str_common(dst, size,
  260. (__force void __user *)unsafe_ptr);
  261. }
  262. return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
  263. }
  264. static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
  265. .func = bpf_probe_read_compat_str,
  266. .gpl_only = true,
  267. .ret_type = RET_INTEGER,
  268. .arg1_type = ARG_PTR_TO_UNINIT_MEM,
  269. .arg2_type = ARG_CONST_SIZE_OR_ZERO,
  270. .arg3_type = ARG_ANYTHING,
  271. };
  272. #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
  273. BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
  274. u32, size)
  275. {
  276. /*
  277. * Ensure we're in user context which is safe for the helper to
  278. * run. This helper has no business in a kthread.
  279. *
  280. * access_ok() should prevent writing to non-user memory, but in
  281. * some situations (nommu, temporary switch, etc) access_ok() does
  282. * not provide enough validation, hence the check on KERNEL_DS.
  283. *
  284. * nmi_uaccess_okay() ensures the probe is not run in an interim
  285. * state, when the task or mm are switched. This is specifically
  286. * required to prevent the use of temporary mm.
  287. */
  288. if (unlikely(in_interrupt() ||
  289. current->flags & (PF_KTHREAD | PF_EXITING)))
  290. return -EPERM;
  291. if (unlikely(uaccess_kernel()))
  292. return -EPERM;
  293. if (unlikely(!nmi_uaccess_okay()))
  294. return -EPERM;
  295. return copy_to_user_nofault(unsafe_ptr, src, size);
  296. }
  297. static const struct bpf_func_proto bpf_probe_write_user_proto = {
  298. .func = bpf_probe_write_user,
  299. .gpl_only = true,
  300. .ret_type = RET_INTEGER,
  301. .arg1_type = ARG_ANYTHING,
  302. .arg2_type = ARG_PTR_TO_MEM,
  303. .arg3_type = ARG_CONST_SIZE,
  304. };
  305. static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
  306. {
  307. if (!capable(CAP_SYS_ADMIN))
  308. return NULL;
  309. pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
  310. current->comm, task_pid_nr(current));
  311. return &bpf_probe_write_user_proto;
  312. }
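/*
 * Illustrative sketch, not part of this file: overwriting user memory from
 * a BPF program. Loading such a program requires CAP_SYS_ADMIN and emits
 * the warning above; the destination pointer is an assumption of the
 * example.
 *
 *     long zero = 0;
 *     void *udst = (void *)PT_REGS_PARM1(ctx);   // assumed user pointer
 *
 *     bpf_probe_write_user(udst, &zero, sizeof(zero));
 */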
  313. static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
  314. size_t bufsz)
  315. {
  316. void __user *user_ptr = (__force void __user *)unsafe_ptr;
  317. buf[0] = 0;
  318. switch (fmt_ptype) {
  319. case 's':
  320. #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
  321. if ((unsigned long)unsafe_ptr < TASK_SIZE) {
  322. strncpy_from_user_nofault(buf, user_ptr, bufsz);
  323. break;
  324. }
  325. fallthrough;
  326. #endif
  327. case 'k':
  328. strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
  329. break;
  330. case 'u':
  331. strncpy_from_user_nofault(buf, user_ptr, bufsz);
  332. break;
  333. }
  334. }
  335. static DEFINE_RAW_SPINLOCK(trace_printk_lock);
  336. #define BPF_TRACE_PRINTK_SIZE 1024
  337. static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
  338. {
  339. static char buf[BPF_TRACE_PRINTK_SIZE];
  340. unsigned long flags;
  341. va_list ap;
  342. int ret;
  343. raw_spin_lock_irqsave(&trace_printk_lock, flags);
  344. va_start(ap, fmt);
  345. ret = vsnprintf(buf, sizeof(buf), fmt, ap);
  346. va_end(ap);
  347. /* vsnprintf() will not append null for zero-length strings */
  348. if (ret == 0)
  349. buf[0] = '\0';
  350. trace_bpf_trace_printk(buf);
  351. raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
  352. return ret;
  353. }
  354. /*
  355. * Only limited trace_printk() conversion specifiers allowed:
  356. * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
  357. */
  358. BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
  359. u64, arg2, u64, arg3)
  360. {
  361. int i, mod[3] = {}, fmt_cnt = 0;
  362. char buf[64], fmt_ptype;
  363. void *unsafe_ptr = NULL;
  364. bool str_seen = false;
  365. /*
  366. * bpf_check()->check_func_arg()->check_stack_boundary()
  367. * guarantees that fmt points to bpf program stack,
  368. * fmt_size bytes of it were initialized and fmt_size > 0
  369. */
  370. if (fmt[--fmt_size] != 0)
  371. return -EINVAL;
  372. /* check format string for allowed specifiers */
  373. for (i = 0; i < fmt_size; i++) {
  374. if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
  375. return -EINVAL;
  376. if (fmt[i] != '%')
  377. continue;
  378. if (fmt_cnt >= 3)
  379. return -EINVAL;
  380. /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
  381. i++;
  382. if (fmt[i] == 'l') {
  383. mod[fmt_cnt]++;
  384. i++;
  385. } else if (fmt[i] == 'p') {
  386. mod[fmt_cnt]++;
  387. if ((fmt[i + 1] == 'k' ||
  388. fmt[i + 1] == 'u') &&
  389. fmt[i + 2] == 's') {
  390. fmt_ptype = fmt[i + 1];
  391. i += 2;
  392. goto fmt_str;
  393. }
  394. if (fmt[i + 1] == 'B') {
  395. i++;
  396. goto fmt_next;
  397. }
  398. /* disallow any further format extensions */
  399. if (fmt[i + 1] != 0 &&
  400. !isspace(fmt[i + 1]) &&
  401. !ispunct(fmt[i + 1]))
  402. return -EINVAL;
  403. goto fmt_next;
  404. } else if (fmt[i] == 's') {
  405. mod[fmt_cnt]++;
  406. fmt_ptype = fmt[i];
  407. fmt_str:
  408. if (str_seen)
  409. /* allow only one '%s' per fmt string */
  410. return -EINVAL;
  411. str_seen = true;
  412. if (fmt[i + 1] != 0 &&
  413. !isspace(fmt[i + 1]) &&
  414. !ispunct(fmt[i + 1]))
  415. return -EINVAL;
  416. switch (fmt_cnt) {
  417. case 0:
  418. unsafe_ptr = (void *)(long)arg1;
  419. arg1 = (long)buf;
  420. break;
  421. case 1:
  422. unsafe_ptr = (void *)(long)arg2;
  423. arg2 = (long)buf;
  424. break;
  425. case 2:
  426. unsafe_ptr = (void *)(long)arg3;
  427. arg3 = (long)buf;
  428. break;
  429. }
  430. bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
  431. sizeof(buf));
  432. goto fmt_next;
  433. }
  434. if (fmt[i] == 'l') {
  435. mod[fmt_cnt]++;
  436. i++;
  437. }
  438. if (fmt[i] != 'i' && fmt[i] != 'd' &&
  439. fmt[i] != 'u' && fmt[i] != 'x')
  440. return -EINVAL;
  441. fmt_next:
  442. fmt_cnt++;
  443. }
  444. /* Horrid workaround for getting va_list handling working with different
  445. * argument type combinations generically for 32 and 64 bit archs.
  446. */
  447. #define __BPF_TP_EMIT() __BPF_ARG3_TP()
  448. #define __BPF_TP(...) \
  449. bpf_do_trace_printk(fmt, ##__VA_ARGS__)
  450. #define __BPF_ARG1_TP(...) \
  451. ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
  452. ? __BPF_TP(arg1, ##__VA_ARGS__) \
  453. : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
  454. ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
  455. : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
  456. #define __BPF_ARG2_TP(...) \
  457. ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
  458. ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
  459. : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
  460. ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
  461. : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
  462. #define __BPF_ARG3_TP(...) \
  463. ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
  464. ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
  465. : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
  466. ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
  467. : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
  468. return __BPF_TP_EMIT();
  469. }
  470. static const struct bpf_func_proto bpf_trace_printk_proto = {
  471. .func = bpf_trace_printk,
  472. .gpl_only = true,
  473. .ret_type = RET_INTEGER,
  474. .arg1_type = ARG_PTR_TO_MEM,
  475. .arg2_type = ARG_CONST_SIZE,
  476. };
  477. const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
  478. {
  479. /*
  480. * This program might be calling bpf_trace_printk,
  481. * so enable the associated bpf_trace/bpf_trace_printk event.
  482. * Repeat this each time as it is possible a user has
  483. * disabled bpf_trace_printk events. By loading a program that
  484. * calls bpf_trace_printk(), however, the user has expressed
  485. * the intent to see such events.
  486. */
  487. if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
  488. pr_warn_ratelimited("could not enable bpf_trace_printk events");
  489. return &bpf_trace_printk_proto;
  490. }
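/*
 * Illustrative sketch, not part of this file: bpf_trace_printk() from the
 * BPF side. Per the format checking above, at most three arguments and a
 * single %s are accepted; the output feeds the bpf_trace_printk trace event
 * enabled here.
 *
 *     char fmt[] = "pid %d comm %s\n";
 *     char comm[16];
 *
 *     bpf_get_current_comm(comm, sizeof(comm));
 *     bpf_trace_printk(fmt, sizeof(fmt),
 *                      bpf_get_current_pid_tgid() >> 32, comm);
 */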
  491. #define MAX_SEQ_PRINTF_VARARGS 12
  492. #define MAX_SEQ_PRINTF_MAX_MEMCPY 6
  493. #define MAX_SEQ_PRINTF_STR_LEN 128
  494. struct bpf_seq_printf_buf {
  495. char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
  496. };
  497. static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
  498. static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
  499. BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
  500. const void *, data, u32, data_len)
  501. {
  502. int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
  503. int i, buf_used, copy_size, num_args;
  504. u64 params[MAX_SEQ_PRINTF_VARARGS];
  505. struct bpf_seq_printf_buf *bufs;
  506. const u64 *args = data;
  507. buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
  508. if (WARN_ON_ONCE(buf_used > 1)) {
  509. err = -EBUSY;
  510. goto out;
  511. }
  512. bufs = this_cpu_ptr(&bpf_seq_printf_buf);
  513. /*
  514. * bpf_check()->check_func_arg()->check_stack_boundary()
  515. * guarantees that fmt points to bpf program stack,
  516. * fmt_size bytes of it were initialized and fmt_size > 0
  517. */
  518. if (fmt[--fmt_size] != 0)
  519. goto out;
  520. if (data_len & 7)
  521. goto out;
  522. for (i = 0; i < fmt_size; i++) {
  523. if (fmt[i] == '%') {
  524. if (fmt[i + 1] == '%')
  525. i++;
  526. else if (!data || !data_len)
  527. goto out;
  528. }
  529. }
  530. num_args = data_len / 8;
  531. /* check format string for allowed specifiers */
  532. for (i = 0; i < fmt_size; i++) {
  533. /* only printable ascii for now. */
  534. if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
  535. err = -EINVAL;
  536. goto out;
  537. }
  538. if (fmt[i] != '%')
  539. continue;
  540. if (fmt[i + 1] == '%') {
  541. i++;
  542. continue;
  543. }
  544. if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
  545. err = -E2BIG;
  546. goto out;
  547. }
  548. if (fmt_cnt >= num_args) {
  549. err = -EINVAL;
  550. goto out;
  551. }
  552. /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
  553. i++;
  554. /* skip optional "[0 +-][num]" width formatting field */
  555. while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
  556. fmt[i] == ' ')
  557. i++;
  558. if (fmt[i] >= '1' && fmt[i] <= '9') {
  559. i++;
  560. while (fmt[i] >= '0' && fmt[i] <= '9')
  561. i++;
  562. }
  563. if (fmt[i] == 's') {
  564. void *unsafe_ptr;
  565. /* try our best to copy */
  566. if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
  567. err = -E2BIG;
  568. goto out;
  569. }
  570. unsafe_ptr = (void *)(long)args[fmt_cnt];
  571. err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
  572. unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
  573. if (err < 0)
  574. bufs->buf[memcpy_cnt][0] = '\0';
  575. params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
  576. fmt_cnt++;
  577. memcpy_cnt++;
  578. continue;
  579. }
  580. if (fmt[i] == 'p') {
  581. if (fmt[i + 1] == 0 ||
  582. fmt[i + 1] == 'K' ||
  583. fmt[i + 1] == 'x' ||
  584. fmt[i + 1] == 'B') {
  585. /* just kernel pointers */
  586. params[fmt_cnt] = args[fmt_cnt];
  587. fmt_cnt++;
  588. continue;
  589. }
  590. /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
  591. if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
  592. err = -EINVAL;
  593. goto out;
  594. }
  595. if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
  596. err = -EINVAL;
  597. goto out;
  598. }
  599. if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
  600. err = -E2BIG;
  601. goto out;
  602. }
  603. copy_size = (fmt[i + 2] == '4') ? 4 : 16;
  604. err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
  605. (void *) (long) args[fmt_cnt],
  606. copy_size);
  607. if (err < 0)
  608. memset(bufs->buf[memcpy_cnt], 0, copy_size);
  609. params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
  610. i += 2;
  611. fmt_cnt++;
  612. memcpy_cnt++;
  613. continue;
  614. }
  615. if (fmt[i] == 'l') {
  616. i++;
  617. if (fmt[i] == 'l')
  618. i++;
  619. }
  620. if (fmt[i] != 'i' && fmt[i] != 'd' &&
  621. fmt[i] != 'u' && fmt[i] != 'x' &&
  622. fmt[i] != 'X') {
  623. err = -EINVAL;
  624. goto out;
  625. }
  626. params[fmt_cnt] = args[fmt_cnt];
  627. fmt_cnt++;
  628. }
  629. /* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
  630. * all of them to seq_printf().
  631. */
  632. seq_printf(m, fmt, params[0], params[1], params[2], params[3],
  633. params[4], params[5], params[6], params[7], params[8],
  634. params[9], params[10], params[11]);
  635. err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
  636. out:
  637. this_cpu_dec(bpf_seq_printf_buf_used);
  638. return err;
  639. }
  640. BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
  641. static const struct bpf_func_proto bpf_seq_printf_proto = {
  642. .func = bpf_seq_printf,
  643. .gpl_only = true,
  644. .ret_type = RET_INTEGER,
  645. .arg1_type = ARG_PTR_TO_BTF_ID,
  646. .arg1_btf_id = &btf_seq_file_ids[0],
  647. .arg2_type = ARG_PTR_TO_MEM,
  648. .arg3_type = ARG_CONST_SIZE,
  649. .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
  650. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  651. };
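/*
 * Illustrative sketch, not part of this file: bpf_seq_printf() as used from
 * a BPF iterator program. The iterator context and vmlinux.h types are
 * assumptions of the example; note data_len must be a multiple of 8.
 *
 *     SEC("iter/task")
 *     int dump_task(struct bpf_iter__task *ctx)
 *     {
 *             struct seq_file *seq = ctx->meta->seq;
 *             struct task_struct *task = ctx->task;
 *             char fmt[] = "%8d %s\n";
 *             __u64 args[2];
 *
 *             if (!task)
 *                     return 0;
 *             args[0] = task->pid;
 *             args[1] = (__u64)task->comm;
 *             bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *             return 0;
 *     }
 */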
  652. BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
  653. {
  654. return seq_write(m, data, len) ? -EOVERFLOW : 0;
  655. }
  656. static const struct bpf_func_proto bpf_seq_write_proto = {
  657. .func = bpf_seq_write,
  658. .gpl_only = true,
  659. .ret_type = RET_INTEGER,
  660. .arg1_type = ARG_PTR_TO_BTF_ID,
  661. .arg1_btf_id = &btf_seq_file_ids[0],
  662. .arg2_type = ARG_PTR_TO_MEM,
  663. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  664. };
  665. BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
  666. u32, btf_ptr_size, u64, flags)
  667. {
  668. const struct btf *btf;
  669. s32 btf_id;
  670. int ret;
  671. ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
  672. if (ret)
  673. return ret;
  674. return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
  675. }
  676. static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
  677. .func = bpf_seq_printf_btf,
  678. .gpl_only = true,
  679. .ret_type = RET_INTEGER,
  680. .arg1_type = ARG_PTR_TO_BTF_ID,
  681. .arg1_btf_id = &btf_seq_file_ids[0],
  682. .arg2_type = ARG_PTR_TO_MEM,
  683. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  684. .arg4_type = ARG_ANYTHING,
  685. };
  686. static __always_inline int
  687. get_map_perf_counter(struct bpf_map *map, u64 flags,
  688. u64 *value, u64 *enabled, u64 *running)
  689. {
  690. struct bpf_array *array = container_of(map, struct bpf_array, map);
  691. unsigned int cpu = smp_processor_id();
  692. u64 index = flags & BPF_F_INDEX_MASK;
  693. struct bpf_event_entry *ee;
  694. if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
  695. return -EINVAL;
  696. if (index == BPF_F_CURRENT_CPU)
  697. index = cpu;
  698. if (unlikely(index >= array->map.max_entries))
  699. return -E2BIG;
  700. ee = READ_ONCE(array->ptrs[index]);
  701. if (!ee)
  702. return -ENOENT;
  703. return perf_event_read_local(ee->event, value, enabled, running);
  704. }
  705. BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
  706. {
  707. u64 value = 0;
  708. int err;
  709. err = get_map_perf_counter(map, flags, &value, NULL, NULL);
  710. /*
  711. * this api is ugly since we miss [-22..-2] range of valid
  712. * counter values, but that's uapi
  713. */
  714. if (err)
  715. return err;
  716. return value;
  717. }
  718. static const struct bpf_func_proto bpf_perf_event_read_proto = {
  719. .func = bpf_perf_event_read,
  720. .gpl_only = true,
  721. .ret_type = RET_INTEGER,
  722. .arg1_type = ARG_CONST_MAP_PTR,
  723. .arg2_type = ARG_ANYTHING,
  724. };
  725. BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
  726. struct bpf_perf_event_value *, buf, u32, size)
  727. {
  728. int err = -EINVAL;
  729. if (unlikely(size != sizeof(struct bpf_perf_event_value)))
  730. goto clear;
  731. err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
  732. &buf->running);
  733. if (unlikely(err))
  734. goto clear;
  735. return 0;
  736. clear:
  737. memset(buf, 0, size);
  738. return err;
  739. }
  740. static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
  741. .func = bpf_perf_event_read_value,
  742. .gpl_only = true,
  743. .ret_type = RET_INTEGER,
  744. .arg1_type = ARG_CONST_MAP_PTR,
  745. .arg2_type = ARG_ANYTHING,
  746. .arg3_type = ARG_PTR_TO_UNINIT_MEM,
  747. .arg4_type = ARG_CONST_SIZE,
  748. };
  749. static __always_inline u64
  750. __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
  751. u64 flags, struct perf_sample_data *sd)
  752. {
  753. struct bpf_array *array = container_of(map, struct bpf_array, map);
  754. unsigned int cpu = smp_processor_id();
  755. u64 index = flags & BPF_F_INDEX_MASK;
  756. struct bpf_event_entry *ee;
  757. struct perf_event *event;
  758. if (index == BPF_F_CURRENT_CPU)
  759. index = cpu;
  760. if (unlikely(index >= array->map.max_entries))
  761. return -E2BIG;
  762. ee = READ_ONCE(array->ptrs[index]);
  763. if (!ee)
  764. return -ENOENT;
  765. event = ee->event;
  766. if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
  767. event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
  768. return -EINVAL;
  769. if (unlikely(event->oncpu != cpu))
  770. return -EOPNOTSUPP;
  771. return perf_event_output(event, sd, regs);
  772. }
  773. /*
  774. * Support executing tracepoints in normal, irq, and nmi context that each call
  775. * bpf_perf_event_output
  776. */
  777. struct bpf_trace_sample_data {
  778. struct perf_sample_data sds[3];
  779. };
  780. static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
  781. static DEFINE_PER_CPU(int, bpf_trace_nest_level);
  782. BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
  783. u64, flags, void *, data, u64, size)
  784. {
  785. struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
  786. int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
  787. struct perf_raw_record raw = {
  788. .frag = {
  789. .size = size,
  790. .data = data,
  791. },
  792. };
  793. struct perf_sample_data *sd;
  794. int err;
  795. if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
  796. err = -EBUSY;
  797. goto out;
  798. }
  799. sd = &sds->sds[nest_level - 1];
  800. if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
  801. err = -EINVAL;
  802. goto out;
  803. }
  804. perf_sample_data_init(sd, 0, 0);
  805. sd->raw = &raw;
  806. err = __bpf_perf_event_output(regs, map, flags, sd);
  807. out:
  808. this_cpu_dec(bpf_trace_nest_level);
  809. return err;
  810. }
  811. static const struct bpf_func_proto bpf_perf_event_output_proto = {
  812. .func = bpf_perf_event_output,
  813. .gpl_only = true,
  814. .ret_type = RET_INTEGER,
  815. .arg1_type = ARG_PTR_TO_CTX,
  816. .arg2_type = ARG_CONST_MAP_PTR,
  817. .arg3_type = ARG_ANYTHING,
  818. .arg4_type = ARG_PTR_TO_MEM,
  819. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  820. };
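/*
 * Illustrative sketch, not part of this file: emitting an event through the
 * helper above from a kprobe program. "events" is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map declared by the example program.
 *
 *     struct event { __u32 pid; __u64 ts; } e = {
 *             .pid = bpf_get_current_pid_tgid() >> 32,
 *             .ts  = bpf_ktime_get_ns(),
 *     };
 *
 *     bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */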
  821. static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
  822. struct bpf_nested_pt_regs {
  823. struct pt_regs regs[3];
  824. };
  825. static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
  826. static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
  827. u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
  828. void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
  829. {
  830. int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
  831. struct perf_raw_frag frag = {
  832. .copy = ctx_copy,
  833. .size = ctx_size,
  834. .data = ctx,
  835. };
  836. struct perf_raw_record raw = {
  837. .frag = {
  838. {
  839. .next = ctx_size ? &frag : NULL,
  840. },
  841. .size = meta_size,
  842. .data = meta,
  843. },
  844. };
  845. struct perf_sample_data *sd;
  846. struct pt_regs *regs;
  847. u64 ret;
  848. if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
  849. ret = -EBUSY;
  850. goto out;
  851. }
  852. sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
  853. regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
  854. perf_fetch_caller_regs(regs);
  855. perf_sample_data_init(sd, 0, 0);
  856. sd->raw = &raw;
  857. ret = __bpf_perf_event_output(regs, map, flags, sd);
  858. out:
  859. this_cpu_dec(bpf_event_output_nest_level);
  860. return ret;
  861. }
  862. BPF_CALL_0(bpf_get_current_task)
  863. {
  864. return (long) current;
  865. }
  866. const struct bpf_func_proto bpf_get_current_task_proto = {
  867. .func = bpf_get_current_task,
  868. .gpl_only = true,
  869. .ret_type = RET_INTEGER,
  870. };
  871. BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
  872. {
  873. struct bpf_array *array = container_of(map, struct bpf_array, map);
  874. struct cgroup *cgrp;
  875. if (unlikely(idx >= array->map.max_entries))
  876. return -E2BIG;
  877. cgrp = READ_ONCE(array->ptrs[idx]);
  878. if (unlikely(!cgrp))
  879. return -EAGAIN;
  880. return task_under_cgroup_hierarchy(current, cgrp);
  881. }
  882. static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
  883. .func = bpf_current_task_under_cgroup,
  884. .gpl_only = false,
  885. .ret_type = RET_INTEGER,
  886. .arg1_type = ARG_CONST_MAP_PTR,
  887. .arg2_type = ARG_ANYTHING,
  888. };
  889. struct send_signal_irq_work {
  890. struct irq_work irq_work;
  891. struct task_struct *task;
  892. u32 sig;
  893. enum pid_type type;
  894. };
  895. static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
  896. static void do_bpf_send_signal(struct irq_work *entry)
  897. {
  898. struct send_signal_irq_work *work;
  899. work = container_of(entry, struct send_signal_irq_work, irq_work);
  900. group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
  901. }
  902. static int bpf_send_signal_common(u32 sig, enum pid_type type)
  903. {
  904. struct send_signal_irq_work *work = NULL;
  905. /* Similar to bpf_probe_write_user, the task needs to be
  906. * in a sound condition and kernel memory access must be
  907. * permitted in order to send a signal to the current
  908. * task.
  909. */
  910. if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
  911. return -EPERM;
  912. if (unlikely(uaccess_kernel()))
  913. return -EPERM;
  914. if (unlikely(!nmi_uaccess_okay()))
  915. return -EPERM;
  916. if (irqs_disabled()) {
  917. /* Do an early check on signal validity. Otherwise,
  918. * the error is lost in deferred irq_work.
  919. */
  920. if (unlikely(!valid_signal(sig)))
  921. return -EINVAL;
  922. work = this_cpu_ptr(&send_signal_work);
  923. if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
  924. return -EBUSY;
  925. /* Add the current task, which is the target of the signal,
  926. * to the irq_work. The current task may have changed by the
  927. * time the queued irq_work gets executed.
  928. */
  929. work->task = current;
  930. work->sig = sig;
  931. work->type = type;
  932. irq_work_queue(&work->irq_work);
  933. return 0;
  934. }
  935. return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
  936. }
  937. BPF_CALL_1(bpf_send_signal, u32, sig)
  938. {
  939. return bpf_send_signal_common(sig, PIDTYPE_TGID);
  940. }
  941. static const struct bpf_func_proto bpf_send_signal_proto = {
  942. .func = bpf_send_signal,
  943. .gpl_only = false,
  944. .ret_type = RET_INTEGER,
  945. .arg1_type = ARG_ANYTHING,
  946. };
  947. BPF_CALL_1(bpf_send_signal_thread, u32, sig)
  948. {
  949. return bpf_send_signal_common(sig, PIDTYPE_PID);
  950. }
  951. static const struct bpf_func_proto bpf_send_signal_thread_proto = {
  952. .func = bpf_send_signal_thread,
  953. .gpl_only = false,
  954. .ret_type = RET_INTEGER,
  955. .arg1_type = ARG_ANYTHING,
  956. };
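/*
 * Illustrative sketch, not part of this file: signalling the current task
 * from a BPF program. The numeric value 10 (SIGUSR1 on x86/arm64) is an
 * assumption of the example, since the BPF side has no signal.h by default.
 *
 *     bpf_send_signal(10);            // whole thread group (PIDTYPE_TGID)
 *     bpf_send_signal_thread(10);     // only the current thread (PIDTYPE_PID)
 */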
  957. BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
  958. {
  959. long len;
  960. char *p;
  961. if (!sz)
  962. return 0;
  963. p = d_path(path, buf, sz);
  964. if (IS_ERR(p)) {
  965. len = PTR_ERR(p);
  966. } else {
  967. len = buf + sz - p;
  968. memmove(buf, p, len);
  969. }
  970. return len;
  971. }
  972. BTF_SET_START(btf_allowlist_d_path)
  973. #ifdef CONFIG_SECURITY
  974. BTF_ID(func, security_file_permission)
  975. BTF_ID(func, security_inode_getattr)
  976. BTF_ID(func, security_file_open)
  977. #endif
  978. #ifdef CONFIG_SECURITY_PATH
  979. BTF_ID(func, security_path_truncate)
  980. #endif
  981. BTF_ID(func, vfs_truncate)
  982. BTF_ID(func, vfs_fallocate)
  983. BTF_ID(func, dentry_open)
  984. BTF_ID(func, vfs_getattr)
  985. BTF_ID(func, filp_close)
  986. BTF_SET_END(btf_allowlist_d_path)
  987. static bool bpf_d_path_allowed(const struct bpf_prog *prog)
  988. {
  989. return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
  990. }
  991. BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
  992. static const struct bpf_func_proto bpf_d_path_proto = {
  993. .func = bpf_d_path,
  994. .gpl_only = false,
  995. .ret_type = RET_INTEGER,
  996. .arg1_type = ARG_PTR_TO_BTF_ID,
  997. .arg1_btf_id = &bpf_d_path_btf_ids[0],
  998. .arg2_type = ARG_PTR_TO_MEM,
  999. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  1000. .allowed = bpf_d_path_allowed,
  1001. };
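/*
 * Illustrative sketch, not part of this file: bpf_d_path() from a program
 * attached to one of the allowlisted functions above, here an fentry on
 * filp_close. Assumes libbpf's bpf_tracing.h (BPF_PROG) and vmlinux.h.
 *
 *     SEC("fentry/filp_close")
 *     int BPF_PROG(trace_close, struct file *filp)
 *     {
 *             char path[256];
 *
 *             if (bpf_d_path(&filp->f_path, path, sizeof(path)) > 0)
 *                     bpf_printk("closed %s", path);
 *             return 0;
 *     }
 */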
  1002. #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
  1003. BTF_F_PTR_RAW | BTF_F_ZERO)
  1004. static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
  1005. u64 flags, const struct btf **btf,
  1006. s32 *btf_id)
  1007. {
  1008. const struct btf_type *t;
  1009. if (unlikely(flags & ~(BTF_F_ALL)))
  1010. return -EINVAL;
  1011. if (btf_ptr_size != sizeof(struct btf_ptr))
  1012. return -EINVAL;
  1013. *btf = bpf_get_btf_vmlinux();
  1014. if (IS_ERR_OR_NULL(*btf))
  1015. return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
  1016. if (ptr->type_id > 0)
  1017. *btf_id = ptr->type_id;
  1018. else
  1019. return -EINVAL;
  1020. if (*btf_id > 0)
  1021. t = btf_type_by_id(*btf, *btf_id);
  1022. if (*btf_id <= 0 || !t)
  1023. return -ENOENT;
  1024. return 0;
  1025. }
  1026. BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
  1027. u32, btf_ptr_size, u64, flags)
  1028. {
  1029. const struct btf *btf;
  1030. s32 btf_id;
  1031. int ret;
  1032. ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
  1033. if (ret)
  1034. return ret;
  1035. return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
  1036. flags);
  1037. }
  1038. const struct bpf_func_proto bpf_snprintf_btf_proto = {
  1039. .func = bpf_snprintf_btf,
  1040. .gpl_only = false,
  1041. .ret_type = RET_INTEGER,
  1042. .arg1_type = ARG_PTR_TO_MEM,
  1043. .arg2_type = ARG_CONST_SIZE,
  1044. .arg3_type = ARG_PTR_TO_MEM,
  1045. .arg4_type = ARG_CONST_SIZE,
  1046. .arg5_type = ARG_ANYTHING,
  1047. };
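/*
 * Illustrative sketch, not part of this file: rendering a kernel struct as
 * text with the helper above. Resolving the type id via
 * bpf_core_type_id_kernel() assumes libbpf's bpf_core_read.h.
 *
 *     struct btf_ptr ptr = {
 *             .ptr     = (void *)bpf_get_current_task(),
 *             .type_id = bpf_core_type_id_kernel(struct task_struct),
 *     };
 *     char out[512];
 *
 *     bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), BTF_F_COMPACT);
 */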
  1048. const struct bpf_func_proto *
  1049. bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  1050. {
  1051. switch (func_id) {
  1052. case BPF_FUNC_map_lookup_elem:
  1053. return &bpf_map_lookup_elem_proto;
  1054. case BPF_FUNC_map_update_elem:
  1055. return &bpf_map_update_elem_proto;
  1056. case BPF_FUNC_map_delete_elem:
  1057. return &bpf_map_delete_elem_proto;
  1058. case BPF_FUNC_map_push_elem:
  1059. return &bpf_map_push_elem_proto;
  1060. case BPF_FUNC_map_pop_elem:
  1061. return &bpf_map_pop_elem_proto;
  1062. case BPF_FUNC_map_peek_elem:
  1063. return &bpf_map_peek_elem_proto;
  1064. case BPF_FUNC_ktime_get_ns:
  1065. return &bpf_ktime_get_ns_proto;
  1066. case BPF_FUNC_ktime_get_boot_ns:
  1067. return &bpf_ktime_get_boot_ns_proto;
  1068. case BPF_FUNC_tail_call:
  1069. return &bpf_tail_call_proto;
  1070. case BPF_FUNC_get_current_pid_tgid:
  1071. return &bpf_get_current_pid_tgid_proto;
  1072. case BPF_FUNC_get_current_task:
  1073. return &bpf_get_current_task_proto;
  1074. case BPF_FUNC_get_current_uid_gid:
  1075. return &bpf_get_current_uid_gid_proto;
  1076. case BPF_FUNC_get_current_comm:
  1077. return &bpf_get_current_comm_proto;
  1078. case BPF_FUNC_trace_printk:
  1079. return bpf_get_trace_printk_proto();
  1080. case BPF_FUNC_get_smp_processor_id:
  1081. return &bpf_get_smp_processor_id_proto;
  1082. case BPF_FUNC_get_numa_node_id:
  1083. return &bpf_get_numa_node_id_proto;
  1084. case BPF_FUNC_perf_event_read:
  1085. return &bpf_perf_event_read_proto;
  1086. case BPF_FUNC_current_task_under_cgroup:
  1087. return &bpf_current_task_under_cgroup_proto;
  1088. case BPF_FUNC_get_prandom_u32:
  1089. return &bpf_get_prandom_u32_proto;
  1090. case BPF_FUNC_probe_write_user:
  1091. return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
  1092. NULL : bpf_get_probe_write_proto();
  1093. case BPF_FUNC_probe_read_user:
  1094. return &bpf_probe_read_user_proto;
  1095. case BPF_FUNC_probe_read_kernel:
  1096. return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
  1097. NULL : &bpf_probe_read_kernel_proto;
  1098. case BPF_FUNC_probe_read_user_str:
  1099. return &bpf_probe_read_user_str_proto;
  1100. case BPF_FUNC_probe_read_kernel_str:
  1101. return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
  1102. NULL : &bpf_probe_read_kernel_str_proto;
  1103. #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
  1104. case BPF_FUNC_probe_read:
  1105. return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
  1106. NULL : &bpf_probe_read_compat_proto;
  1107. case BPF_FUNC_probe_read_str:
  1108. return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
  1109. NULL : &bpf_probe_read_compat_str_proto;
  1110. #endif
  1111. #ifdef CONFIG_CGROUPS
  1112. case BPF_FUNC_get_current_cgroup_id:
  1113. return &bpf_get_current_cgroup_id_proto;
  1114. #endif
  1115. case BPF_FUNC_send_signal:
  1116. return &bpf_send_signal_proto;
  1117. case BPF_FUNC_send_signal_thread:
  1118. return &bpf_send_signal_thread_proto;
  1119. case BPF_FUNC_perf_event_read_value:
  1120. return &bpf_perf_event_read_value_proto;
  1121. case BPF_FUNC_get_ns_current_pid_tgid:
  1122. return &bpf_get_ns_current_pid_tgid_proto;
  1123. case BPF_FUNC_ringbuf_output:
  1124. return &bpf_ringbuf_output_proto;
  1125. case BPF_FUNC_ringbuf_reserve:
  1126. return &bpf_ringbuf_reserve_proto;
  1127. case BPF_FUNC_ringbuf_submit:
  1128. return &bpf_ringbuf_submit_proto;
  1129. case BPF_FUNC_ringbuf_discard:
  1130. return &bpf_ringbuf_discard_proto;
  1131. case BPF_FUNC_ringbuf_query:
  1132. return &bpf_ringbuf_query_proto;
  1133. case BPF_FUNC_jiffies64:
  1134. return &bpf_jiffies64_proto;
  1135. case BPF_FUNC_get_task_stack:
  1136. return &bpf_get_task_stack_proto;
  1137. case BPF_FUNC_copy_from_user:
  1138. return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
  1139. case BPF_FUNC_snprintf_btf:
  1140. return &bpf_snprintf_btf_proto;
  1141. case BPF_FUNC_per_cpu_ptr:
  1142. return &bpf_per_cpu_ptr_proto;
  1143. case BPF_FUNC_this_cpu_ptr:
  1144. return &bpf_this_cpu_ptr_proto;
  1145. default:
  1146. return NULL;
  1147. }
  1148. }
  1149. static const struct bpf_func_proto *
  1150. kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  1151. {
  1152. switch (func_id) {
  1153. case BPF_FUNC_perf_event_output:
  1154. return &bpf_perf_event_output_proto;
  1155. case BPF_FUNC_get_stackid:
  1156. return &bpf_get_stackid_proto;
  1157. case BPF_FUNC_get_stack:
  1158. return &bpf_get_stack_proto;
  1159. #ifdef CONFIG_BPF_KPROBE_OVERRIDE
  1160. case BPF_FUNC_override_return:
  1161. return &bpf_override_return_proto;
  1162. #endif
  1163. default:
  1164. return bpf_tracing_func_proto(func_id, prog);
  1165. }
  1166. }
  1167. /* bpf+kprobe programs can access fields of 'struct pt_regs' */
  1168. static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
  1169. const struct bpf_prog *prog,
  1170. struct bpf_insn_access_aux *info)
  1171. {
  1172. if (off < 0 || off >= sizeof(struct pt_regs))
  1173. return false;
  1174. if (type != BPF_READ)
  1175. return false;
  1176. if (off % size != 0)
  1177. return false;
  1178. /*
  1179. * Assertion for 32 bit to make sure last 8 byte access
  1180. * (BPF_DW) to the last 4 byte member is disallowed.
  1181. */
  1182. if (off + size > sizeof(struct pt_regs))
  1183. return false;
  1184. return true;
  1185. }
  1186. const struct bpf_verifier_ops kprobe_verifier_ops = {
  1187. .get_func_proto = kprobe_prog_func_proto,
  1188. .is_valid_access = kprobe_prog_is_valid_access,
  1189. };
  1190. const struct bpf_prog_ops kprobe_prog_ops = {
  1191. };
  1192. BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
  1193. u64, flags, void *, data, u64, size)
  1194. {
  1195. struct pt_regs *regs = *(struct pt_regs **)tp_buff;
  1196. /*
  1197. * r1 points to perf tracepoint buffer where first 8 bytes are hidden
  1198. * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
  1199. * from there and call the same bpf_perf_event_output() helper inline.
  1200. */
  1201. return ____bpf_perf_event_output(regs, map, flags, data, size);
  1202. }
  1203. static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
  1204. .func = bpf_perf_event_output_tp,
  1205. .gpl_only = true,
  1206. .ret_type = RET_INTEGER,
  1207. .arg1_type = ARG_PTR_TO_CTX,
  1208. .arg2_type = ARG_CONST_MAP_PTR,
  1209. .arg3_type = ARG_ANYTHING,
  1210. .arg4_type = ARG_PTR_TO_MEM,
  1211. .arg5_type = ARG_CONST_SIZE_OR_ZERO,
  1212. };
  1213. BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
  1214. u64, flags)
  1215. {
  1216. struct pt_regs *regs = *(struct pt_regs **)tp_buff;
  1217. /*
  1218. * Same comment as in bpf_perf_event_output_tp(), only that this time
  1219. * the other helper's function body cannot be inlined due to being
  1220. * external, thus we need to call the raw helper function.
  1221. */
  1222. return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
  1223. flags, 0, 0);
  1224. }
  1225. static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
  1226. .func = bpf_get_stackid_tp,
  1227. .gpl_only = true,
  1228. .ret_type = RET_INTEGER,
  1229. .arg1_type = ARG_PTR_TO_CTX,
  1230. .arg2_type = ARG_CONST_MAP_PTR,
  1231. .arg3_type = ARG_ANYTHING,
  1232. };
  1233. BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
  1234. u64, flags)
  1235. {
  1236. struct pt_regs *regs = *(struct pt_regs **)tp_buff;
  1237. return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
  1238. (unsigned long) size, flags, 0);
  1239. }
  1240. static const struct bpf_func_proto bpf_get_stack_proto_tp = {
  1241. .func = bpf_get_stack_tp,
  1242. .gpl_only = true,
  1243. .ret_type = RET_INTEGER,
  1244. .arg1_type = ARG_PTR_TO_CTX,
  1245. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  1246. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  1247. .arg4_type = ARG_ANYTHING,
  1248. };
  1249. static const struct bpf_func_proto *
  1250. tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  1251. {
  1252. switch (func_id) {
  1253. case BPF_FUNC_perf_event_output:
  1254. return &bpf_perf_event_output_proto_tp;
  1255. case BPF_FUNC_get_stackid:
  1256. return &bpf_get_stackid_proto_tp;
  1257. case BPF_FUNC_get_stack:
  1258. return &bpf_get_stack_proto_tp;
  1259. default:
  1260. return bpf_tracing_func_proto(func_id, prog);
  1261. }
  1262. }
  1263. static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
  1264. const struct bpf_prog *prog,
  1265. struct bpf_insn_access_aux *info)
  1266. {
  1267. if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
  1268. return false;
  1269. if (type != BPF_READ)
  1270. return false;
  1271. if (off % size != 0)
  1272. return false;
  1273. BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
  1274. return true;
  1275. }
  1276. const struct bpf_verifier_ops tracepoint_verifier_ops = {
  1277. .get_func_proto = tp_prog_func_proto,
  1278. .is_valid_access = tp_prog_is_valid_access,
  1279. };
  1280. const struct bpf_prog_ops tracepoint_prog_ops = {
  1281. };
  1282. BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
  1283. struct bpf_perf_event_value *, buf, u32, size)
  1284. {
  1285. int err = -EINVAL;
  1286. if (unlikely(size != sizeof(struct bpf_perf_event_value)))
  1287. goto clear;
  1288. err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
  1289. &buf->running);
  1290. if (unlikely(err))
  1291. goto clear;
  1292. return 0;
  1293. clear:
  1294. memset(buf, 0, size);
  1295. return err;
  1296. }
  1297. static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
  1298. .func = bpf_perf_prog_read_value,
  1299. .gpl_only = true,
  1300. .ret_type = RET_INTEGER,
  1301. .arg1_type = ARG_PTR_TO_CTX,
  1302. .arg2_type = ARG_PTR_TO_UNINIT_MEM,
  1303. .arg3_type = ARG_CONST_SIZE,
  1304. };
  1305. BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
  1306. void *, buf, u32, size, u64, flags)
  1307. {
  1308. static const u32 br_entry_size = sizeof(struct perf_branch_entry);
  1309. struct perf_branch_stack *br_stack = ctx->data->br_stack;
  1310. u32 to_copy;
  1311. if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
  1312. return -EINVAL;
  1313. if (unlikely(!br_stack))
  1314. return -ENOENT;
  1315. if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
  1316. return br_stack->nr * br_entry_size;
  1317. if (!buf || (size % br_entry_size != 0))
  1318. return -EINVAL;
  1319. to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
  1320. memcpy(buf, br_stack->entries, to_copy);
  1321. return to_copy;
  1322. }
  1323. static const struct bpf_func_proto bpf_read_branch_records_proto = {
  1324. .func = bpf_read_branch_records,
  1325. .gpl_only = true,
  1326. .ret_type = RET_INTEGER,
  1327. .arg1_type = ARG_PTR_TO_CTX,
  1328. .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
  1329. .arg3_type = ARG_CONST_SIZE_OR_ZERO,
  1330. .arg4_type = ARG_ANYTHING,
  1331. };
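/*
 * Illustrative sketch, not part of this file: reading branch records from a
 * perf_event BPF program. A hardware event with branch sampling enabled is
 * assumed; a first call with the size flag queries how much data exists.
 *
 *     struct perf_branch_entry entries[16];
 *     long sz;
 *
 *     sz = bpf_read_branch_records(ctx, NULL, 0,
 *                                  BPF_F_GET_BRANCH_RECORDS_SIZE);
 *     if (sz > 0)
 *             bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 */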
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_pe;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto_pe;
        case BPF_FUNC_perf_prog_read_value:
                return &bpf_perf_prog_read_value_proto;
        case BPF_FUNC_read_branch_records:
                return &bpf_read_branch_records_proto;
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
        struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
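/*
 * Each helper below grabs one of the three per-CPU pt_regs slots above and
 * must pair get_bpf_raw_tp_regs() with put_bpf_raw_tp_regs(). If more than
 * ARRAY_SIZE(tp_regs->regs) users nest on one CPU, -EBUSY is returned.
 */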
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
        struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
        int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
                this_cpu_dec(bpf_raw_tp_nest_level);
                return ERR_PTR(-EBUSY);
        }

        return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
        this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        ret = ____bpf_perf_event_output(regs, map, flags, data, size);

        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
        .func = bpf_perf_event_output_raw_tp,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_MEM,
        .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
        ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                              flags, 0, 0);
        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
        .func = bpf_get_stackid_raw_tp,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
           void *, buf, u32, size, u64, flags)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
                            (unsigned long) size, flags, 0);
        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
        .func = bpf_get_stack_raw_tp,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_PTR_TO_MEM,
        .arg3_type = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_raw_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_raw_tp;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto_raw_tp;
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
#ifdef CONFIG_NET
        case BPF_FUNC_skb_output:
                return &bpf_skb_output_proto;
        case BPF_FUNC_xdp_output:
                return &bpf_xdp_output_proto;
        case BPF_FUNC_skc_to_tcp6_sock:
                return &bpf_skc_to_tcp6_sock_proto;
        case BPF_FUNC_skc_to_tcp_sock:
                return &bpf_skc_to_tcp_sock_proto;
        case BPF_FUNC_skc_to_tcp_timewait_sock:
                return &bpf_skc_to_tcp_timewait_sock_proto;
        case BPF_FUNC_skc_to_tcp_request_sock:
                return &bpf_skc_to_tcp_request_sock_proto;
        case BPF_FUNC_skc_to_udp6_sock:
                return &bpf_skc_to_udp6_sock_proto;
#endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_printf_proto :
                       NULL;
        case BPF_FUNC_seq_write:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_write_proto :
                       NULL;
        case BPF_FUNC_seq_printf_btf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_printf_btf_proto :
                       NULL;
        case BPF_FUNC_d_path:
                return &bpf_d_path_proto;
        default:
                return raw_tp_prog_func_proto(func_id, prog);
        }
}
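/*
 * Raw tracepoint programs see the context as an array of up to
 * MAX_BPF_FUNC_ARGS u64 arguments; only aligned reads inside that window
 * are allowed.
 */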
static bool raw_tp_prog_is_valid_access(int off, int size,
                                        enum bpf_access_type type,
                                        const struct bpf_prog *prog,
                                        struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
                                         const struct bpf_prog *prog,
                                         struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
        .get_func_proto = raw_tp_prog_func_proto,
        .is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
        .test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
        .get_func_proto = tracing_prog_func_proto,
        .is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
        .test_run = bpf_prog_test_run_tracing,
};
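/*
 * Writable raw tracepoints additionally expose a buffer pointer at offset 0.
 * It must be read as a full u64 and gets reg_type PTR_TO_TP_BUFFER, so writes
 * through it can be checked against the tracepoint's writable size.
 */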
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
                                                 enum bpf_access_type type,
                                                 const struct bpf_prog *prog,
                                                 struct bpf_insn_access_aux *info)
{
        if (off == 0) {
                if (size != sizeof(u64) || type != BPF_READ)
                        return false;
                info->reg_type = PTR_TO_TP_BUFFER;
        }
        return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
        .get_func_proto = raw_tp_prog_func_proto,
        .is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
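/*
 * Perf event programs read struct bpf_perf_event_data. sample_period and addr
 * permit narrow (smaller than u64) loads; every other offset maps into the
 * saved pt_regs and must be accessed with a register-sized load.
 */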
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    const struct bpf_prog *prog,
                                    struct bpf_insn_access_aux *info)
{
        const int size_u64 = sizeof(u64);

        if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0) {
                if (sizeof(unsigned long) != 4)
                        return false;
                if (size != 8)
                        return false;
                if (off % size != 4)
                        return false;
        }

        switch (off) {
        case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
                bpf_ctx_record_field_size(info, size_u64);
                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
                        return false;
                break;
        case bpf_ctx_range(struct bpf_perf_event_data, addr):
                bpf_ctx_record_field_size(info, size_u64);
                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
                        return false;
                break;
        default:
                if (size != sizeof(long))
                        return false;
        }

        return true;
}
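/*
 * Rewrite loads from struct bpf_perf_event_data (the user-visible ctx) into
 * two loads through struct bpf_perf_event_data_kern, e.g. a read of
 * sample_period becomes: dst = ctx->data; dst = dst->period.
 */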
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
                                      const struct bpf_insn *si,
                                      struct bpf_insn *insn_buf,
                                      struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct bpf_perf_event_data, sample_period):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct perf_sample_data, period, 8,
                                                     target_size));
                break;
        case offsetof(struct bpf_perf_event_data, addr):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct perf_sample_data, addr, 8,
                                                     target_size));
                break;
        default:
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       regs), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, regs));
                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
                                      si->off);
                break;
        }

        return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
        .get_func_proto = pe_prog_func_proto,
        .is_valid_access = pe_prog_is_valid_access,
        .convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64
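/*
 * Attach @prog to the prog_array of @event's tp_event. Each perf_event holds
 * at most one prog, and a tp_event accepts at most BPF_TRACE_MAX_PROGS
 * programs in total.
 */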
int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
{
        struct bpf_prog_array *old_array;
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;

        /*
         * Kprobe override only works if they are on the function entry,
         * and only if they are on the opt-in list.
         */
        if (prog->kprobe_override &&
            (!trace_kprobe_on_func_entry(event->tp_event) ||
             !trace_kprobe_error_injectable(event->tp_event)))
                return -EINVAL;

        mutex_lock(&bpf_event_mutex);

        if (event->prog)
                goto unlock;

        old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
        if (old_array &&
            bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
                ret = -E2BIG;
                goto unlock;
        }

        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;

        /* set the new array to event->tp_event and set event->prog */
        event->prog = prog;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&bpf_event_mutex);
        return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
        struct bpf_prog_array *old_array;
        struct bpf_prog_array *new_array;
        int ret;

        mutex_lock(&bpf_event_mutex);

        if (!event->prog)
                goto unlock;

        old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
        ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
        if (ret == -ENOENT)
                goto unlock;
        if (ret < 0) {
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
                bpf_prog_array_free(old_array);
        }

        bpf_prog_put(event->prog);
        event->prog = NULL;

unlock:
        mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        struct perf_event_query_bpf __user *uquery = info;
        struct perf_event_query_bpf query = {};
        struct bpf_prog_array *progs;
        u32 *ids, prog_cnt, ids_len;
        int ret;

        if (!perfmon_capable())
                return -EPERM;
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;

        ids_len = query.ids_len;
        if (ids_len > BPF_TRACE_MAX_PROGS)
                return -E2BIG;
        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
        if (!ids)
                return -ENOMEM;
        /*
         * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
         * is required when user only wants to check for uquery->prog_cnt.
         * There is no need to check for it since the case is handled
         * gracefully in bpf_prog_array_copy_info.
         */

        mutex_lock(&bpf_event_mutex);
        progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
        ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
        mutex_unlock(&bpf_event_mutex);

        if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
            copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
                ret = -EFAULT;

        kfree(ids);
        return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
        struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

        for (; btp < __stop__bpf_raw_tp; btp++) {
                if (!strcmp(btp->tp->name, name))
                        return btp;
        }

        return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
        struct module *mod;

        preempt_disable();
        mod = __module_address((unsigned long)btp);
        module_put(mod);
        preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
        cant_sleep();
        rcu_read_lock();
        (void) BPF_PROG_RUN(prog, args);
        rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
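/*
 * For illustration, BPF_TRACE_DEFN_x(2) expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */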
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
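/*
 * Attach @prog to @btp's tracepoint after checking that the program neither
 * reads ctx beyond the tracepoint's btp->num_args * sizeof(u64) arguments nor
 * writes past btp->writable_size.
 */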
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        struct tracepoint *tp = btp->tp;

        /*
         * check that program doesn't access arguments beyond what's
         * available in this tracepoint
         */
        if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
                return -EINVAL;

        if (prog->aux->max_tp_access > btp->writable_size)
                return -EINVAL;

        return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
                                                   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr)
{
        bool is_tracepoint, is_syscall_tp;
        struct bpf_prog *prog;
        int flags, err = 0;

        prog = event->prog;
        if (!prog)
                return -ENOENT;

        /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
        if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
                return -EOPNOTSUPP;

        *prog_id = prog->aux->id;
        flags = event->tp_event->flags;
        is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
        is_syscall_tp = is_syscall_trace_event(event->tp_event);

        if (is_tracepoint || is_syscall_tp) {
                *buf = is_tracepoint ? event->tp_event->tp->name
                                     : event->tp_event->name;
                *fd_type = BPF_FD_TYPE_TRACEPOINT;
                *probe_offset = 0x0;
                *probe_addr = 0x0;
        } else {
                /* kprobe/uprobe */
                err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
                if (flags & TRACE_EVENT_FL_KPROBE)
                        err = bpf_get_kprobe_info(event, fd_type, buf,
                                                  probe_offset, probe_addr,
                                                  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
                if (flags & TRACE_EVENT_FL_UPROBE)
                        err = bpf_get_uprobe_info(event, fd_type, buf,
                                                  probe_offset,
                                                  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
        }

        return err;
}

static int __init send_signal_irq_work_init(void)
{
        int cpu;
        struct send_signal_irq_work *work;

        for_each_possible_cpu(cpu) {
                work = per_cpu_ptr(&send_signal_work, cpu);
                init_irq_work(&work->irq_work, do_bpf_send_signal);
        }
        return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
                            void *module)
{
        struct bpf_trace_module *btm, *tmp;
        struct module *mod = module;
        int ret = 0;

        if (mod->num_bpf_raw_events == 0 ||
            (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
                goto out;

        mutex_lock(&bpf_module_mutex);

        switch (op) {
        case MODULE_STATE_COMING:
                btm = kzalloc(sizeof(*btm), GFP_KERNEL);
                if (btm) {
                        btm->module = module;
                        list_add(&btm->list, &bpf_trace_modules);
                } else {
                        ret = -ENOMEM;
                }
                break;
        case MODULE_STATE_GOING:
                list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
                        if (btm->module == module) {
                                list_del(&btm->list);
                                kfree(btm);
                                break;
                        }
                }
                break;
        }

        mutex_unlock(&bpf_module_mutex);

out:
        return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
        .notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
        register_module_notifier(&bpf_module_nb);
        return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */