kprobes.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Kernel Probes (KProbes)
  4. * kernel/kprobes.c
  5. *
  6. * Copyright (C) IBM Corporation, 2002, 2004
  7. *
  8. * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  9. * Probes initial implementation (includes suggestions from
  10. * Rusty Russell).
  11. * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  12. * hlists and exceptions notifier as suggested by Andi Kleen.
  13. * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  14. * interface to access function arguments.
  15. * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  16. * exceptions notifier to be first on the priority list.
  17. * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  18. * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  19. * <prasanna@in.ibm.com> added function-return probes.
  20. */
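/*
 * A minimal usage sketch of the kprobes API implemented in this file: a
 * module registers a pre_handler/post_handler pair on one symbol. The
 * probed symbol "vfs_read" and all "example_*" names are illustrative
 * assumptions, not part of this file; treat this as a sketch, not a
 * reference implementation.
 */
#if 0
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe pre_handler: hit at %pS\n", p->addr);
	return 0;	/* 0 means: continue with normal probe handling */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("kprobe post_handler: flags = 0x%lx\n", flags);
}

static struct kprobe example_kp = {
	.symbol_name	= "vfs_read",	/* assumed symbol, for illustration */
	.pre_handler	= example_pre,
	.post_handler	= example_post,	/* note: prevents jump optimization */
};

static int __init example_kprobe_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_kprobe_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_kprobe_init);
module_exit(example_kprobe_exit);
MODULE_LICENSE("GPL");
#endif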
  21. #include <linux/kprobes.h>
  22. #include <linux/hash.h>
  23. #include <linux/init.h>
  24. #include <linux/slab.h>
  25. #include <linux/stddef.h>
  26. #include <linux/export.h>
  27. #include <linux/moduleloader.h>
  28. #include <linux/kallsyms.h>
  29. #include <linux/freezer.h>
  30. #include <linux/seq_file.h>
  31. #include <linux/debugfs.h>
  32. #include <linux/sysctl.h>
  33. #include <linux/kdebug.h>
  34. #include <linux/memory.h>
  35. #include <linux/ftrace.h>
  36. #include <linux/cpu.h>
  37. #include <linux/jump_label.h>
  38. #include <linux/perf_event.h>
  39. #include <linux/static_call.h>
  40. #include <asm/sections.h>
  41. #include <asm/cacheflush.h>
  42. #include <asm/errno.h>
  43. #include <linux/uaccess.h>
  44. #define KPROBE_HASH_BITS 6
  45. #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  46. static int kprobes_initialized;
  47. /* kprobe_table can be accessed by
48. * - normal hlist traversal and RCU add/del while kprobe_mutex is held,
49. * or
50. * - RCU hlist traversal with preemption disabled (breakpoint handlers).
  51. */
  52. static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  53. static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  54. /* NOTE: change this value only with kprobe_mutex held */
  55. static bool kprobes_all_disarmed;
  56. /* This protects kprobe_table and optimizing_list */
  57. static DEFINE_MUTEX(kprobe_mutex);
  58. static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  59. static struct {
  60. raw_spinlock_t lock ____cacheline_aligned_in_smp;
  61. } kretprobe_table_locks[KPROBE_TABLE_SIZE];
  62. kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
  63. unsigned int __unused)
  64. {
  65. return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
  66. }
  67. static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  68. {
  69. return &(kretprobe_table_locks[hash].lock);
  70. }
  71. /* Blacklist -- list of struct kprobe_blacklist_entry */
  72. static LIST_HEAD(kprobe_blacklist);
  73. #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  74. /*
  75. * kprobe->ainsn.insn points to the copy of the instruction to be
  76. * single-stepped. x86_64, POWER4 and above have no-exec support and
  77. * stepping on the instruction on a vmalloced/kmalloced/data page
  78. * is a recipe for disaster
  79. */
  80. struct kprobe_insn_page {
  81. struct list_head list;
  82. kprobe_opcode_t *insns; /* Page of instruction slots */
  83. struct kprobe_insn_cache *cache;
  84. int nused;
  85. int ngarbage;
  86. char slot_used[];
  87. };
  88. #define KPROBE_INSN_PAGE_SIZE(slots) \
  89. (offsetof(struct kprobe_insn_page, slot_used) + \
  90. (sizeof(char) * (slots)))
  91. static int slots_per_page(struct kprobe_insn_cache *c)
  92. {
  93. return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
  94. }
  95. enum kprobe_slot_state {
  96. SLOT_CLEAN = 0,
  97. SLOT_DIRTY = 1,
  98. SLOT_USED = 2,
  99. };
  100. void __weak *alloc_insn_page(void)
  101. {
  102. return module_alloc(PAGE_SIZE);
  103. }
  104. void __weak free_insn_page(void *page)
  105. {
  106. module_memfree(page);
  107. }
  108. struct kprobe_insn_cache kprobe_insn_slots = {
  109. .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
  110. .alloc = alloc_insn_page,
  111. .free = free_insn_page,
  112. .sym = KPROBE_INSN_PAGE_SYM,
  113. .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
  114. .insn_size = MAX_INSN_SIZE,
  115. .nr_garbage = 0,
  116. };
  117. static int collect_garbage_slots(struct kprobe_insn_cache *c);
  118. /**
  119. * __get_insn_slot() - Find a slot on an executable page for an instruction.
  120. * We allocate an executable page if there's no room on existing ones.
  121. */
  122. kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
  123. {
  124. struct kprobe_insn_page *kip;
  125. kprobe_opcode_t *slot = NULL;
  126. /* Since the slot array is not protected by rcu, we need a mutex */
  127. mutex_lock(&c->mutex);
  128. retry:
  129. rcu_read_lock();
  130. list_for_each_entry_rcu(kip, &c->pages, list) {
  131. if (kip->nused < slots_per_page(c)) {
  132. int i;
  133. for (i = 0; i < slots_per_page(c); i++) {
  134. if (kip->slot_used[i] == SLOT_CLEAN) {
  135. kip->slot_used[i] = SLOT_USED;
  136. kip->nused++;
  137. slot = kip->insns + (i * c->insn_size);
  138. rcu_read_unlock();
  139. goto out;
  140. }
  141. }
  142. /* kip->nused is broken. Fix it. */
  143. kip->nused = slots_per_page(c);
  144. WARN_ON(1);
  145. }
  146. }
  147. rcu_read_unlock();
148. /* If there are any garbage slots, collect them and try again. */
  149. if (c->nr_garbage && collect_garbage_slots(c) == 0)
  150. goto retry;
  151. /* All out of space. Need to allocate a new page. */
  152. kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
  153. if (!kip)
  154. goto out;
  155. /*
  156. * Use module_alloc so this page is within +/- 2GB of where the
  157. * kernel image and loaded module images reside. This is required
  158. * so x86_64 can correctly handle the %rip-relative fixups.
  159. */
  160. kip->insns = c->alloc();
  161. if (!kip->insns) {
  162. kfree(kip);
  163. goto out;
  164. }
  165. INIT_LIST_HEAD(&kip->list);
  166. memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
  167. kip->slot_used[0] = SLOT_USED;
  168. kip->nused = 1;
  169. kip->ngarbage = 0;
  170. kip->cache = c;
  171. list_add_rcu(&kip->list, &c->pages);
  172. slot = kip->insns;
  173. /* Record the perf ksymbol register event after adding the page */
  174. perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
  175. PAGE_SIZE, false, c->sym);
  176. out:
  177. mutex_unlock(&c->mutex);
  178. return slot;
  179. }
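/*
 * A minimal sketch of how arch code typically consumes this slot cache,
 * assuming the get_insn_slot()/free_insn_slot() wrappers from
 * <linux/kprobes.h> and an arch_specific_insn with an 'insn' member
 * (as on x86). The "example_*" helpers are illustrative only.
 */
#if 0
static int example_arch_prepare(struct kprobe *p)
{
	/* Reserve one executable slot and copy the probed instruction. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}

static void example_arch_remove(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* dirty=1: the slot is reclaimed later by garbage collection */
		free_insn_slot(p->ainsn.insn, 1);
		p->ainsn.insn = NULL;
	}
}
#endif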
180. /* Return 1 if all garbage is collected, otherwise 0. */
  181. static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
  182. {
  183. kip->slot_used[idx] = SLOT_CLEAN;
  184. kip->nused--;
  185. if (kip->nused == 0) {
  186. /*
  187. * Page is no longer in use. Free it unless
  188. * it's the last one. We keep the last one
  189. * so as not to have to set it up again the
  190. * next time somebody inserts a probe.
  191. */
  192. if (!list_is_singular(&kip->list)) {
  193. /*
  194. * Record perf ksymbol unregister event before removing
  195. * the page.
  196. */
  197. perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
  198. (unsigned long)kip->insns, PAGE_SIZE, true,
  199. kip->cache->sym);
  200. list_del_rcu(&kip->list);
  201. synchronize_rcu();
  202. kip->cache->free(kip->insns);
  203. kfree(kip);
  204. }
  205. return 1;
  206. }
  207. return 0;
  208. }
  209. static int collect_garbage_slots(struct kprobe_insn_cache *c)
  210. {
  211. struct kprobe_insn_page *kip, *next;
212. /* Ensure no one is still running on the garbage slots */
  213. synchronize_rcu();
  214. list_for_each_entry_safe(kip, next, &c->pages, list) {
  215. int i;
  216. if (kip->ngarbage == 0)
  217. continue;
218. kip->ngarbage = 0; /* we will collect all garbage slots */
  219. for (i = 0; i < slots_per_page(c); i++) {
  220. if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
  221. break;
  222. }
  223. }
  224. c->nr_garbage = 0;
  225. return 0;
  226. }
  227. void __free_insn_slot(struct kprobe_insn_cache *c,
  228. kprobe_opcode_t *slot, int dirty)
  229. {
  230. struct kprobe_insn_page *kip;
  231. long idx;
  232. mutex_lock(&c->mutex);
  233. rcu_read_lock();
  234. list_for_each_entry_rcu(kip, &c->pages, list) {
  235. idx = ((long)slot - (long)kip->insns) /
  236. (c->insn_size * sizeof(kprobe_opcode_t));
  237. if (idx >= 0 && idx < slots_per_page(c))
  238. goto out;
  239. }
  240. /* Could not find this slot. */
  241. WARN_ON(1);
  242. kip = NULL;
  243. out:
  244. rcu_read_unlock();
  245. /* Mark and sweep: this may sleep */
  246. if (kip) {
  247. /* Check double free */
  248. WARN_ON(kip->slot_used[idx] != SLOT_USED);
  249. if (dirty) {
  250. kip->slot_used[idx] = SLOT_DIRTY;
  251. kip->ngarbage++;
  252. if (++c->nr_garbage > slots_per_page(c))
  253. collect_garbage_slots(c);
  254. } else {
  255. collect_one_slot(kip, idx);
  256. }
  257. }
  258. mutex_unlock(&c->mutex);
  259. }
  260. /*
261. * Check whether the given address is on a page of kprobe instruction slots.
262. * This is used for checking whether an address found on a stack
263. * is in a text area or not.
  264. */
  265. bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
  266. {
  267. struct kprobe_insn_page *kip;
  268. bool ret = false;
  269. rcu_read_lock();
  270. list_for_each_entry_rcu(kip, &c->pages, list) {
  271. if (addr >= (unsigned long)kip->insns &&
  272. addr < (unsigned long)kip->insns + PAGE_SIZE) {
  273. ret = true;
  274. break;
  275. }
  276. }
  277. rcu_read_unlock();
  278. return ret;
  279. }
  280. int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
  281. unsigned long *value, char *type, char *sym)
  282. {
  283. struct kprobe_insn_page *kip;
  284. int ret = -ERANGE;
  285. rcu_read_lock();
  286. list_for_each_entry_rcu(kip, &c->pages, list) {
  287. if ((*symnum)--)
  288. continue;
  289. strlcpy(sym, c->sym, KSYM_NAME_LEN);
  290. *type = 't';
  291. *value = (unsigned long)kip->insns;
  292. ret = 0;
  293. break;
  294. }
  295. rcu_read_unlock();
  296. return ret;
  297. }
  298. #ifdef CONFIG_OPTPROBES
  299. /* For optimized_kprobe buffer */
  300. struct kprobe_insn_cache kprobe_optinsn_slots = {
  301. .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
  302. .alloc = alloc_insn_page,
  303. .free = free_insn_page,
  304. .sym = KPROBE_OPTINSN_PAGE_SYM,
  305. .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
  306. /* .insn_size is initialized later */
  307. .nr_garbage = 0,
  308. };
  309. #endif
  310. #endif
311. /* We have preemption disabled, so it is safe to use the __ versions */
  312. static inline void set_kprobe_instance(struct kprobe *kp)
  313. {
  314. __this_cpu_write(kprobe_instance, kp);
  315. }
  316. static inline void reset_kprobe_instance(void)
  317. {
  318. __this_cpu_write(kprobe_instance, NULL);
  319. }
  320. /*
  321. * This routine is called either:
  322. * - under the kprobe_mutex - during kprobe_[un]register()
  323. * OR
  324. * - with preemption disabled - from arch/xxx/kernel/kprobes.c
  325. */
  326. struct kprobe *get_kprobe(void *addr)
  327. {
  328. struct hlist_head *head;
  329. struct kprobe *p;
  330. head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
  331. hlist_for_each_entry_rcu(p, head, hlist,
  332. lockdep_is_held(&kprobe_mutex)) {
  333. if (p->addr == addr)
  334. return p;
  335. }
  336. return NULL;
  337. }
  338. NOKPROBE_SYMBOL(get_kprobe);
  339. static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
  340. /* Return true if the kprobe is an aggregator */
  341. static inline int kprobe_aggrprobe(struct kprobe *p)
  342. {
  343. return p->pre_handler == aggr_pre_handler;
  344. }
  345. /* Return true(!0) if the kprobe is unused */
  346. static inline int kprobe_unused(struct kprobe *p)
  347. {
  348. return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
  349. list_empty(&p->list);
  350. }
  351. /*
  352. * Keep all fields in the kprobe consistent
  353. */
  354. static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
  355. {
  356. memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
  357. memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
  358. }
  359. #ifdef CONFIG_OPTPROBES
  360. /* NOTE: change this value only with kprobe_mutex held */
  361. static bool kprobes_allow_optimization;
  362. /*
363. * Call all pre_handlers on the list, but ignore their return values.
364. * This must be called from the arch-dependent optimized caller.
  365. */
  366. void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
  367. {
  368. struct kprobe *kp;
  369. list_for_each_entry_rcu(kp, &p->list, list) {
  370. if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  371. set_kprobe_instance(kp);
  372. kp->pre_handler(kp, regs);
  373. }
  374. reset_kprobe_instance();
  375. }
  376. }
  377. NOKPROBE_SYMBOL(opt_pre_handler);
  378. /* Free optimized instructions and optimized_kprobe */
  379. static void free_aggr_kprobe(struct kprobe *p)
  380. {
  381. struct optimized_kprobe *op;
  382. op = container_of(p, struct optimized_kprobe, kp);
  383. arch_remove_optimized_kprobe(op);
  384. arch_remove_kprobe(p);
  385. kfree(op);
  386. }
  387. /* Return true(!0) if the kprobe is ready for optimization. */
  388. static inline int kprobe_optready(struct kprobe *p)
  389. {
  390. struct optimized_kprobe *op;
  391. if (kprobe_aggrprobe(p)) {
  392. op = container_of(p, struct optimized_kprobe, kp);
  393. return arch_prepared_optinsn(&op->optinsn);
  394. }
  395. return 0;
  396. }
  397. /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
  398. static inline int kprobe_disarmed(struct kprobe *p)
  399. {
  400. struct optimized_kprobe *op;
  401. /* If kprobe is not aggr/opt probe, just return kprobe is disabled */
  402. if (!kprobe_aggrprobe(p))
  403. return kprobe_disabled(p);
  404. op = container_of(p, struct optimized_kprobe, kp);
  405. return kprobe_disabled(p) && list_empty(&op->list);
  406. }
  407. /* Return true(!0) if the probe is queued on (un)optimizing lists */
  408. static int kprobe_queued(struct kprobe *p)
  409. {
  410. struct optimized_kprobe *op;
  411. if (kprobe_aggrprobe(p)) {
  412. op = container_of(p, struct optimized_kprobe, kp);
  413. if (!list_empty(&op->list))
  414. return 1;
  415. }
  416. return 0;
  417. }
  418. /*
  419. * Return an optimized kprobe whose optimizing code replaces
420. * the instructions including addr (excluding the breakpoint itself).
  421. */
  422. static struct kprobe *get_optimized_kprobe(unsigned long addr)
  423. {
  424. int i;
  425. struct kprobe *p = NULL;
  426. struct optimized_kprobe *op;
  427. /* Don't check i == 0, since that is a breakpoint case. */
  428. for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
  429. p = get_kprobe((void *)(addr - i));
  430. if (p && kprobe_optready(p)) {
  431. op = container_of(p, struct optimized_kprobe, kp);
  432. if (arch_within_optimized_kprobe(op, addr))
  433. return p;
  434. }
  435. return NULL;
  436. }
  437. /* Optimization staging list, protected by kprobe_mutex */
  438. static LIST_HEAD(optimizing_list);
  439. static LIST_HEAD(unoptimizing_list);
  440. static LIST_HEAD(freeing_list);
  441. static void kprobe_optimizer(struct work_struct *work);
  442. static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  443. #define OPTIMIZE_DELAY 5
  444. /*
  445. * Optimize (replace a breakpoint with a jump) kprobes listed on
  446. * optimizing_list.
  447. */
  448. static void do_optimize_kprobes(void)
  449. {
  450. lockdep_assert_held(&text_mutex);
  451. /*
452. * The optimization/unoptimization refers to online_cpus via
453. * stop_machine(), while cpu-hotplug modifies online_cpus. At the
454. * same time, text_mutex is held both in cpu-hotplug paths and here.
455. * This combination can cause a deadlock (cpu-hotplug tries to lock
456. * text_mutex but stop_machine() cannot be done because online_cpus
457. * has been changed).
458. * To avoid this deadlock, the caller must have locked cpu-hotplug,
459. * preventing cpu-hotplug from running outside of text_mutex locking.
  460. */
  461. lockdep_assert_cpus_held();
462. /* Optimization is never done while kprobes are disarmed */
  463. if (kprobes_all_disarmed || !kprobes_allow_optimization ||
  464. list_empty(&optimizing_list))
  465. return;
  466. arch_optimize_kprobes(&optimizing_list);
  467. }
  468. /*
  469. * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
470. * if needed) kprobes listed on unoptimizing_list.
  471. */
  472. static void do_unoptimize_kprobes(void)
  473. {
  474. struct optimized_kprobe *op, *tmp;
  475. lockdep_assert_held(&text_mutex);
  476. /* See comment in do_optimize_kprobes() */
  477. lockdep_assert_cpus_held();
478. /* Unoptimization must be done at any time */
  479. if (list_empty(&unoptimizing_list))
  480. return;
  481. arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
482. /* Loop over the freeing_list for disarming */
  483. list_for_each_entry_safe(op, tmp, &freeing_list, list) {
  484. /* Switching from detour code to origin */
  485. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  486. /* Disarm probes if marked disabled */
  487. if (kprobe_disabled(&op->kp))
  488. arch_disarm_kprobe(&op->kp);
  489. if (kprobe_unused(&op->kp)) {
  490. /*
  491. * Remove unused probes from hash list. After waiting
  492. * for synchronization, these probes are reclaimed.
  493. * (reclaiming is done by do_free_cleaned_kprobes.)
  494. */
  495. hlist_del_rcu(&op->kp.hlist);
  496. } else
  497. list_del_init(&op->list);
  498. }
  499. }
  500. /* Reclaim all kprobes on the free_list */
  501. static void do_free_cleaned_kprobes(void)
  502. {
  503. struct optimized_kprobe *op, *tmp;
  504. list_for_each_entry_safe(op, tmp, &freeing_list, list) {
  505. list_del_init(&op->list);
  506. if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
  507. /*
  508. * This must not happen, but if there is a kprobe
  509. * still in use, keep it on kprobes hash list.
  510. */
  511. continue;
  512. }
  513. free_aggr_kprobe(&op->kp);
  514. }
  515. }
  516. /* Start optimizer after OPTIMIZE_DELAY passed */
  517. static void kick_kprobe_optimizer(void)
  518. {
  519. schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
  520. }
  521. /* Kprobe jump optimizer */
  522. static void kprobe_optimizer(struct work_struct *work)
  523. {
  524. mutex_lock(&kprobe_mutex);
  525. cpus_read_lock();
  526. mutex_lock(&text_mutex);
  527. /*
  528. * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
529. * kprobes before waiting for the quiescence period.
  530. */
  531. do_unoptimize_kprobes();
  532. /*
533. * Step 2: Wait for a quiescence period to ensure that all potentially
534. * preempted tasks have scheduled normally. Because an optprobe
535. * may modify multiple instructions, there is a chance that the Nth
536. * instruction is preempted. In that case, such tasks could return
537. * into the 2nd-Nth byte of the jump instruction. This wait avoids that.
538. * Note that on a non-preemptive kernel, this is transparently converted
539. * to synchronize_sched() to wait for all interrupts to have completed.
  540. */
  541. synchronize_rcu_tasks();
542. /* Step 3: Optimize kprobes after the quiescence period */
  543. do_optimize_kprobes();
544. /* Step 4: Free cleaned kprobes after the quiescence period */
  545. do_free_cleaned_kprobes();
  546. mutex_unlock(&text_mutex);
  547. cpus_read_unlock();
  548. /* Step 5: Kick optimizer again if needed */
  549. if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
  550. kick_kprobe_optimizer();
  551. mutex_unlock(&kprobe_mutex);
  552. }
553. /* Wait for optimization and unoptimization to complete */
  554. void wait_for_kprobe_optimizer(void)
  555. {
  556. mutex_lock(&kprobe_mutex);
  557. while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
  558. mutex_unlock(&kprobe_mutex);
559. /* this will also make optimizing_work execute immediately */
  560. flush_delayed_work(&optimizing_work);
  561. /* @optimizing_work might not have been queued yet, relax */
  562. cpu_relax();
  563. mutex_lock(&kprobe_mutex);
  564. }
  565. mutex_unlock(&kprobe_mutex);
  566. }
  567. static bool optprobe_queued_unopt(struct optimized_kprobe *op)
  568. {
  569. struct optimized_kprobe *_op;
  570. list_for_each_entry(_op, &unoptimizing_list, list) {
  571. if (op == _op)
  572. return true;
  573. }
  574. return false;
  575. }
  576. /* Optimize kprobe if p is ready to be optimized */
  577. static void optimize_kprobe(struct kprobe *p)
  578. {
  579. struct optimized_kprobe *op;
  580. /* Check if the kprobe is disabled or not ready for optimization. */
  581. if (!kprobe_optready(p) || !kprobes_allow_optimization ||
  582. (kprobe_disabled(p) || kprobes_all_disarmed))
  583. return;
584. /* Kprobes with a post_handler cannot be optimized */
  585. if (p->post_handler)
  586. return;
  587. op = container_of(p, struct optimized_kprobe, kp);
588. /* Check that there are no other kprobes at the instructions to be optimized */
  589. if (arch_check_optimized_kprobe(op) < 0)
  590. return;
  591. /* Check if it is already optimized. */
  592. if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
  593. if (optprobe_queued_unopt(op)) {
  594. /* This is under unoptimizing. Just dequeue the probe */
  595. list_del_init(&op->list);
  596. }
  597. return;
  598. }
  599. op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
  600. /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
  601. if (WARN_ON_ONCE(!list_empty(&op->list)))
  602. return;
  603. list_add(&op->list, &optimizing_list);
  604. kick_kprobe_optimizer();
  605. }
  606. /* Short cut to direct unoptimizing */
  607. static void force_unoptimize_kprobe(struct optimized_kprobe *op)
  608. {
  609. lockdep_assert_cpus_held();
  610. arch_unoptimize_kprobe(op);
  611. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  612. }
  613. /* Unoptimize a kprobe if p is optimized */
  614. static void unoptimize_kprobe(struct kprobe *p, bool force)
  615. {
  616. struct optimized_kprobe *op;
  617. if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
618. return; /* This is neither an optprobe nor an optimized probe */
  619. op = container_of(p, struct optimized_kprobe, kp);
  620. if (!kprobe_optimized(p))
  621. return;
  622. if (!list_empty(&op->list)) {
  623. if (optprobe_queued_unopt(op)) {
  624. /* Queued in unoptimizing queue */
  625. if (force) {
  626. /*
  627. * Forcibly unoptimize the kprobe here, and queue it
  628. * in the freeing list for release afterwards.
  629. */
  630. force_unoptimize_kprobe(op);
  631. list_move(&op->list, &freeing_list);
  632. }
  633. } else {
  634. /* Dequeue from the optimizing queue */
  635. list_del_init(&op->list);
  636. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  637. }
  638. return;
  639. }
  640. /* Optimized kprobe case */
  641. if (force) {
  642. /* Forcibly update the code: this is a special case */
  643. force_unoptimize_kprobe(op);
  644. } else {
  645. list_add(&op->list, &unoptimizing_list);
  646. kick_kprobe_optimizer();
  647. }
  648. }
649. /* Cancel unoptimizing so the kprobe can be reused */
  650. static int reuse_unused_kprobe(struct kprobe *ap)
  651. {
  652. struct optimized_kprobe *op;
  653. /*
654. * An unused kprobe MUST be in the middle of delayed unoptimizing (which
655. * means the relative jump is still installed) and disabled.
  656. */
  657. op = container_of(ap, struct optimized_kprobe, kp);
  658. WARN_ON_ONCE(list_empty(&op->list));
  659. /* Enable the probe again */
  660. ap->flags &= ~KPROBE_FLAG_DISABLED;
  661. /* Optimize it again (remove from op->list) */
  662. if (!kprobe_optready(ap))
  663. return -EINVAL;
  664. optimize_kprobe(ap);
  665. return 0;
  666. }
  667. /* Remove optimized instructions */
  668. static void kill_optimized_kprobe(struct kprobe *p)
  669. {
  670. struct optimized_kprobe *op;
  671. op = container_of(p, struct optimized_kprobe, kp);
  672. if (!list_empty(&op->list))
  673. /* Dequeue from the (un)optimization queue */
  674. list_del_init(&op->list);
  675. op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
  676. if (kprobe_unused(p)) {
  677. /* Enqueue if it is unused */
  678. list_add(&op->list, &freeing_list);
  679. /*
  680. * Remove unused probes from the hash list. After waiting
  681. * for synchronization, this probe is reclaimed.
  682. * (reclaiming is done by do_free_cleaned_kprobes().)
  683. */
  684. hlist_del_rcu(&op->kp.hlist);
  685. }
  686. /* Don't touch the code, because it is already freed. */
  687. arch_remove_optimized_kprobe(op);
  688. }
  689. static inline
  690. void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
  691. {
  692. if (!kprobe_ftrace(p))
  693. arch_prepare_optimized_kprobe(op, p);
  694. }
  695. /* Try to prepare optimized instructions */
  696. static void prepare_optimized_kprobe(struct kprobe *p)
  697. {
  698. struct optimized_kprobe *op;
  699. op = container_of(p, struct optimized_kprobe, kp);
  700. __prepare_optimized_kprobe(op, p);
  701. }
  702. /* Allocate new optimized_kprobe and try to prepare optimized instructions */
  703. static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  704. {
  705. struct optimized_kprobe *op;
  706. op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
  707. if (!op)
  708. return NULL;
  709. INIT_LIST_HEAD(&op->list);
  710. op->kp.addr = p->addr;
  711. __prepare_optimized_kprobe(op, p);
  712. return &op->kp;
  713. }
  714. static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
  715. /*
  716. * Prepare an optimized_kprobe and optimize it
  717. * NOTE: p must be a normal registered kprobe
  718. */
  719. static void try_to_optimize_kprobe(struct kprobe *p)
  720. {
  721. struct kprobe *ap;
  722. struct optimized_kprobe *op;
  723. /* Impossible to optimize ftrace-based kprobe */
  724. if (kprobe_ftrace(p))
  725. return;
  726. /* For preparing optimization, jump_label_text_reserved() is called */
  727. cpus_read_lock();
  728. jump_label_lock();
  729. mutex_lock(&text_mutex);
  730. ap = alloc_aggr_kprobe(p);
  731. if (!ap)
  732. goto out;
  733. op = container_of(ap, struct optimized_kprobe, kp);
  734. if (!arch_prepared_optinsn(&op->optinsn)) {
735. /* If setting up the optimized instructions failed, fall back to a plain kprobe */
  736. arch_remove_optimized_kprobe(op);
  737. kfree(op);
  738. goto out;
  739. }
  740. init_aggr_kprobe(ap, p);
  741. optimize_kprobe(ap); /* This just kicks optimizer thread */
  742. out:
  743. mutex_unlock(&text_mutex);
  744. jump_label_unlock();
  745. cpus_read_unlock();
  746. }
  747. static void optimize_all_kprobes(void)
  748. {
  749. struct hlist_head *head;
  750. struct kprobe *p;
  751. unsigned int i;
  752. mutex_lock(&kprobe_mutex);
  753. /* If optimization is already allowed, just return */
  754. if (kprobes_allow_optimization)
  755. goto out;
  756. cpus_read_lock();
  757. kprobes_allow_optimization = true;
  758. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  759. head = &kprobe_table[i];
  760. hlist_for_each_entry(p, head, hlist)
  761. if (!kprobe_disabled(p))
  762. optimize_kprobe(p);
  763. }
  764. cpus_read_unlock();
  765. printk(KERN_INFO "Kprobes globally optimized\n");
  766. out:
  767. mutex_unlock(&kprobe_mutex);
  768. }
  769. #ifdef CONFIG_SYSCTL
  770. static void unoptimize_all_kprobes(void)
  771. {
  772. struct hlist_head *head;
  773. struct kprobe *p;
  774. unsigned int i;
  775. mutex_lock(&kprobe_mutex);
  776. /* If optimization is already prohibited, just return */
  777. if (!kprobes_allow_optimization) {
  778. mutex_unlock(&kprobe_mutex);
  779. return;
  780. }
  781. cpus_read_lock();
  782. kprobes_allow_optimization = false;
  783. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  784. head = &kprobe_table[i];
  785. hlist_for_each_entry(p, head, hlist) {
  786. if (!kprobe_disabled(p))
  787. unoptimize_kprobe(p, false);
  788. }
  789. }
  790. cpus_read_unlock();
  791. mutex_unlock(&kprobe_mutex);
  792. /* Wait for unoptimizing completion */
  793. wait_for_kprobe_optimizer();
  794. printk(KERN_INFO "Kprobes globally unoptimized\n");
  795. }
  796. static DEFINE_MUTEX(kprobe_sysctl_mutex);
  797. int sysctl_kprobes_optimization;
  798. int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
  799. void *buffer, size_t *length,
  800. loff_t *ppos)
  801. {
  802. int ret;
  803. mutex_lock(&kprobe_sysctl_mutex);
  804. sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
  805. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  806. if (sysctl_kprobes_optimization)
  807. optimize_all_kprobes();
  808. else
  809. unoptimize_all_kprobes();
  810. mutex_unlock(&kprobe_sysctl_mutex);
  811. return ret;
  812. }
  813. #endif /* CONFIG_SYSCTL */
  814. /* Put a breakpoint for a probe. Must be called with text_mutex locked */
  815. static void __arm_kprobe(struct kprobe *p)
  816. {
  817. struct kprobe *_p;
  818. /* Check collision with other optimized kprobes */
  819. _p = get_optimized_kprobe((unsigned long)p->addr);
  820. if (unlikely(_p))
  821. /* Fallback to unoptimized kprobe */
  822. unoptimize_kprobe(_p, true);
  823. arch_arm_kprobe(p);
  824. optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
  825. }
  826. /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
  827. static void __disarm_kprobe(struct kprobe *p, bool reopt)
  828. {
  829. struct kprobe *_p;
  830. /* Try to unoptimize */
  831. unoptimize_kprobe(p, kprobes_all_disarmed);
  832. if (!kprobe_queued(p)) {
  833. arch_disarm_kprobe(p);
  834. /* If another kprobe was blocked, optimize it. */
  835. _p = get_optimized_kprobe((unsigned long)p->addr);
  836. if (unlikely(_p) && reopt)
  837. optimize_kprobe(_p);
  838. }
839. /* TODO: reoptimize others after unoptimizing this probe */
  840. }
  841. #else /* !CONFIG_OPTPROBES */
  842. #define optimize_kprobe(p) do {} while (0)
  843. #define unoptimize_kprobe(p, f) do {} while (0)
  844. #define kill_optimized_kprobe(p) do {} while (0)
  845. #define prepare_optimized_kprobe(p) do {} while (0)
  846. #define try_to_optimize_kprobe(p) do {} while (0)
  847. #define __arm_kprobe(p) arch_arm_kprobe(p)
  848. #define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
  849. #define kprobe_disarmed(p) kprobe_disabled(p)
  850. #define wait_for_kprobe_optimizer() do {} while (0)
  851. static int reuse_unused_kprobe(struct kprobe *ap)
  852. {
  853. /*
854. * If optimized kprobes are NOT supported, the aggr kprobe is
855. * released at the same time that the last aggregated kprobe is
856. * unregistered.
857. * Thus there should be no chance to reuse an unused kprobe.
  858. */
  859. printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
  860. return -EINVAL;
  861. }
  862. static void free_aggr_kprobe(struct kprobe *p)
  863. {
  864. arch_remove_kprobe(p);
  865. kfree(p);
  866. }
  867. static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
  868. {
  869. return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
  870. }
  871. #endif /* CONFIG_OPTPROBES */
  872. #ifdef CONFIG_KPROBES_ON_FTRACE
  873. static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
  874. .func = kprobe_ftrace_handler,
  875. .flags = FTRACE_OPS_FL_SAVE_REGS,
  876. };
  877. static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
  878. .func = kprobe_ftrace_handler,
  879. .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
  880. };
  881. static int kprobe_ipmodify_enabled;
  882. static int kprobe_ftrace_enabled;
  883. /* Must ensure p->addr is really on ftrace */
  884. static int prepare_kprobe(struct kprobe *p)
  885. {
  886. if (!kprobe_ftrace(p))
  887. return arch_prepare_kprobe(p);
  888. return arch_prepare_kprobe_ftrace(p);
  889. }
  890. /* Caller must lock kprobe_mutex */
  891. static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
  892. int *cnt)
  893. {
  894. int ret = 0;
  895. ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
  896. if (ret) {
  897. pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
  898. p->addr, ret);
  899. return ret;
  900. }
  901. if (*cnt == 0) {
  902. ret = register_ftrace_function(ops);
  903. if (ret) {
  904. pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
  905. goto err_ftrace;
  906. }
  907. }
  908. (*cnt)++;
  909. return ret;
  910. err_ftrace:
  911. /*
912. * At this point, since ops is not registered, we should be safe from
913. * registering an empty filter.
  914. */
  915. ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
  916. return ret;
  917. }
  918. static int arm_kprobe_ftrace(struct kprobe *p)
  919. {
  920. bool ipmodify = (p->post_handler != NULL);
  921. return __arm_kprobe_ftrace(p,
  922. ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
  923. ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
  924. }
  925. /* Caller must lock kprobe_mutex */
  926. static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
  927. int *cnt)
  928. {
  929. int ret = 0;
  930. if (*cnt == 1) {
  931. ret = unregister_ftrace_function(ops);
  932. if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
  933. return ret;
  934. }
  935. (*cnt)--;
  936. ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
  937. WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
  938. p->addr, ret);
  939. return ret;
  940. }
  941. static int disarm_kprobe_ftrace(struct kprobe *p)
  942. {
  943. bool ipmodify = (p->post_handler != NULL);
  944. return __disarm_kprobe_ftrace(p,
  945. ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
  946. ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
  947. }
  948. #else /* !CONFIG_KPROBES_ON_FTRACE */
  949. static inline int prepare_kprobe(struct kprobe *p)
  950. {
  951. return arch_prepare_kprobe(p);
  952. }
  953. static inline int arm_kprobe_ftrace(struct kprobe *p)
  954. {
  955. return -ENODEV;
  956. }
  957. static inline int disarm_kprobe_ftrace(struct kprobe *p)
  958. {
  959. return -ENODEV;
  960. }
  961. #endif
  962. /* Arm a kprobe with text_mutex */
  963. static int arm_kprobe(struct kprobe *kp)
  964. {
  965. if (unlikely(kprobe_ftrace(kp)))
  966. return arm_kprobe_ftrace(kp);
  967. cpus_read_lock();
  968. mutex_lock(&text_mutex);
  969. __arm_kprobe(kp);
  970. mutex_unlock(&text_mutex);
  971. cpus_read_unlock();
  972. return 0;
  973. }
  974. /* Disarm a kprobe with text_mutex */
  975. static int disarm_kprobe(struct kprobe *kp, bool reopt)
  976. {
  977. if (unlikely(kprobe_ftrace(kp)))
  978. return disarm_kprobe_ftrace(kp);
  979. cpus_read_lock();
  980. mutex_lock(&text_mutex);
  981. __disarm_kprobe(kp, reopt);
  982. mutex_unlock(&text_mutex);
  983. cpus_read_unlock();
  984. return 0;
  985. }
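/*
 * arm_kprobe()/disarm_kprobe() above are the internal arm/disarm paths.
 * A minimal sketch of the corresponding public interface, assuming a
 * previously registered probe; enable_kprobe()/disable_kprobe() end up
 * calling into these helpers under kprobe_mutex. "example_toggle" is an
 * illustrative name.
 */
#if 0
static int example_toggle(struct kprobe *kp, bool on)
{
	/*
	 * disable_kprobe() removes the breakpoint/ftrace hook but keeps
	 * the registration; enable_kprobe() re-arms it. Both return a
	 * negative errno on failure.
	 */
	return on ? enable_kprobe(kp) : disable_kprobe(kp);
}
#endif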
  986. /*
  987. * Aggregate handlers for multiple kprobes support - these handlers
  988. * take care of invoking the individual kprobe handlers on p->list
  989. */
  990. static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
  991. {
  992. struct kprobe *kp;
  993. list_for_each_entry_rcu(kp, &p->list, list) {
  994. if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
  995. set_kprobe_instance(kp);
  996. if (kp->pre_handler(kp, regs))
  997. return 1;
  998. }
  999. reset_kprobe_instance();
  1000. }
  1001. return 0;
  1002. }
  1003. NOKPROBE_SYMBOL(aggr_pre_handler);
  1004. static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
  1005. unsigned long flags)
  1006. {
  1007. struct kprobe *kp;
  1008. list_for_each_entry_rcu(kp, &p->list, list) {
  1009. if (kp->post_handler && likely(!kprobe_disabled(kp))) {
  1010. set_kprobe_instance(kp);
  1011. kp->post_handler(kp, regs, flags);
  1012. reset_kprobe_instance();
  1013. }
  1014. }
  1015. }
  1016. NOKPROBE_SYMBOL(aggr_post_handler);
  1017. static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
  1018. int trapnr)
  1019. {
  1020. struct kprobe *cur = __this_cpu_read(kprobe_instance);
  1021. /*
  1022. * if we faulted "during" the execution of a user specified
  1023. * probe handler, invoke just that probe's fault handler
  1024. */
  1025. if (cur && cur->fault_handler) {
  1026. if (cur->fault_handler(cur, regs, trapnr))
  1027. return 1;
  1028. }
  1029. return 0;
  1030. }
  1031. NOKPROBE_SYMBOL(aggr_fault_handler);
  1032. /* Walks the list and increments nmissed count for multiprobe case */
  1033. void kprobes_inc_nmissed_count(struct kprobe *p)
  1034. {
  1035. struct kprobe *kp;
  1036. if (!kprobe_aggrprobe(p)) {
  1037. p->nmissed++;
  1038. } else {
  1039. list_for_each_entry_rcu(kp, &p->list, list)
  1040. kp->nmissed++;
  1041. }
  1042. return;
  1043. }
  1044. NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
  1045. static void recycle_rp_inst(struct kretprobe_instance *ri)
  1046. {
  1047. struct kretprobe *rp = ri->rp;
1048. /* remove the rp inst from the kretprobe_inst_table */
  1049. hlist_del(&ri->hlist);
  1050. INIT_HLIST_NODE(&ri->hlist);
  1051. if (likely(rp)) {
  1052. raw_spin_lock(&rp->lock);
  1053. hlist_add_head(&ri->hlist, &rp->free_instances);
  1054. raw_spin_unlock(&rp->lock);
  1055. } else
  1056. kfree_rcu(ri, rcu);
  1057. }
  1058. NOKPROBE_SYMBOL(recycle_rp_inst);
  1059. static void kretprobe_hash_lock(struct task_struct *tsk,
  1060. struct hlist_head **head, unsigned long *flags)
  1061. __acquires(hlist_lock)
  1062. {
  1063. unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  1064. raw_spinlock_t *hlist_lock;
  1065. *head = &kretprobe_inst_table[hash];
  1066. hlist_lock = kretprobe_table_lock_ptr(hash);
  1067. /*
  1068. * Nested is a workaround that will soon not be needed.
1069. * There are other protections that make sure the same lock
  1070. * is not taken on the same CPU that lockdep is unaware of.
  1071. * Differentiate when it is taken in NMI context.
  1072. */
  1073. raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
  1074. }
  1075. NOKPROBE_SYMBOL(kretprobe_hash_lock);
  1076. static void kretprobe_table_lock(unsigned long hash,
  1077. unsigned long *flags)
  1078. __acquires(hlist_lock)
  1079. {
  1080. raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  1081. /*
  1082. * Nested is a workaround that will soon not be needed.
1083. * There are other protections that make sure the same lock
  1084. * is not taken on the same CPU that lockdep is unaware of.
  1085. * Differentiate when it is taken in NMI context.
  1086. */
  1087. raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
  1088. }
  1089. NOKPROBE_SYMBOL(kretprobe_table_lock);
  1090. static void kretprobe_hash_unlock(struct task_struct *tsk,
  1091. unsigned long *flags)
  1092. __releases(hlist_lock)
  1093. {
  1094. unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
  1095. raw_spinlock_t *hlist_lock;
  1096. hlist_lock = kretprobe_table_lock_ptr(hash);
  1097. raw_spin_unlock_irqrestore(hlist_lock, *flags);
  1098. }
  1099. NOKPROBE_SYMBOL(kretprobe_hash_unlock);
  1100. static void kretprobe_table_unlock(unsigned long hash,
  1101. unsigned long *flags)
  1102. __releases(hlist_lock)
  1103. {
  1104. raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
  1105. raw_spin_unlock_irqrestore(hlist_lock, *flags);
  1106. }
  1107. NOKPROBE_SYMBOL(kretprobe_table_unlock);
  1108. static struct kprobe kprobe_busy = {
  1109. .addr = (void *) get_kprobe,
  1110. };
  1111. void kprobe_busy_begin(void)
  1112. {
  1113. struct kprobe_ctlblk *kcb;
  1114. preempt_disable();
  1115. __this_cpu_write(current_kprobe, &kprobe_busy);
  1116. kcb = get_kprobe_ctlblk();
  1117. kcb->kprobe_status = KPROBE_HIT_ACTIVE;
  1118. }
  1119. void kprobe_busy_end(void)
  1120. {
  1121. __this_cpu_write(current_kprobe, NULL);
  1122. preempt_enable();
  1123. }
  1124. /*
  1125. * This function is called from finish_task_switch when task tk becomes dead,
  1126. * so that we can recycle any function-return probe instances associated
  1127. * with this task. These left over instances represent probed functions
  1128. * that have been called but will never return.
  1129. */
  1130. void kprobe_flush_task(struct task_struct *tk)
  1131. {
  1132. struct kretprobe_instance *ri;
  1133. struct hlist_head *head;
  1134. struct hlist_node *tmp;
  1135. unsigned long hash, flags = 0;
  1136. if (unlikely(!kprobes_initialized))
  1137. /* Early boot. kretprobe_table_locks not yet initialized. */
  1138. return;
  1139. kprobe_busy_begin();
  1140. hash = hash_ptr(tk, KPROBE_HASH_BITS);
  1141. head = &kretprobe_inst_table[hash];
  1142. kretprobe_table_lock(hash, &flags);
  1143. hlist_for_each_entry_safe(ri, tmp, head, hlist) {
  1144. if (ri->task == tk)
  1145. recycle_rp_inst(ri);
  1146. }
  1147. kretprobe_table_unlock(hash, &flags);
  1148. kprobe_busy_end();
  1149. }
  1150. NOKPROBE_SYMBOL(kprobe_flush_task);
  1151. static inline void free_rp_inst(struct kretprobe *rp)
  1152. {
  1153. struct kretprobe_instance *ri;
  1154. struct hlist_node *next;
  1155. hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
  1156. hlist_del(&ri->hlist);
  1157. kfree(ri);
  1158. }
  1159. }
  1160. static void cleanup_rp_inst(struct kretprobe *rp)
  1161. {
  1162. unsigned long flags, hash;
  1163. struct kretprobe_instance *ri;
  1164. struct hlist_node *next;
  1165. struct hlist_head *head;
  1166. /* To avoid recursive kretprobe by NMI, set kprobe busy here */
  1167. kprobe_busy_begin();
  1168. for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
  1169. kretprobe_table_lock(hash, &flags);
  1170. head = &kretprobe_inst_table[hash];
  1171. hlist_for_each_entry_safe(ri, next, head, hlist) {
  1172. if (ri->rp == rp)
  1173. ri->rp = NULL;
  1174. }
  1175. kretprobe_table_unlock(hash, &flags);
  1176. }
  1177. kprobe_busy_end();
  1178. free_rp_inst(rp);
  1179. }
  1180. NOKPROBE_SYMBOL(cleanup_rp_inst);
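/*
 * A minimal sketch of the function-return probe (kretprobe) interface
 * whose per-task instance bookkeeping is handled above. The probed
 * symbol "kernel_clone" and the "example_*" names are illustrative
 * assumptions.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n",
		(long)regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* instances pre-allocated for concurrency */
	.kp.symbol_name	= "kernel_clone",	/* assumed symbol */
};

static int __init example_kretprobe_init(void)
{
	return register_kretprobe(&example_rp);
}

static void __exit example_kretprobe_exit(void)
{
	unregister_kretprobe(&example_rp);
}
#endif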
  1181. /* Add the new probe to ap->list */
  1182. static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  1183. {
  1184. if (p->post_handler)
  1185. unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
  1186. list_add_rcu(&p->list, &ap->list);
  1187. if (p->post_handler && !ap->post_handler)
  1188. ap->post_handler = aggr_post_handler;
  1189. return 0;
  1190. }
  1191. /*
  1192. * Fill in the required fields of the "manager kprobe". Replace the
  1193. * earlier kprobe in the hlist with the manager kprobe
  1194. */
  1195. static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  1196. {
  1197. /* Copy p's insn slot to ap */
  1198. copy_kprobe(p, ap);
  1199. flush_insn_slot(ap);
  1200. ap->addr = p->addr;
  1201. ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
  1202. ap->pre_handler = aggr_pre_handler;
  1203. ap->fault_handler = aggr_fault_handler;
1204. /* We don't care about a kprobe which has gone (its module was unloaded). */
  1205. if (p->post_handler && !kprobe_gone(p))
  1206. ap->post_handler = aggr_post_handler;
  1207. INIT_LIST_HEAD(&ap->list);
  1208. INIT_HLIST_NODE(&ap->hlist);
  1209. list_add_rcu(&p->list, &ap->list);
  1210. hlist_replace_rcu(&p->hlist, &ap->hlist);
  1211. }
  1212. /*
  1213. * This is the second or subsequent kprobe at the address - handle
  1214. * the intricacies
  1215. */
  1216. static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
  1217. {
  1218. int ret = 0;
  1219. struct kprobe *ap = orig_p;
  1220. cpus_read_lock();
  1221. /* For preparing optimization, jump_label_text_reserved() is called */
  1222. jump_label_lock();
  1223. mutex_lock(&text_mutex);
  1224. if (!kprobe_aggrprobe(orig_p)) {
  1225. /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
  1226. ap = alloc_aggr_kprobe(orig_p);
  1227. if (!ap) {
  1228. ret = -ENOMEM;
  1229. goto out;
  1230. }
  1231. init_aggr_kprobe(ap, orig_p);
  1232. } else if (kprobe_unused(ap)) {
  1233. /* This probe is going to die. Rescue it */
  1234. ret = reuse_unused_kprobe(ap);
  1235. if (ret)
  1236. goto out;
  1237. }
  1238. if (kprobe_gone(ap)) {
  1239. /*
1240. * We are attempting to insert a new probe at the same location
1241. * as a probe that was in a module vaddr area which has already
1242. * been freed. So the instruction slot has already been
1243. * released. We need a new slot for the new probe.
  1244. */
  1245. ret = arch_prepare_kprobe(ap);
  1246. if (ret)
  1247. /*
1248. * Even if allocating a new slot fails, there is no need to
1249. * free the aggr_probe. It will be used next time, or
1250. * freed by unregister_kprobe().
  1251. */
  1252. goto out;
  1253. /* Prepare optimized instructions if possible. */
  1254. prepare_optimized_kprobe(ap);
  1255. /*
  1256. * Clear gone flag to prevent allocating new slot again, and
  1257. * set disabled flag because it is not armed yet.
  1258. */
  1259. ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
  1260. | KPROBE_FLAG_DISABLED;
  1261. }
  1262. /* Copy ap's insn slot to p */
  1263. copy_kprobe(ap, p);
  1264. ret = add_new_kprobe(ap, p);
  1265. out:
  1266. mutex_unlock(&text_mutex);
  1267. jump_label_unlock();
  1268. cpus_read_unlock();
  1269. if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
  1270. ap->flags &= ~KPROBE_FLAG_DISABLED;
  1271. if (!kprobes_all_disarmed) {
  1272. /* Arm the breakpoint again. */
  1273. ret = arm_kprobe(ap);
  1274. if (ret) {
  1275. ap->flags |= KPROBE_FLAG_DISABLED;
  1276. list_del_rcu(&p->list);
  1277. synchronize_rcu();
  1278. }
  1279. }
  1280. }
  1281. return ret;
  1282. }
  1283. bool __weak arch_within_kprobe_blacklist(unsigned long addr)
  1284. {
  1285. /* The __kprobes marked functions and entry code must not be probed */
  1286. return addr >= (unsigned long)__kprobes_text_start &&
  1287. addr < (unsigned long)__kprobes_text_end;
  1288. }
  1289. static bool __within_kprobe_blacklist(unsigned long addr)
  1290. {
  1291. struct kprobe_blacklist_entry *ent;
  1292. if (arch_within_kprobe_blacklist(addr))
  1293. return true;
  1294. /*
1295. * If a kprobe_blacklist exists, verify the address and
1296. * fail any probe registration inside a prohibited area.
  1297. */
  1298. list_for_each_entry(ent, &kprobe_blacklist, list) {
  1299. if (addr >= ent->start_addr && addr < ent->end_addr)
  1300. return true;
  1301. }
  1302. return false;
  1303. }
  1304. bool within_kprobe_blacklist(unsigned long addr)
  1305. {
  1306. char symname[KSYM_NAME_LEN], *p;
  1307. if (__within_kprobe_blacklist(addr))
  1308. return true;
1309. /* Check if the address is on a suffixed symbol (e.g. a compiler-generated 'foo.isra.0') */
  1310. if (!lookup_symbol_name(addr, symname)) {
  1311. p = strchr(symname, '.');
  1312. if (!p)
  1313. return false;
  1314. *p = '\0';
  1315. addr = (unsigned long)kprobe_lookup_name(symname, 0);
  1316. if (addr)
  1317. return __within_kprobe_blacklist(addr);
  1318. }
  1319. return false;
  1320. }
  1321. /*
  1322. * If we have a symbol_name argument, look it up and add the offset field
1323. * to it. This way, we can specify an address relative to a symbol.
1324. * This returns an encoded error if the symbol lookup fails or an
1325. * invalid combination of parameters is passed.
  1326. */
  1327. static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
  1328. const char *symbol_name, unsigned int offset)
  1329. {
  1330. if ((symbol_name && addr) || (!symbol_name && !addr))
  1331. goto invalid;
  1332. if (symbol_name) {
  1333. addr = kprobe_lookup_name(symbol_name, offset);
  1334. if (!addr)
  1335. return ERR_PTR(-ENOENT);
  1336. }
  1337. addr = (kprobe_opcode_t *)(((char *)addr) + offset);
  1338. if (addr)
  1339. return addr;
  1340. invalid:
  1341. return ERR_PTR(-EINVAL);
  1342. }
  1343. static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
  1344. {
  1345. return _kprobe_addr(p->addr, p->symbol_name, p->offset);
  1346. }
  1347. /* Check passed kprobe is valid and return kprobe in kprobe_table. */
  1348. static struct kprobe *__get_valid_kprobe(struct kprobe *p)
  1349. {
  1350. struct kprobe *ap, *list_p;
  1351. lockdep_assert_held(&kprobe_mutex);
  1352. ap = get_kprobe(p->addr);
  1353. if (unlikely(!ap))
  1354. return NULL;
  1355. if (p != ap) {
  1356. list_for_each_entry(list_p, &ap->list, list)
  1357. if (list_p == p)
  1358. /* kprobe p is a valid probe */
  1359. goto valid;
  1360. return NULL;
  1361. }
  1362. valid:
  1363. return ap;
  1364. }
  1365. /* Return error if the kprobe is being re-registered */
  1366. static inline int check_kprobe_rereg(struct kprobe *p)
  1367. {
  1368. int ret = 0;
  1369. mutex_lock(&kprobe_mutex);
  1370. if (__get_valid_kprobe(p))
  1371. ret = -EINVAL;
  1372. mutex_unlock(&kprobe_mutex);
  1373. return ret;
  1374. }
  1375. int __weak arch_check_ftrace_location(struct kprobe *p)
  1376. {
  1377. unsigned long ftrace_addr;
  1378. ftrace_addr = ftrace_location((unsigned long)p->addr);
  1379. if (ftrace_addr) {
  1380. #ifdef CONFIG_KPROBES_ON_FTRACE
1381. /* The given address is within the ftrace location but not on its instruction boundary */
  1382. if ((unsigned long)p->addr != ftrace_addr)
  1383. return -EILSEQ;
  1384. p->flags |= KPROBE_FLAG_FTRACE;
  1385. #else /* !CONFIG_KPROBES_ON_FTRACE */
  1386. return -EINVAL;
  1387. #endif
  1388. }
  1389. return 0;
  1390. }
  1391. static int check_kprobe_address_safe(struct kprobe *p,
  1392. struct module **probed_mod)
  1393. {
  1394. int ret;
  1395. ret = arch_check_ftrace_location(p);
  1396. if (ret)
  1397. return ret;
  1398. jump_label_lock();
  1399. preempt_disable();
  1400. /* Ensure it is not in reserved area nor out of text */
  1401. if (!kernel_text_address((unsigned long) p->addr) ||
  1402. within_kprobe_blacklist((unsigned long) p->addr) ||
  1403. jump_label_text_reserved(p->addr, p->addr) ||
  1404. static_call_text_reserved(p->addr, p->addr) ||
  1405. find_bug((unsigned long)p->addr)) {
  1406. ret = -EINVAL;
  1407. goto out;
  1408. }
1409. /* Check if we are probing a module */
  1410. *probed_mod = __module_text_address((unsigned long) p->addr);
  1411. if (*probed_mod) {
  1412. /*
  1413. * We must hold a refcount of the probed module while updating
  1414. * its code to prohibit unexpected unloading.
  1415. */
  1416. if (unlikely(!try_module_get(*probed_mod))) {
  1417. ret = -ENOENT;
  1418. goto out;
  1419. }
  1420. /*
1421. * If the module has already freed its .init.text, we cannot
1422. * insert kprobes there.
  1423. */
  1424. if (within_module_init((unsigned long)p->addr, *probed_mod) &&
  1425. (*probed_mod)->state != MODULE_STATE_COMING) {
  1426. module_put(*probed_mod);
  1427. *probed_mod = NULL;
  1428. ret = -ENOENT;
  1429. }
  1430. }
  1431. out:
  1432. preempt_enable();
  1433. jump_label_unlock();
  1434. return ret;
  1435. }
  1436. int register_kprobe(struct kprobe *p)
  1437. {
  1438. int ret;
  1439. struct kprobe *old_p;
  1440. struct module *probed_mod;
  1441. kprobe_opcode_t *addr;
  1442. /* Adjust probe address from symbol */
  1443. addr = kprobe_addr(p);
  1444. if (IS_ERR(addr))
  1445. return PTR_ERR(addr);
  1446. p->addr = addr;
  1447. ret = check_kprobe_rereg(p);
  1448. if (ret)
  1449. return ret;
  1450. /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
  1451. p->flags &= KPROBE_FLAG_DISABLED;
  1452. p->nmissed = 0;
  1453. INIT_LIST_HEAD(&p->list);
  1454. ret = check_kprobe_address_safe(p, &probed_mod);
  1455. if (ret)
  1456. return ret;
  1457. mutex_lock(&kprobe_mutex);
  1458. old_p = get_kprobe(p->addr);
  1459. if (old_p) {
1460. /* Since this may unoptimize old_p, register_aggr_kprobe() takes text_mutex itself. */
  1461. ret = register_aggr_kprobe(old_p, p);
  1462. goto out;
  1463. }
  1464. cpus_read_lock();
  1465. /* Prevent text modification */
  1466. mutex_lock(&text_mutex);
  1467. ret = prepare_kprobe(p);
  1468. mutex_unlock(&text_mutex);
  1469. cpus_read_unlock();
  1470. if (ret)
  1471. goto out;
  1472. INIT_HLIST_NODE(&p->hlist);
  1473. hlist_add_head_rcu(&p->hlist,
  1474. &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
  1475. if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
  1476. ret = arm_kprobe(p);
  1477. if (ret) {
  1478. hlist_del_rcu(&p->hlist);
  1479. synchronize_rcu();
  1480. goto out;
  1481. }
  1482. }
  1483. /* Try to optimize kprobe */
  1484. try_to_optimize_kprobe(p);
  1485. out:
  1486. mutex_unlock(&kprobe_mutex);
  1487. if (probed_mod)
  1488. module_put(probed_mod);
  1489. return ret;
  1490. }
  1491. EXPORT_SYMBOL_GPL(register_kprobe);
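/*
 * Illustrative sketch only (compiled out): a minimal module-style user of
 * register_kprobe()/unregister_kprobe(), in the spirit of
 * samples/kprobes/kprobe_example.c. The target symbol "kernel_clone" and
 * the handler bodies are assumptions for the example, not part of this file.
 */
#if 0 /* example only */
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre_handler: hit at %pS\n", p->addr);
	return 0;	/* 0 lets the probed instruction execute normally */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("post_handler: single-step finished at %pS\n", p->addr);
}

static struct kprobe example_kp = {
	.symbol_name	= "kernel_clone",	/* assumed target symbol */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init kprobe_example_init(void)
{
	/* register_kprobe() resolves .symbol_name, then arms and, if
	 * possible, optimizes the probe. */
	return register_kprobe(&example_kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");
#endif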
  1492. /* Check if all probes on the aggrprobe are disabled */
  1493. static int aggr_kprobe_disabled(struct kprobe *ap)
  1494. {
  1495. struct kprobe *kp;
  1496. lockdep_assert_held(&kprobe_mutex);
  1497. list_for_each_entry(kp, &ap->list, list)
  1498. if (!kprobe_disabled(kp))
  1499. /*
  1500. * There is an active probe on the list.
  1501. * We can't disable this ap.
  1502. */
  1503. return 0;
  1504. return 1;
  1505. }
1506. /* Disable one kprobe: the caller must hold kprobe_mutex */
  1507. static struct kprobe *__disable_kprobe(struct kprobe *p)
  1508. {
  1509. struct kprobe *orig_p;
  1510. int ret;
  1511. /* Get an original kprobe for return */
  1512. orig_p = __get_valid_kprobe(p);
  1513. if (unlikely(orig_p == NULL))
  1514. return ERR_PTR(-EINVAL);
  1515. if (!kprobe_disabled(p)) {
  1516. /* Disable probe if it is a child probe */
  1517. if (p != orig_p)
  1518. p->flags |= KPROBE_FLAG_DISABLED;
  1519. /* Try to disarm and disable this/parent probe */
  1520. if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
  1521. /*
  1522. * If kprobes_all_disarmed is set, orig_p
  1523. * should have already been disarmed, so
1524. * skip the unneeded disarming process.
  1525. */
  1526. if (!kprobes_all_disarmed) {
  1527. ret = disarm_kprobe(orig_p, true);
  1528. if (ret) {
  1529. p->flags &= ~KPROBE_FLAG_DISABLED;
  1530. return ERR_PTR(ret);
  1531. }
  1532. }
  1533. orig_p->flags |= KPROBE_FLAG_DISABLED;
  1534. }
  1535. }
  1536. return orig_p;
  1537. }
  1538. /*
  1539. * Unregister a kprobe without a scheduler synchronization.
  1540. */
  1541. static int __unregister_kprobe_top(struct kprobe *p)
  1542. {
  1543. struct kprobe *ap, *list_p;
  1544. /* Disable kprobe. This will disarm it if needed. */
  1545. ap = __disable_kprobe(p);
  1546. if (IS_ERR(ap))
  1547. return PTR_ERR(ap);
  1548. if (ap == p)
  1549. /*
1550. * This probe is an independent (and non-optimized) kprobe
  1551. * (not an aggrprobe). Remove from the hash list.
  1552. */
  1553. goto disarmed;
1554. /* The following code expects this probe to be an aggrprobe */
  1555. WARN_ON(!kprobe_aggrprobe(ap));
  1556. if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
  1557. /*
1558. * !disarmed can happen if the probe is under delayed
1559. * unoptimization.
  1560. */
  1561. goto disarmed;
  1562. else {
1563. /* If the probe being removed has special handlers, update the aggrprobe */
  1564. if (p->post_handler && !kprobe_gone(p)) {
  1565. list_for_each_entry(list_p, &ap->list, list) {
  1566. if ((list_p != p) && (list_p->post_handler))
  1567. goto noclean;
  1568. }
  1569. ap->post_handler = NULL;
  1570. }
  1571. noclean:
  1572. /*
  1573. * Remove from the aggrprobe: this path will do nothing in
  1574. * __unregister_kprobe_bottom().
  1575. */
  1576. list_del_rcu(&p->list);
  1577. if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
  1578. /*
  1579. * Try to optimize this probe again, because post
  1580. * handler may have been changed.
  1581. */
  1582. optimize_kprobe(ap);
  1583. }
  1584. return 0;
  1585. disarmed:
  1586. hlist_del_rcu(&ap->hlist);
  1587. return 0;
  1588. }
  1589. static void __unregister_kprobe_bottom(struct kprobe *p)
  1590. {
  1591. struct kprobe *ap;
  1592. if (list_empty(&p->list))
  1593. /* This is an independent kprobe */
  1594. arch_remove_kprobe(p);
  1595. else if (list_is_singular(&p->list)) {
  1596. /* This is the last child of an aggrprobe */
  1597. ap = list_entry(p->list.next, struct kprobe, list);
  1598. list_del(&p->list);
  1599. free_aggr_kprobe(ap);
  1600. }
  1601. /* Otherwise, do nothing. */
  1602. }
  1603. int register_kprobes(struct kprobe **kps, int num)
  1604. {
  1605. int i, ret = 0;
  1606. if (num <= 0)
  1607. return -EINVAL;
  1608. for (i = 0; i < num; i++) {
  1609. ret = register_kprobe(kps[i]);
  1610. if (ret < 0) {
  1611. if (i > 0)
  1612. unregister_kprobes(kps, i);
  1613. break;
  1614. }
  1615. }
  1616. return ret;
  1617. }
  1618. EXPORT_SYMBOL_GPL(register_kprobes);
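/*
 * Illustrative sketch only (compiled out): batch registration with
 * register_kprobes()/unregister_kprobes(). The probed symbols are
 * assumptions for the example. Note that on failure register_kprobes()
 * has already unregistered the probes it managed to register, so the
 * caller needs no manual rollback.
 */
#if 0 /* example only */
static int example_batch_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %pS\n", p->addr);
	return 0;
}

static struct kprobe example_kp_open = {
	.symbol_name = "do_sys_open", .pre_handler = example_batch_pre,
};
static struct kprobe example_kp_read = {
	.symbol_name = "vfs_read", .pre_handler = example_batch_pre,
};
static struct kprobe *example_kps[] = { &example_kp_open, &example_kp_read };

static int __init kprobe_batch_init(void)
{
	return register_kprobes(example_kps, ARRAY_SIZE(example_kps));
}

static void __exit kprobe_batch_exit(void)
{
	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
}
#endif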
  1619. void unregister_kprobe(struct kprobe *p)
  1620. {
  1621. unregister_kprobes(&p, 1);
  1622. }
  1623. EXPORT_SYMBOL_GPL(unregister_kprobe);
  1624. void unregister_kprobes(struct kprobe **kps, int num)
  1625. {
  1626. int i;
  1627. if (num <= 0)
  1628. return;
  1629. mutex_lock(&kprobe_mutex);
  1630. for (i = 0; i < num; i++)
  1631. if (__unregister_kprobe_top(kps[i]) < 0)
  1632. kps[i]->addr = NULL;
  1633. mutex_unlock(&kprobe_mutex);
  1634. synchronize_rcu();
  1635. for (i = 0; i < num; i++)
  1636. if (kps[i]->addr)
  1637. __unregister_kprobe_bottom(kps[i]);
  1638. }
  1639. EXPORT_SYMBOL_GPL(unregister_kprobes);
  1640. int __weak kprobe_exceptions_notify(struct notifier_block *self,
  1641. unsigned long val, void *data)
  1642. {
  1643. return NOTIFY_DONE;
  1644. }
  1645. NOKPROBE_SYMBOL(kprobe_exceptions_notify);
  1646. static struct notifier_block kprobe_exceptions_nb = {
  1647. .notifier_call = kprobe_exceptions_notify,
  1648. .priority = 0x7fffffff /* we need to be notified first */
  1649. };
  1650. unsigned long __weak arch_deref_entry_point(void *entry)
  1651. {
  1652. return (unsigned long)entry;
  1653. }
  1654. #ifdef CONFIG_KRETPROBES
  1655. unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
  1656. void *trampoline_address,
  1657. void *frame_pointer)
  1658. {
  1659. struct kretprobe_instance *ri = NULL, *last = NULL;
  1660. struct hlist_head *head;
  1661. struct hlist_node *tmp;
  1662. unsigned long flags;
  1663. kprobe_opcode_t *correct_ret_addr = NULL;
  1664. bool skipped = false;
  1665. kretprobe_hash_lock(current, &head, &flags);
  1666. /*
  1667. * It is possible to have multiple instances associated with a given
  1668. * task either because multiple functions in the call path have
  1669. * return probes installed on them, and/or more than one
  1670. * return probe was registered for a target function.
  1671. *
  1672. * We can handle this because:
  1673. * - instances are always pushed into the head of the list
  1674. * - when multiple return probes are registered for the same
  1675. * function, the (chronologically) first instance's ret_addr
  1676. * will be the real return address, and all the rest will
  1677. * point to kretprobe_trampoline.
  1678. */
  1679. hlist_for_each_entry(ri, head, hlist) {
  1680. if (ri->task != current)
  1681. /* another task is sharing our hash bucket */
  1682. continue;
  1683. /*
1684. * Return probes must be pushed onto this hash list in the
1685. * correct order (same as the return order) so that they can
1686. * be popped correctly. However, if we find an entry pushed
1687. * in the wrong order, it means we hit a function which should
1688. * not have been probed, because the out-of-order entry was
1689. * pushed while another kretprobe was itself being processed.
  1690. */
  1691. if (ri->fp != frame_pointer) {
  1692. if (!skipped)
  1693. pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
  1694. skipped = true;
  1695. continue;
  1696. }
  1697. correct_ret_addr = ri->ret_addr;
  1698. if (skipped)
  1699. pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
  1700. ri->rp->kp.addr);
  1701. if (correct_ret_addr != trampoline_address)
  1702. /*
  1703. * This is the real return address. Any other
  1704. * instances associated with this task are for
  1705. * other calls deeper on the call stack
  1706. */
  1707. break;
  1708. }
  1709. BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
  1710. last = ri;
  1711. hlist_for_each_entry_safe(ri, tmp, head, hlist) {
  1712. if (ri->task != current)
  1713. /* another task is sharing our hash bucket */
  1714. continue;
  1715. if (ri->fp != frame_pointer)
  1716. continue;
  1717. if (ri->rp && ri->rp->handler) {
  1718. struct kprobe *prev = kprobe_running();
  1719. __this_cpu_write(current_kprobe, &ri->rp->kp);
  1720. ri->ret_addr = correct_ret_addr;
  1721. ri->rp->handler(ri, regs);
  1722. __this_cpu_write(current_kprobe, prev);
  1723. }
  1724. recycle_rp_inst(ri);
  1725. if (ri == last)
  1726. break;
  1727. }
  1728. kretprobe_hash_unlock(current, &flags);
  1729. return (unsigned long)correct_ret_addr;
  1730. }
  1731. NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
  1732. /*
1733. * This kprobe pre_handler is registered with every kretprobe. When the
1734. * probe hits, it sets up the return probe.
  1735. */
  1736. static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
  1737. {
  1738. struct kretprobe *rp = container_of(p, struct kretprobe, kp);
  1739. unsigned long hash, flags = 0;
  1740. struct kretprobe_instance *ri;
1741. /* TODO: consider swapping the RA only after the last pre_handler has fired */
  1742. hash = hash_ptr(current, KPROBE_HASH_BITS);
  1743. /*
1744. * The nested annotation is a workaround that will soon not be needed.
1745. * There are other protections, which lockdep is unaware of, that make
1746. * sure the same lock is not taken twice on the same CPU.
  1747. */
  1748. raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
  1749. if (!hlist_empty(&rp->free_instances)) {
  1750. ri = hlist_entry(rp->free_instances.first,
  1751. struct kretprobe_instance, hlist);
  1752. hlist_del(&ri->hlist);
  1753. raw_spin_unlock_irqrestore(&rp->lock, flags);
  1754. ri->rp = rp;
  1755. ri->task = current;
  1756. if (rp->entry_handler && rp->entry_handler(ri, regs)) {
  1757. raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
  1758. hlist_add_head(&ri->hlist, &rp->free_instances);
  1759. raw_spin_unlock_irqrestore(&rp->lock, flags);
  1760. return 0;
  1761. }
  1762. arch_prepare_kretprobe(ri, regs);
  1763. /* XXX(hch): why is there no hlist_move_head? */
  1764. INIT_HLIST_NODE(&ri->hlist);
  1765. kretprobe_table_lock(hash, &flags);
  1766. hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
  1767. kretprobe_table_unlock(hash, &flags);
  1768. } else {
  1769. rp->nmissed++;
  1770. raw_spin_unlock_irqrestore(&rp->lock, flags);
  1771. }
  1772. return 0;
  1773. }
  1774. NOKPROBE_SYMBOL(pre_handler_kretprobe);
  1775. bool __weak arch_kprobe_on_func_entry(unsigned long offset)
  1776. {
  1777. return !offset;
  1778. }
  1779. /**
1780. * kprobe_on_func_entry() -- check whether the given address is a function entry
  1781. * @addr: Target address
  1782. * @sym: Target symbol name
  1783. * @offset: The offset from the symbol or the address
  1784. *
  1785. * This checks whether the given @addr+@offset or @sym+@offset is on the
  1786. * function entry address or not.
  1787. * This returns 0 if it is the function entry, or -EINVAL if it is not.
1788. * It also returns -ENOENT if the symbol or address lookup fails.
1789. * The caller must pass either @addr or @sym (the other must be NULL),
1790. * otherwise this returns -EINVAL.
  1791. */
  1792. int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
  1793. {
  1794. kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
  1795. if (IS_ERR(kp_addr))
  1796. return PTR_ERR(kp_addr);
  1797. if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
  1798. return -ENOENT;
  1799. if (!arch_kprobe_on_func_entry(offset))
  1800. return -EINVAL;
  1801. return 0;
  1802. }
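/*
 * Illustrative sketch only (compiled out): how a caller (e.g. the kprobe
 * event code) might use kprobe_on_func_entry() to reject offsets that do
 * not land on a function entry. The helper name and messages are
 * assumptions for the example.
 */
#if 0 /* example only */
static int example_check_entry(const char *sym, unsigned long offs)
{
	int err = kprobe_on_func_entry(NULL, sym, offs);

	if (err == -EINVAL)
		pr_warn("%s+0x%lx is not a function entry\n", sym, offs);
	else if (err == -ENOENT)
		pr_warn("%s+0x%lx could not be resolved\n", sym, offs);
	return err;	/* 0 means sym+offs is a function entry */
}
#endif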
  1803. int register_kretprobe(struct kretprobe *rp)
  1804. {
  1805. int ret;
  1806. struct kretprobe_instance *inst;
  1807. int i;
  1808. void *addr;
  1809. ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
  1810. if (ret)
  1811. return ret;
1812. /* If only rp->kp.addr is specified, check whether it is being re-registered */
  1813. if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
  1814. return -EINVAL;
  1815. if (kretprobe_blacklist_size) {
  1816. addr = kprobe_addr(&rp->kp);
  1817. if (IS_ERR(addr))
  1818. return PTR_ERR(addr);
  1819. for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
  1820. if (kretprobe_blacklist[i].addr == addr)
  1821. return -EINVAL;
  1822. }
  1823. }
  1824. if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
  1825. return -E2BIG;
  1826. rp->kp.pre_handler = pre_handler_kretprobe;
  1827. rp->kp.post_handler = NULL;
  1828. rp->kp.fault_handler = NULL;
  1829. /* Pre-allocate memory for max kretprobe instances */
  1830. if (rp->maxactive <= 0) {
  1831. #ifdef CONFIG_PREEMPTION
  1832. rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
  1833. #else
  1834. rp->maxactive = num_possible_cpus();
  1835. #endif
  1836. }
  1837. raw_spin_lock_init(&rp->lock);
  1838. INIT_HLIST_HEAD(&rp->free_instances);
  1839. for (i = 0; i < rp->maxactive; i++) {
  1840. inst = kmalloc(sizeof(struct kretprobe_instance) +
  1841. rp->data_size, GFP_KERNEL);
  1842. if (inst == NULL) {
  1843. free_rp_inst(rp);
  1844. return -ENOMEM;
  1845. }
  1846. INIT_HLIST_NODE(&inst->hlist);
  1847. hlist_add_head(&inst->hlist, &rp->free_instances);
  1848. }
  1849. rp->nmissed = 0;
  1850. /* Establish function entry probe point */
  1851. ret = register_kprobe(&rp->kp);
  1852. if (ret != 0)
  1853. free_rp_inst(rp);
  1854. return ret;
  1855. }
  1856. EXPORT_SYMBOL_GPL(register_kretprobe);
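/*
 * Illustrative sketch only (compiled out): a minimal kretprobe user in the
 * spirit of samples/kprobes/kretprobe_example.c. The target symbol
 * "kernel_clone" and the per-instance data layout are assumptions for the
 * example, not part of this file.
 */
#if 0 /* example only */
#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/ptrace.h>

struct example_data {
	ktime_t entry_stamp;		/* filled in by the entry handler */
};

static int example_entry_handler(struct kretprobe_instance *ri,
				 struct pt_regs *regs)
{
	struct example_data *data = (struct example_data *)ri->data;

	data->entry_stamp = ktime_get();
	return 0;	/* 0 arms the return probe for this instance */
}

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	struct example_data *data = (struct example_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	pr_info("returned %lu after %lld ns\n",
		regs_return_value(regs), delta);
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "kernel_clone",	/* assumed target symbol */
	.entry_handler	= example_entry_handler,
	.handler	= example_ret_handler,
	.data_size	= sizeof(struct example_data),
	.maxactive	= 20,	/* allow up to 20 concurrent instances */
};

static int __init kretprobe_example_init(void)
{
	return register_kretprobe(&example_krp);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&example_krp);
	pr_info("missed %d instances\n", example_krp.nmissed);
}
#endif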
  1857. int register_kretprobes(struct kretprobe **rps, int num)
  1858. {
  1859. int ret = 0, i;
  1860. if (num <= 0)
  1861. return -EINVAL;
  1862. for (i = 0; i < num; i++) {
  1863. ret = register_kretprobe(rps[i]);
  1864. if (ret < 0) {
  1865. if (i > 0)
  1866. unregister_kretprobes(rps, i);
  1867. break;
  1868. }
  1869. }
  1870. return ret;
  1871. }
  1872. EXPORT_SYMBOL_GPL(register_kretprobes);
  1873. void unregister_kretprobe(struct kretprobe *rp)
  1874. {
  1875. unregister_kretprobes(&rp, 1);
  1876. }
  1877. EXPORT_SYMBOL_GPL(unregister_kretprobe);
  1878. void unregister_kretprobes(struct kretprobe **rps, int num)
  1879. {
  1880. int i;
  1881. if (num <= 0)
  1882. return;
  1883. mutex_lock(&kprobe_mutex);
  1884. for (i = 0; i < num; i++)
  1885. if (__unregister_kprobe_top(&rps[i]->kp) < 0)
  1886. rps[i]->kp.addr = NULL;
  1887. mutex_unlock(&kprobe_mutex);
  1888. synchronize_rcu();
  1889. for (i = 0; i < num; i++) {
  1890. if (rps[i]->kp.addr) {
  1891. __unregister_kprobe_bottom(&rps[i]->kp);
  1892. cleanup_rp_inst(rps[i]);
  1893. }
  1894. }
  1895. }
  1896. EXPORT_SYMBOL_GPL(unregister_kretprobes);
  1897. #else /* CONFIG_KRETPROBES */
  1898. int register_kretprobe(struct kretprobe *rp)
  1899. {
  1900. return -ENOSYS;
  1901. }
  1902. EXPORT_SYMBOL_GPL(register_kretprobe);
  1903. int register_kretprobes(struct kretprobe **rps, int num)
  1904. {
  1905. return -ENOSYS;
  1906. }
  1907. EXPORT_SYMBOL_GPL(register_kretprobes);
  1908. void unregister_kretprobe(struct kretprobe *rp)
  1909. {
  1910. }
  1911. EXPORT_SYMBOL_GPL(unregister_kretprobe);
  1912. void unregister_kretprobes(struct kretprobe **rps, int num)
  1913. {
  1914. }
  1915. EXPORT_SYMBOL_GPL(unregister_kretprobes);
  1916. static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
  1917. {
  1918. return 0;
  1919. }
  1920. NOKPROBE_SYMBOL(pre_handler_kretprobe);
  1921. #endif /* CONFIG_KRETPROBES */
1922. /* Mark the kprobe as gone and remove its instruction buffer. */
  1923. static void kill_kprobe(struct kprobe *p)
  1924. {
  1925. struct kprobe *kp;
  1926. lockdep_assert_held(&kprobe_mutex);
  1927. if (WARN_ON_ONCE(kprobe_gone(p)))
  1928. return;
  1929. p->flags |= KPROBE_FLAG_GONE;
  1930. if (kprobe_aggrprobe(p)) {
  1931. /*
  1932. * If this is an aggr_kprobe, we have to list all the
  1933. * chained probes and mark them GONE.
  1934. */
  1935. list_for_each_entry(kp, &p->list, list)
  1936. kp->flags |= KPROBE_FLAG_GONE;
  1937. p->post_handler = NULL;
  1938. kill_optimized_kprobe(p);
  1939. }
  1940. /*
  1941. * Here, we can remove insn_slot safely, because no thread calls
  1942. * the original probed function (which will be freed soon) any more.
  1943. */
  1944. arch_remove_kprobe(p);
  1945. /*
1946. * The module is going away. We should disarm any kprobe which
1947. * is using ftrace, because the ftrace framework is still available
1948. * at the MODULE_STATE_GOING notification.
  1949. */
  1950. if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
  1951. disarm_kprobe_ftrace(p);
  1952. }
  1953. /* Disable one kprobe */
  1954. int disable_kprobe(struct kprobe *kp)
  1955. {
  1956. int ret = 0;
  1957. struct kprobe *p;
  1958. mutex_lock(&kprobe_mutex);
  1959. /* Disable this kprobe */
  1960. p = __disable_kprobe(kp);
  1961. if (IS_ERR(p))
  1962. ret = PTR_ERR(p);
  1963. mutex_unlock(&kprobe_mutex);
  1964. return ret;
  1965. }
  1966. EXPORT_SYMBOL_GPL(disable_kprobe);
  1967. /* Enable one kprobe */
  1968. int enable_kprobe(struct kprobe *kp)
  1969. {
  1970. int ret = 0;
  1971. struct kprobe *p;
  1972. mutex_lock(&kprobe_mutex);
  1973. /* Check whether specified probe is valid. */
  1974. p = __get_valid_kprobe(kp);
  1975. if (unlikely(p == NULL)) {
  1976. ret = -EINVAL;
  1977. goto out;
  1978. }
  1979. if (kprobe_gone(kp)) {
1980. /* This kprobe has gone; we can't enable it. */
  1981. ret = -EINVAL;
  1982. goto out;
  1983. }
  1984. if (p != kp)
  1985. kp->flags &= ~KPROBE_FLAG_DISABLED;
  1986. if (!kprobes_all_disarmed && kprobe_disabled(p)) {
  1987. p->flags &= ~KPROBE_FLAG_DISABLED;
  1988. ret = arm_kprobe(p);
  1989. if (ret)
  1990. p->flags |= KPROBE_FLAG_DISABLED;
  1991. }
  1992. out:
  1993. mutex_unlock(&kprobe_mutex);
  1994. return ret;
  1995. }
  1996. EXPORT_SYMBOL_GPL(enable_kprobe);
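/*
 * Illustrative sketch only (compiled out): pausing and resuming an
 * already-registered probe with disable_kprobe()/enable_kprobe() instead
 * of unregistering it. The helper names and their callers are assumptions.
 */
#if 0 /* example only */
static void example_pause_probe(struct kprobe *kp)
{
	/* The probe stays registered but stops firing. */
	int err = disable_kprobe(kp);

	if (err)
		pr_warn("disable_kprobe failed: %d\n", err);
}

static void example_resume_probe(struct kprobe *kp)
{
	/* Re-arms the probe unless it has gone (e.g. its module was unloaded). */
	int err = enable_kprobe(kp);

	if (err)
		pr_warn("enable_kprobe failed: %d\n", err);
}
#endif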
1997. /* Callers must NOT call this on the usual path. This is only for critical cases. */
  1998. void dump_kprobe(struct kprobe *kp)
  1999. {
  2000. pr_err("Dumping kprobe:\n");
  2001. pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
  2002. kp->symbol_name, kp->offset, kp->addr);
  2003. }
  2004. NOKPROBE_SYMBOL(dump_kprobe);
  2005. int kprobe_add_ksym_blacklist(unsigned long entry)
  2006. {
  2007. struct kprobe_blacklist_entry *ent;
  2008. unsigned long offset = 0, size = 0;
  2009. if (!kernel_text_address(entry) ||
  2010. !kallsyms_lookup_size_offset(entry, &size, &offset))
  2011. return -EINVAL;
  2012. ent = kmalloc(sizeof(*ent), GFP_KERNEL);
  2013. if (!ent)
  2014. return -ENOMEM;
  2015. ent->start_addr = entry;
  2016. ent->end_addr = entry + size;
  2017. INIT_LIST_HEAD(&ent->list);
  2018. list_add_tail(&ent->list, &kprobe_blacklist);
  2019. return (int)size;
  2020. }
  2021. /* Add all symbols in given area into kprobe blacklist */
  2022. int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
  2023. {
  2024. unsigned long entry;
  2025. int ret = 0;
  2026. for (entry = start; entry < end; entry += ret) {
  2027. ret = kprobe_add_ksym_blacklist(entry);
  2028. if (ret < 0)
  2029. return ret;
  2030. if (ret == 0) /* In case of alias symbol */
  2031. ret = 1;
  2032. }
  2033. return 0;
  2034. }
  2035. /* Remove all symbols in given area from kprobe blacklist */
  2036. static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
  2037. {
  2038. struct kprobe_blacklist_entry *ent, *n;
  2039. list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
  2040. if (ent->start_addr < start || ent->start_addr >= end)
  2041. continue;
  2042. list_del(&ent->list);
  2043. kfree(ent);
  2044. }
  2045. }
  2046. static void kprobe_remove_ksym_blacklist(unsigned long entry)
  2047. {
  2048. kprobe_remove_area_blacklist(entry, entry + 1);
  2049. }
  2050. int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
  2051. char *type, char *sym)
  2052. {
  2053. return -ERANGE;
  2054. }
  2055. int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
  2056. char *sym)
  2057. {
  2058. #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  2059. if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
  2060. return 0;
  2061. #ifdef CONFIG_OPTPROBES
  2062. if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
  2063. return 0;
  2064. #endif
  2065. #endif
  2066. if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
  2067. return 0;
  2068. return -ERANGE;
  2069. }
  2070. int __init __weak arch_populate_kprobe_blacklist(void)
  2071. {
  2072. return 0;
  2073. }
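/*
 * Illustrative sketch only (compiled out): what an architecture override of
 * arch_populate_kprobe_blacklist() might look like. The section symbols
 * __example_entry_text_start/_end are assumptions; a real architecture
 * would pass its own entry/exception text range to
 * kprobe_add_area_blacklist().
 */
#if 0 /* example only */
extern char __example_entry_text_start[], __example_entry_text_end[];

int __init arch_populate_kprobe_blacklist(void)
{
	/* Blacklist every symbol inside the (assumed) entry text range. */
	return kprobe_add_area_blacklist(
			(unsigned long)__example_entry_text_start,
			(unsigned long)__example_entry_text_end);
}
#endif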
  2074. /*
  2075. * Lookup and populate the kprobe_blacklist.
  2076. *
  2077. * Unlike the kretprobe blacklist, we'll need to determine
2078. * the range of addresses that belong to these functions,
  2079. * since a kprobe need not necessarily be at the beginning
  2080. * of a function.
  2081. */
  2082. static int __init populate_kprobe_blacklist(unsigned long *start,
  2083. unsigned long *end)
  2084. {
  2085. unsigned long entry;
  2086. unsigned long *iter;
  2087. int ret;
  2088. for (iter = start; iter < end; iter++) {
  2089. entry = arch_deref_entry_point((void *)*iter);
  2090. ret = kprobe_add_ksym_blacklist(entry);
  2091. if (ret == -EINVAL)
  2092. continue;
  2093. if (ret < 0)
  2094. return ret;
  2095. }
  2096. /* Symbols in __kprobes_text are blacklisted */
  2097. ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
  2098. (unsigned long)__kprobes_text_end);
  2099. if (ret)
  2100. return ret;
  2101. /* Symbols in noinstr section are blacklisted */
  2102. ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
  2103. (unsigned long)__noinstr_text_end);
  2104. return ret ? : arch_populate_kprobe_blacklist();
  2105. }
  2106. static void add_module_kprobe_blacklist(struct module *mod)
  2107. {
  2108. unsigned long start, end;
  2109. int i;
  2110. if (mod->kprobe_blacklist) {
  2111. for (i = 0; i < mod->num_kprobe_blacklist; i++)
  2112. kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
  2113. }
  2114. start = (unsigned long)mod->kprobes_text_start;
  2115. if (start) {
  2116. end = start + mod->kprobes_text_size;
  2117. kprobe_add_area_blacklist(start, end);
  2118. }
  2119. start = (unsigned long)mod->noinstr_text_start;
  2120. if (start) {
  2121. end = start + mod->noinstr_text_size;
  2122. kprobe_add_area_blacklist(start, end);
  2123. }
  2124. }
  2125. static void remove_module_kprobe_blacklist(struct module *mod)
  2126. {
  2127. unsigned long start, end;
  2128. int i;
  2129. if (mod->kprobe_blacklist) {
  2130. for (i = 0; i < mod->num_kprobe_blacklist; i++)
  2131. kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
  2132. }
  2133. start = (unsigned long)mod->kprobes_text_start;
  2134. if (start) {
  2135. end = start + mod->kprobes_text_size;
  2136. kprobe_remove_area_blacklist(start, end);
  2137. }
  2138. start = (unsigned long)mod->noinstr_text_start;
  2139. if (start) {
  2140. end = start + mod->noinstr_text_size;
  2141. kprobe_remove_area_blacklist(start, end);
  2142. }
  2143. }
2144. /* Module notifier callback, checking kprobes on the module */
  2145. static int kprobes_module_callback(struct notifier_block *nb,
  2146. unsigned long val, void *data)
  2147. {
  2148. struct module *mod = data;
  2149. struct hlist_head *head;
  2150. struct kprobe *p;
  2151. unsigned int i;
  2152. int checkcore = (val == MODULE_STATE_GOING);
  2153. if (val == MODULE_STATE_COMING) {
  2154. mutex_lock(&kprobe_mutex);
  2155. add_module_kprobe_blacklist(mod);
  2156. mutex_unlock(&kprobe_mutex);
  2157. }
  2158. if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
  2159. return NOTIFY_DONE;
  2160. /*
2161. * When MODULE_STATE_GOING is notified, both the module's .text and
2162. * .init.text sections will be freed. When MODULE_STATE_LIVE is
2163. * notified, only the .init.text section will be freed. We need to
2164. * disable the kprobes which have been inserted into those sections.
  2165. */
  2166. mutex_lock(&kprobe_mutex);
  2167. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  2168. head = &kprobe_table[i];
  2169. hlist_for_each_entry(p, head, hlist) {
  2170. if (kprobe_gone(p))
  2171. continue;
  2172. if (within_module_init((unsigned long)p->addr, mod) ||
  2173. (checkcore &&
  2174. within_module_core((unsigned long)p->addr, mod))) {
  2175. /*
2176. * The vaddr at which this probe is installed will soon
2177. * be vfreed, but not synced to disk. Hence,
2178. * disarming the breakpoint isn't needed.
2179. *
2180. * Note, this will also move any optimized probes
2181. * that are pending removal from their
2182. * corresponding lists to the freeing_list, so they
2183. * will not be touched by the delayed
2184. * kprobe_optimizer work handler.
  2185. */
  2186. kill_kprobe(p);
  2187. }
  2188. }
  2189. }
  2190. if (val == MODULE_STATE_GOING)
  2191. remove_module_kprobe_blacklist(mod);
  2192. mutex_unlock(&kprobe_mutex);
  2193. return NOTIFY_DONE;
  2194. }
  2195. static struct notifier_block kprobe_module_nb = {
  2196. .notifier_call = kprobes_module_callback,
  2197. .priority = 0
  2198. };
  2199. /* Markers of _kprobe_blacklist section */
  2200. extern unsigned long __start_kprobe_blacklist[];
  2201. extern unsigned long __stop_kprobe_blacklist[];
  2202. void kprobe_free_init_mem(void)
  2203. {
  2204. void *start = (void *)(&__init_begin);
  2205. void *end = (void *)(&__init_end);
  2206. struct hlist_head *head;
  2207. struct kprobe *p;
  2208. int i;
  2209. mutex_lock(&kprobe_mutex);
  2210. /* Kill all kprobes on initmem */
  2211. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  2212. head = &kprobe_table[i];
  2213. hlist_for_each_entry(p, head, hlist) {
  2214. if (start <= (void *)p->addr && (void *)p->addr < end)
  2215. kill_kprobe(p);
  2216. }
  2217. }
  2218. mutex_unlock(&kprobe_mutex);
  2219. }
  2220. static int __init init_kprobes(void)
  2221. {
  2222. int i, err = 0;
  2223. /* FIXME allocate the probe table, currently defined statically */
  2224. /* initialize all list heads */
  2225. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  2226. INIT_HLIST_HEAD(&kprobe_table[i]);
  2227. INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
  2228. raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
  2229. }
  2230. err = populate_kprobe_blacklist(__start_kprobe_blacklist,
  2231. __stop_kprobe_blacklist);
  2232. if (err) {
  2233. pr_err("kprobes: failed to populate blacklist: %d\n", err);
  2234. pr_err("Please take care of using kprobes.\n");
  2235. }
  2236. if (kretprobe_blacklist_size) {
  2237. /* lookup the function address from its name */
  2238. for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
  2239. kretprobe_blacklist[i].addr =
  2240. kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
  2241. if (!kretprobe_blacklist[i].addr)
  2242. printk("kretprobe: lookup failed: %s\n",
  2243. kretprobe_blacklist[i].name);
  2244. }
  2245. }
  2246. /* By default, kprobes are armed */
  2247. kprobes_all_disarmed = false;
  2248. #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
  2249. /* Init kprobe_optinsn_slots for allocation */
  2250. kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
  2251. #endif
  2252. err = arch_init_kprobes();
  2253. if (!err)
  2254. err = register_die_notifier(&kprobe_exceptions_nb);
  2255. if (!err)
  2256. err = register_module_notifier(&kprobe_module_nb);
  2257. kprobes_initialized = (err == 0);
  2258. if (!err)
  2259. init_test_probes();
  2260. return err;
  2261. }
  2262. early_initcall(init_kprobes);
  2263. #if defined(CONFIG_OPTPROBES)
  2264. static int __init init_optprobes(void)
  2265. {
  2266. /*
2267. * Enable kprobe optimization - this kicks the optimizer, which
2268. * depends on synchronize_rcu_tasks() and ksoftirqd; those are
2269. * not yet available at early initcall time. So delay the optimization.
  2270. */
  2271. optimize_all_kprobes();
  2272. return 0;
  2273. }
  2274. subsys_initcall(init_optprobes);
  2275. #endif
  2276. #ifdef CONFIG_DEBUG_FS
  2277. static void report_probe(struct seq_file *pi, struct kprobe *p,
  2278. const char *sym, int offset, char *modname, struct kprobe *pp)
  2279. {
  2280. char *kprobe_type;
  2281. void *addr = p->addr;
  2282. if (p->pre_handler == pre_handler_kretprobe)
  2283. kprobe_type = "r";
  2284. else
  2285. kprobe_type = "k";
  2286. if (!kallsyms_show_value(pi->file->f_cred))
  2287. addr = NULL;
  2288. if (sym)
  2289. seq_printf(pi, "%px %s %s+0x%x %s ",
  2290. addr, kprobe_type, sym, offset,
  2291. (modname ? modname : " "));
  2292. else /* try to use %pS */
  2293. seq_printf(pi, "%px %s %pS ",
  2294. addr, kprobe_type, p->addr);
  2295. if (!pp)
  2296. pp = p;
  2297. seq_printf(pi, "%s%s%s%s\n",
  2298. (kprobe_gone(p) ? "[GONE]" : ""),
  2299. ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
  2300. (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
  2301. (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
  2302. }
  2303. static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
  2304. {
  2305. return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
  2306. }
  2307. static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
  2308. {
  2309. (*pos)++;
  2310. if (*pos >= KPROBE_TABLE_SIZE)
  2311. return NULL;
  2312. return pos;
  2313. }
  2314. static void kprobe_seq_stop(struct seq_file *f, void *v)
  2315. {
  2316. /* Nothing to do */
  2317. }
  2318. static int show_kprobe_addr(struct seq_file *pi, void *v)
  2319. {
  2320. struct hlist_head *head;
  2321. struct kprobe *p, *kp;
  2322. const char *sym = NULL;
  2323. unsigned int i = *(loff_t *) v;
  2324. unsigned long offset = 0;
  2325. char *modname, namebuf[KSYM_NAME_LEN];
  2326. head = &kprobe_table[i];
  2327. preempt_disable();
  2328. hlist_for_each_entry_rcu(p, head, hlist) {
  2329. sym = kallsyms_lookup((unsigned long)p->addr, NULL,
  2330. &offset, &modname, namebuf);
  2331. if (kprobe_aggrprobe(p)) {
  2332. list_for_each_entry_rcu(kp, &p->list, list)
  2333. report_probe(pi, kp, sym, offset, modname, p);
  2334. } else
  2335. report_probe(pi, p, sym, offset, modname, NULL);
  2336. }
  2337. preempt_enable();
  2338. return 0;
  2339. }
  2340. static const struct seq_operations kprobes_sops = {
  2341. .start = kprobe_seq_start,
  2342. .next = kprobe_seq_next,
  2343. .stop = kprobe_seq_stop,
  2344. .show = show_kprobe_addr
  2345. };
  2346. DEFINE_SEQ_ATTRIBUTE(kprobes);
2347. /* kprobes/blacklist -- shows which functions cannot be probed */
  2348. static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
  2349. {
  2350. mutex_lock(&kprobe_mutex);
  2351. return seq_list_start(&kprobe_blacklist, *pos);
  2352. }
  2353. static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
  2354. {
  2355. return seq_list_next(v, &kprobe_blacklist, pos);
  2356. }
  2357. static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
  2358. {
  2359. struct kprobe_blacklist_entry *ent =
  2360. list_entry(v, struct kprobe_blacklist_entry, list);
  2361. /*
2362. * If /proc/kallsyms is not showing kernel addresses, we won't
  2363. * show them here either.
  2364. */
  2365. if (!kallsyms_show_value(m->file->f_cred))
  2366. seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
  2367. (void *)ent->start_addr);
  2368. else
  2369. seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
  2370. (void *)ent->end_addr, (void *)ent->start_addr);
  2371. return 0;
  2372. }
  2373. static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
  2374. {
  2375. mutex_unlock(&kprobe_mutex);
  2376. }
  2377. static const struct seq_operations kprobe_blacklist_sops = {
  2378. .start = kprobe_blacklist_seq_start,
  2379. .next = kprobe_blacklist_seq_next,
  2380. .stop = kprobe_blacklist_seq_stop,
  2381. .show = kprobe_blacklist_seq_show,
  2382. };
  2383. DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
  2384. static int arm_all_kprobes(void)
  2385. {
  2386. struct hlist_head *head;
  2387. struct kprobe *p;
  2388. unsigned int i, total = 0, errors = 0;
  2389. int err, ret = 0;
  2390. mutex_lock(&kprobe_mutex);
  2391. /* If kprobes are armed, just return */
  2392. if (!kprobes_all_disarmed)
  2393. goto already_enabled;
  2394. /*
  2395. * optimize_kprobe() called by arm_kprobe() checks
2396. * kprobes_all_disarmed, so clear kprobes_all_disarmed before
2397. * calling arm_kprobe().
  2398. */
  2399. kprobes_all_disarmed = false;
  2400. /* Arming kprobes doesn't optimize kprobe itself */
  2401. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  2402. head = &kprobe_table[i];
  2403. /* Arm all kprobes on a best-effort basis */
  2404. hlist_for_each_entry(p, head, hlist) {
  2405. if (!kprobe_disabled(p)) {
  2406. err = arm_kprobe(p);
  2407. if (err) {
  2408. errors++;
  2409. ret = err;
  2410. }
  2411. total++;
  2412. }
  2413. }
  2414. }
  2415. if (errors)
  2416. pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
  2417. errors, total);
  2418. else
  2419. pr_info("Kprobes globally enabled\n");
  2420. already_enabled:
  2421. mutex_unlock(&kprobe_mutex);
  2422. return ret;
  2423. }
  2424. static int disarm_all_kprobes(void)
  2425. {
  2426. struct hlist_head *head;
  2427. struct kprobe *p;
  2428. unsigned int i, total = 0, errors = 0;
  2429. int err, ret = 0;
  2430. mutex_lock(&kprobe_mutex);
  2431. /* If kprobes are already disarmed, just return */
  2432. if (kprobes_all_disarmed) {
  2433. mutex_unlock(&kprobe_mutex);
  2434. return 0;
  2435. }
  2436. kprobes_all_disarmed = true;
  2437. for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
  2438. head = &kprobe_table[i];
  2439. /* Disarm all kprobes on a best-effort basis */
  2440. hlist_for_each_entry(p, head, hlist) {
  2441. if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
  2442. err = disarm_kprobe(p, false);
  2443. if (err) {
  2444. errors++;
  2445. ret = err;
  2446. }
  2447. total++;
  2448. }
  2449. }
  2450. }
  2451. if (errors)
  2452. pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
  2453. errors, total);
  2454. else
  2455. pr_info("Kprobes globally disabled\n");
  2456. mutex_unlock(&kprobe_mutex);
2457. /* Wait for the optimizer to finish disarming all kprobes */
  2458. wait_for_kprobe_optimizer();
  2459. return ret;
  2460. }
  2461. /*
  2462. * XXX: The debugfs bool file interface doesn't allow for callbacks
  2463. * when the bool state is switched. We can reuse that facility when
2464. * it becomes available.
  2465. */
  2466. static ssize_t read_enabled_file_bool(struct file *file,
  2467. char __user *user_buf, size_t count, loff_t *ppos)
  2468. {
  2469. char buf[3];
  2470. if (!kprobes_all_disarmed)
  2471. buf[0] = '1';
  2472. else
  2473. buf[0] = '0';
  2474. buf[1] = '\n';
  2475. buf[2] = 0x00;
  2476. return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
  2477. }
  2478. static ssize_t write_enabled_file_bool(struct file *file,
  2479. const char __user *user_buf, size_t count, loff_t *ppos)
  2480. {
  2481. char buf[32];
  2482. size_t buf_size;
  2483. int ret = 0;
  2484. buf_size = min(count, (sizeof(buf)-1));
  2485. if (copy_from_user(buf, user_buf, buf_size))
  2486. return -EFAULT;
  2487. buf[buf_size] = '\0';
  2488. switch (buf[0]) {
  2489. case 'y':
  2490. case 'Y':
  2491. case '1':
  2492. ret = arm_all_kprobes();
  2493. break;
  2494. case 'n':
  2495. case 'N':
  2496. case '0':
  2497. ret = disarm_all_kprobes();
  2498. break;
  2499. default:
  2500. return -EINVAL;
  2501. }
  2502. if (ret)
  2503. return ret;
  2504. return count;
  2505. }
  2506. static const struct file_operations fops_kp = {
  2507. .read = read_enabled_file_bool,
  2508. .write = write_enabled_file_bool,
  2509. .llseek = default_llseek,
  2510. };
  2511. static int __init debugfs_kprobe_init(void)
  2512. {
  2513. struct dentry *dir;
  2514. dir = debugfs_create_dir("kprobes", NULL);
  2515. debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
  2516. debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
  2517. debugfs_create_file("blacklist", 0400, dir, NULL,
  2518. &kprobe_blacklist_fops);
  2519. return 0;
  2520. }
  2521. late_initcall(debugfs_kprobe_init);
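/*
 * Note (illustrative): with debugfs mounted at /sys/kernel/debug, the files
 * created above are typically used as follows:
 *
 *   kprobes/list      - every registered probe, annotated with
 *                       [GONE]/[DISABLED]/[OPTIMIZED]/[FTRACE]
 *   kprobes/blacklist - address ranges in which probes are refused
 *   kprobes/enabled   - write '0' (or 'n'/'N') to disarm all kprobes,
 *                       '1' (or 'y'/'Y') to re-arm them
 */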
  2522. #endif /* CONFIG_DEBUG_FS */