
// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;
	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);
/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
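
/*
 * To illustrate the protocol above (an added annotation, not original
 * kernel documentation): key->enabled moves through three states.
 *
 *	 0	disabled, no users
 *	-1	the first static_key_slow_inc() is patching code;
 *		concurrent callers fall out of the cmpxchg fast path
 *		(v > 0 fails) and serialize on jump_label_mutex
 *	n > 0	enabled with n users; further increments take the
 *		lockless cmpxchg fast path
 *
 * The atomic_set_release() pairs with the atomic_cmpxchg() in the fast
 * path, so any caller that observes a positive count also observes the
 * completed text patching.
 */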
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
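
/*
 * Usage sketch (illustrative; "my_feature" is a hypothetical key, not
 * defined in this file): static_key_enable()/static_key_disable() treat
 * the key as a boolean, while static_key_slow_inc()/_dec() treat it as
 * a reference count so that independent users can stack:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	static_key_enable(&my_feature.key);	// force on (0 -> 1)
 *	static_key_slow_inc(&my_feature.key);	// one more user (1 -> 2)
 *	static_key_slow_dec(&my_feature.key);	// drop that user (2 -> 1)
 *
 * Mixing the two styles on one key is fragile: the enable/disable paths
 * above WARN unless the count is exactly 0 or 1.
 */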
static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
			   unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
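
/*
 * Rate-limited key usage sketch (illustrative; "my_key" is hypothetical).
 * Deferring the decrement batches rapid enable/disable flips so the text
 * is not repatched more often than once per timeout period:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// defer decs by ~1 second
 *	static_key_slow_inc(&my_key.key);	// patches immediately
 *	static_key_slow_dec_deferred(&my_key);	// patches at most 1/sec
 *
 * static_key_slow_dec_deferred() is the jump_label_ratelimit.h wrapper
 * around __static_key_slow_dec_deferred() above.
 */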
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}
/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
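
/*
 * Bit-layout sketch of the union (added annotation; the masks come from
 * linux/jump_label.h):
 *
 *	key->type / key->entries / key->next
 *	[ pointer, at least 4-byte aligned     | LINKED | TRUE ]
 *	                                         bit 1    bit 0
 *
 * JUMP_TYPE_TRUE records the initial branch direction, JUMP_TYPE_LINKED
 * selects between a jump_entry table (clear) and a static_key_mod list
 * (set), and JUMP_TYPE_MASK covers both bits for the save/restore dance
 * performed above.
 */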
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
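
/*
 * The XOR above resolves to the table below (an added restatement of
 * the linux/jump_label.h comment): a site whose compile-time branch
 * encoding already matches the key's value keeps its NOP, otherwise it
 * must be patched to a JMP.
 *
 *	enabled	branch	jump_label_type()
 *	   0	   0	JUMP_LABEL_NOP
 *	   0	   1	JUMP_LABEL_JMP
 *	   1	   0	JUMP_LABEL_JMP
 *	   1	   1	JUMP_LABEL_NOP
 */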
static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;
	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
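
/*
 * Caller sketch (illustrative): text-patching users such as kprobes
 * check this, under jump_label_lock(), before touching an instruction:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr))
 *		ret = -EBUSY;	// a jump label owns these bytes
 *	jump_label_unlock();
 */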
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */