mmu_notifier.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/mm/mmu_notifier.c
  4. *
  5. * Copyright (C) 2008 Qumranet, Inc.
  6. * Copyright (C) 2008 SGI
  7. * Christoph Lameter <cl@linux.com>
  8. */
  9. #include <linux/rculist.h>
  10. #include <linux/mmu_notifier.h>
  11. #include <linux/export.h>
  12. #include <linux/mm.h>
  13. #include <linux/err.h>
  14. #include <linux/interval_tree.h>
  15. #include <linux/srcu.h>
  16. #include <linux/rcupdate.h>
  17. #include <linux/sched.h>
  18. #include <linux/sched/mm.h>
  19. #include <linux/slab.h>
  20. /* global SRCU for all MMs */
  21. DEFINE_STATIC_SRCU(srcu);
  22. #ifdef CONFIG_LOCKDEP
  23. struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
  24. .name = "mmu_notifier_invalidate_range_start"
  25. };
  26. #endif
  27. /*
  28. * The mmu_notifier_subscriptions structure is allocated and installed in
  29. * mm->notifier_subscriptions inside the mm_take_all_locks() protected
  30. * critical section and it's released only when mm_count reaches zero
  31. * in mmdrop().
  32. */
  33. struct mmu_notifier_subscriptions {
  34. /*
  35. * WARNING: hdr should be the first member of this structure
  36. * so that it can be typecasted into mmu_notifier_subscriptions_hdr.
  37. * This is required to avoid KMI CRC breakage.
  38. */
  39. struct mmu_notifier_subscriptions_hdr hdr;
  40. /* all mmu notifiers registered in this mm are queued in this list */
  41. struct hlist_head list;
  42. bool has_itree;
  43. /* to serialize the list modifications and hlist_unhashed */
  44. spinlock_t lock;
  45. unsigned long invalidate_seq;
  46. unsigned long active_invalidate_ranges;
  47. struct rb_root_cached itree;
  48. wait_queue_head_t wq;
  49. struct hlist_head deferred_list;
  50. };
  51. /*
  52. * This is a collision-retry read-side/write-side 'lock', a lot like a
  53. * seqcount; however, this allows multiple write sides to hold it at
  54. * once. Conceptually the write side is protecting the values of the PTEs in
  55. * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
  56. * writer exists.
  57. *
  58. * Note that the core mm creates nested invalidate_range_start()/end() regions
  59. * within the same thread, and runs invalidate_range_start()/end() in parallel
  60. * on multiple CPUs. This is designed to not reduce concurrency or block
  61. * progress on the mm side.
  62. *
  63. * As a secondary function, holding the full write side also serves to prevent
  64. * writers for the itree; this is an optimization to avoid extra locking
  65. * during invalidate_range_start/end notifiers.
  66. *
  67. * The write side has two states, fully excluded:
  68. * - mm->active_invalidate_ranges != 0
  69. * - subscriptions->invalidate_seq & 1 == True (odd)
  70. * - some range on the mm_struct is being invalidated
  71. * - the itree is not allowed to change
  72. *
  73. * And partially excluded:
  74. * - mm->active_invalidate_ranges != 0
  75. * - subscriptions->invalidate_seq & 1 == False (even)
  76. * - some range on the mm_struct is being invalidated
  77. * - the itree is allowed to change
  78. *
  79. * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
  80. * seq |= 1 # Begin writing
  81. * seq++ # Release the writing state
  82. * seq & 1 # True if a writer exists
  83. *
  84. * The latter state avoids some expensive work in inv_end in the common case of
  85. * no mmu_interval_notifier monitoring the VA.
  86. */
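/*
 * Worked example (editorial sketch based on the rules above, not part of the
 * original file): starting from the idle, even value invalidate_seq == 2,
 * two overlapping ranges that both hit the itree proceed as:
 *
 *   mn_itree_inv_start_range(A): active_invalidate_ranges = 1, seq |= 1 -> 3
 *   mn_itree_inv_start_range(B): active_invalidate_ranges = 2, seq stays 3
 *   mn_itree_inv_end() for A:    active_invalidate_ranges = 1, returns early
 *   mn_itree_inv_end() for B:    seq++ -> 4 (even, idle), deferred itree
 *                                updates applied, waiters woken
 *
 * A range that finds no node in the itree leaves the seq even, which is the
 * cheap inv_end path mentioned above.
 */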
  87. static bool
  88. mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
  89. {
  90. lockdep_assert_held(&subscriptions->lock);
  91. return subscriptions->invalidate_seq & 1;
  92. }
  93. static struct mmu_interval_notifier *
  94. mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
  95. const struct mmu_notifier_range *range,
  96. unsigned long *seq)
  97. {
  98. struct interval_tree_node *node;
  99. struct mmu_interval_notifier *res = NULL;
  100. spin_lock(&subscriptions->lock);
  101. subscriptions->active_invalidate_ranges++;
  102. node = interval_tree_iter_first(&subscriptions->itree, range->start,
  103. range->end - 1);
  104. if (node) {
  105. subscriptions->invalidate_seq |= 1;
  106. res = container_of(node, struct mmu_interval_notifier,
  107. interval_tree);
  108. }
  109. *seq = subscriptions->invalidate_seq;
  110. spin_unlock(&subscriptions->lock);
  111. return res;
  112. }
  113. static struct mmu_interval_notifier *
  114. mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
  115. const struct mmu_notifier_range *range)
  116. {
  117. struct interval_tree_node *node;
  118. node = interval_tree_iter_next(&interval_sub->interval_tree,
  119. range->start, range->end - 1);
  120. if (!node)
  121. return NULL;
  122. return container_of(node, struct mmu_interval_notifier, interval_tree);
  123. }
  124. static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
  125. {
  126. struct mmu_interval_notifier *interval_sub;
  127. struct hlist_node *next;
  128. spin_lock(&subscriptions->lock);
  129. if (--subscriptions->active_invalidate_ranges ||
  130. !mn_itree_is_invalidating(subscriptions)) {
  131. spin_unlock(&subscriptions->lock);
  132. return;
  133. }
  134. /* Make invalidate_seq even */
  135. subscriptions->invalidate_seq++;
  136. /*
  137. * The inv_end incorporates a deferred mechanism like rtnl_unlock().
  138. * Adds and removes are queued until the final inv_end happens, and
  139. * then they are processed. This arrangement for tree updates is used to
  140. * avoid using a blocking lock during invalidate_range_start.
  141. */
  142. hlist_for_each_entry_safe(interval_sub, next,
  143. &subscriptions->deferred_list,
  144. deferred_item) {
  145. if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
  146. interval_tree_insert(&interval_sub->interval_tree,
  147. &subscriptions->itree);
  148. else
  149. interval_tree_remove(&interval_sub->interval_tree,
  150. &subscriptions->itree);
  151. hlist_del(&interval_sub->deferred_item);
  152. }
  153. spin_unlock(&subscriptions->lock);
  154. wake_up_all(&subscriptions->wq);
  155. }
  156. /**
  157. * mmu_interval_read_begin - Begin a read side critical section against a VA
  158. * range
  159. * @interval_sub: The interval subscription
  160. *
  161. * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
  162. * collision-retry scheme similar to seqcount for the VA range under
  163. * subscription. If the mm invokes invalidation during the critical section
  164. * then mmu_interval_read_retry() will return true.
  165. *
  166. * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
  167. * require a blocking context. The critical region formed by this can sleep,
  168. * and the required 'user_lock' can also be a sleeping lock.
  169. *
  170. * The caller is required to provide a 'user_lock' to serialize both teardown
  171. * and setup.
  172. *
  173. * The return value should be passed to mmu_interval_read_retry().
  174. */
  175. unsigned long
  176. mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
  177. {
  178. struct mmu_notifier_subscriptions *subscriptions =
  179. interval_sub->mm->notifier_subscriptions;
  180. unsigned long seq;
  181. bool is_invalidating;
  182. /*
  183. * If the subscription has a different seq value under the user_lock
  184. * than we started with then it has collided.
  185. *
  186. * If the subscription currently has the same seq value as the
  187. * subscriptions seq, then it is currently between
  188. * invalidate_start/end and is colliding.
  189. *
  190. * The locking looks broadly like this:
  191. * mn_tree_invalidate_start(): mmu_interval_read_begin():
  192. * spin_lock
  193. * seq = READ_ONCE(interval_sub->invalidate_seq);
  194. * seq == subs->invalidate_seq
  195. * spin_unlock
  196. * spin_lock
  197. * seq = ++subscriptions->invalidate_seq
  198. * spin_unlock
  199. * op->invalidate_range():
  200. * user_lock
  201. * mmu_interval_set_seq()
  202. * interval_sub->invalidate_seq = seq
  203. * user_unlock
  204. *
  205. * [Required: mmu_interval_read_retry() == true]
  206. *
  207. * mn_itree_inv_end():
  208. * spin_lock
  209. * seq = ++subscriptions->invalidate_seq
  210. * spin_unlock
  211. *
  212. * user_lock
  213. * mmu_interval_read_retry():
  214. * interval_sub->invalidate_seq != seq
  215. * user_unlock
  216. *
  217. * Barriers are not needed here as any races here are closed by an
  218. * eventual mmu_interval_read_retry(), which provides a barrier via the
  219. * user_lock.
  220. */
  221. spin_lock(&subscriptions->lock);
  222. /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
  223. seq = READ_ONCE(interval_sub->invalidate_seq);
  224. is_invalidating = seq == subscriptions->invalidate_seq;
  225. spin_unlock(&subscriptions->lock);
  226. /*
  227. * interval_sub->invalidate_seq must always be set to an odd value via
  228. * mmu_interval_set_seq() using the provided cur_seq from
  229. * mn_itree_inv_start_range(). This ensures that if seq does wrap we
  230. * will always clear the below sleep in some reasonable time as
  231. * subscriptions->invalidate_seq is even in the idle state.
  232. */
  233. lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
  234. lock_map_release(&__mmu_notifier_invalidate_range_start_map);
  235. if (is_invalidating)
  236. wait_event(subscriptions->wq,
  237. READ_ONCE(subscriptions->invalidate_seq) != seq);
  238. /*
  239. * Notice that mmu_interval_read_retry() can already be true at this
  240. * point, avoiding loops here allows the caller to provide a global
  241. * time bound.
  242. */
  243. return seq;
  244. }
  245. EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
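/*
 * Typical read-side pattern (illustrative sketch only; 'driver_lock',
 * gather_pages() and program_sptes() are hypothetical driver pieces, not
 * kernel API):
 *
 *	again:
 *		seq = mmu_interval_read_begin(&interval_sub);
 *
 *		gather_pages();			// may fault and sleep
 *
 *		mutex_lock(&driver_lock);	// the 'user_lock' above
 *		if (mmu_interval_read_retry(&interval_sub, seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		program_sptes();		// no collision, SPTEs are valid
 *		mutex_unlock(&driver_lock);
 *
 * The subscription's invalidate() callback must take driver_lock and call
 * mmu_interval_set_seq() with the cur_seq it is handed, so that the retry
 * check above observes the collision.
 */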
  246. static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
  247. struct mm_struct *mm)
  248. {
  249. struct mmu_notifier_range range = {
  250. .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
  251. .event = MMU_NOTIFY_RELEASE,
  252. .mm = mm,
  253. .start = 0,
  254. .end = ULONG_MAX,
  255. };
  256. struct mmu_interval_notifier *interval_sub;
  257. unsigned long cur_seq;
  258. bool ret;
  259. for (interval_sub =
  260. mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
  261. interval_sub;
  262. interval_sub = mn_itree_inv_next(interval_sub, &range)) {
  263. ret = interval_sub->ops->invalidate(interval_sub, &range,
  264. cur_seq);
  265. WARN_ON(!ret);
  266. }
  267. mn_itree_inv_end(subscriptions);
  268. }
  269. /*
  270. * This function can't run concurrently against mmu_notifier_register
  271. * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
  272. * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
  273. * in parallel despite there being no task using this mm any more,
  274. * through the vmas outside of the exit_mmap context, such as with
  275. * vmtruncate. This serializes against mmu_notifier_unregister with
  276. * the notifier_subscriptions->lock in addition to SRCU and it serializes
  277. * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
  278. * can't go away from under us as exit_mmap holds an mm_count pin
  279. * itself.
  280. */
  281. static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
  282. struct mm_struct *mm)
  283. {
  284. struct mmu_notifier *subscription;
  285. int id;
  286. /*
  287. * SRCU here will block mmu_notifier_unregister until
  288. * ->release returns.
  289. */
  290. id = srcu_read_lock(&srcu);
  291. hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
  292. srcu_read_lock_held(&srcu))
  293. /*
  294. * If ->release runs before mmu_notifier_unregister it must be
  295. * handled, as it's the only way for the driver to flush all
  296. * existing sptes and stop the driver from establishing any more
  297. * sptes before all the pages in the mm are freed.
  298. */
  299. if (subscription->ops->release)
  300. subscription->ops->release(subscription, mm);
  301. spin_lock(&subscriptions->lock);
  302. while (unlikely(!hlist_empty(&subscriptions->list))) {
  303. subscription = hlist_entry(subscriptions->list.first,
  304. struct mmu_notifier, hlist);
  305. /*
  306. * We arrived before mmu_notifier_unregister so
  307. * mmu_notifier_unregister will do nothing other than to wait
  308. * for ->release to finish and for mmu_notifier_unregister to
  309. * return.
  310. */
  311. hlist_del_init_rcu(&subscription->hlist);
  312. }
  313. spin_unlock(&subscriptions->lock);
  314. srcu_read_unlock(&srcu, id);
  315. /*
  316. * synchronize_srcu here prevents mmu_notifier_release from returning to
  317. * exit_mmap (which would proceed with freeing all pages in the mm)
  318. * until the ->release method returns, if it was invoked by
  319. * mmu_notifier_unregister.
  320. *
  321. * The notifier_subscriptions can't go away from under us because
  322. * one mm_count is held by exit_mmap.
  323. */
  324. synchronize_srcu(&srcu);
  325. }
  326. void __mmu_notifier_release(struct mm_struct *mm)
  327. {
  328. struct mmu_notifier_subscriptions *subscriptions =
  329. mm->notifier_subscriptions;
  330. if (subscriptions->has_itree)
  331. mn_itree_release(subscriptions, mm);
  332. if (!hlist_empty(&subscriptions->list))
  333. mn_hlist_release(subscriptions, mm);
  334. }
  335. /*
  336. * If no young bitflag is supported by the hardware, ->clear_flush_young can
  337. * unmap the address and return 1 or 0 depending on whether the mapping previously
  338. * existed or not.
  339. */
  340. int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
  341. unsigned long start,
  342. unsigned long end)
  343. {
  344. struct mmu_notifier *subscription;
  345. int young = 0, id;
  346. id = srcu_read_lock(&srcu);
  347. hlist_for_each_entry_rcu(subscription,
  348. &mm->notifier_subscriptions->list, hlist,
  349. srcu_read_lock_held(&srcu)) {
  350. if (subscription->ops->clear_flush_young)
  351. young |= subscription->ops->clear_flush_young(
  352. subscription, mm, start, end);
  353. }
  354. srcu_read_unlock(&srcu, id);
  355. return young;
  356. }
  357. int __mmu_notifier_clear_young(struct mm_struct *mm,
  358. unsigned long start,
  359. unsigned long end)
  360. {
  361. struct mmu_notifier *subscription;
  362. int young = 0, id;
  363. id = srcu_read_lock(&srcu);
  364. hlist_for_each_entry_rcu(subscription,
  365. &mm->notifier_subscriptions->list, hlist,
  366. srcu_read_lock_held(&srcu)) {
  367. if (subscription->ops->clear_young)
  368. young |= subscription->ops->clear_young(subscription,
  369. mm, start, end);
  370. }
  371. srcu_read_unlock(&srcu, id);
  372. return young;
  373. }
  374. int __mmu_notifier_test_young(struct mm_struct *mm,
  375. unsigned long address)
  376. {
  377. struct mmu_notifier *subscription;
  378. int young = 0, id;
  379. id = srcu_read_lock(&srcu);
  380. hlist_for_each_entry_rcu(subscription,
  381. &mm->notifier_subscriptions->list, hlist,
  382. srcu_read_lock_held(&srcu)) {
  383. if (subscription->ops->test_young) {
  384. young = subscription->ops->test_young(subscription, mm,
  385. address);
  386. if (young)
  387. break;
  388. }
  389. }
  390. srcu_read_unlock(&srcu, id);
  391. return young;
  392. }
  393. void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
  394. pte_t pte)
  395. {
  396. struct mmu_notifier *subscription;
  397. int id;
  398. id = srcu_read_lock(&srcu);
  399. hlist_for_each_entry_rcu(subscription,
  400. &mm->notifier_subscriptions->list, hlist,
  401. srcu_read_lock_held(&srcu)) {
  402. if (subscription->ops->change_pte)
  403. subscription->ops->change_pte(subscription, mm, address,
  404. pte);
  405. }
  406. srcu_read_unlock(&srcu, id);
  407. }
  408. static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
  409. const struct mmu_notifier_range *range)
  410. {
  411. struct mmu_interval_notifier *interval_sub;
  412. unsigned long cur_seq;
  413. for (interval_sub =
  414. mn_itree_inv_start_range(subscriptions, range, &cur_seq);
  415. interval_sub;
  416. interval_sub = mn_itree_inv_next(interval_sub, range)) {
  417. bool ret;
  418. ret = interval_sub->ops->invalidate(interval_sub, range,
  419. cur_seq);
  420. if (!ret) {
  421. if (WARN_ON(mmu_notifier_range_blockable(range)))
  422. continue;
  423. goto out_would_block;
  424. }
  425. }
  426. return 0;
  427. out_would_block:
  428. /*
  429. * On -EAGAIN the non-blocking caller is not allowed to call
  430. * invalidate_range_end()
  431. */
  432. mn_itree_inv_end(subscriptions);
  433. return -EAGAIN;
  434. }
  435. static int mn_hlist_invalidate_range_start(
  436. struct mmu_notifier_subscriptions *subscriptions,
  437. struct mmu_notifier_range *range)
  438. {
  439. struct mmu_notifier *subscription;
  440. int ret = 0;
  441. int id;
  442. id = srcu_read_lock(&srcu);
  443. hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
  444. srcu_read_lock_held(&srcu)) {
  445. const struct mmu_notifier_ops *ops = subscription->ops;
  446. if (ops->invalidate_range_start) {
  447. int _ret;
  448. if (!mmu_notifier_range_blockable(range))
  449. non_block_start();
  450. _ret = ops->invalidate_range_start(subscription, range);
  451. if (!mmu_notifier_range_blockable(range))
  452. non_block_end();
  453. if (_ret) {
  454. pr_info("%pS callback failed with %d in %sblockable context.\n",
  455. ops->invalidate_range_start, _ret,
  456. !mmu_notifier_range_blockable(range) ?
  457. "non-" :
  458. "");
  459. WARN_ON(mmu_notifier_range_blockable(range) ||
  460. _ret != -EAGAIN);
  461. /*
  462. * We call all the notifiers on any EAGAIN;
  463. * there is no way for a notifier to know if
  464. * its start method failed, thus a start that
  465. * does EAGAIN can't also do end.
  466. */
  467. WARN_ON(ops->invalidate_range_end);
  468. ret = _ret;
  469. }
  470. }
  471. }
  472. if (ret) {
  473. /*
  474. * Must be non-blocking to get here. If there are multiple
  475. * notifiers and one or more failed start, any that succeeded
  476. * start are expecting their end to be called. Do so now.
  477. */
  478. hlist_for_each_entry_rcu(subscription, &subscriptions->list,
  479. hlist, srcu_read_lock_held(&srcu)) {
  480. if (!subscription->ops->invalidate_range_end)
  481. continue;
  482. subscription->ops->invalidate_range_end(subscription,
  483. range);
  484. }
  485. }
  486. srcu_read_unlock(&srcu, id);
  487. return ret;
  488. }
  489. int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
  490. {
  491. struct mmu_notifier_subscriptions *subscriptions =
  492. range->mm->notifier_subscriptions;
  493. int ret;
  494. if (subscriptions->has_itree) {
  495. ret = mn_itree_invalidate(subscriptions, range);
  496. if (ret)
  497. return ret;
  498. }
  499. if (!hlist_empty(&subscriptions->list))
  500. return mn_hlist_invalidate_range_start(subscriptions, range);
  501. return 0;
  502. }
  503. static void
  504. mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
  505. struct mmu_notifier_range *range, bool only_end)
  506. {
  507. struct mmu_notifier *subscription;
  508. int id;
  509. id = srcu_read_lock(&srcu);
  510. hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
  511. srcu_read_lock_held(&srcu)) {
  512. /*
  513. * Call invalidate_range() here too, to avoid the need for the
  514. * subsystem to register an invalidate_range_end()
  515. * callback when it already registers invalidate_range(). Usually a
  516. * subsystem registers either invalidate_range_start()/end() or
  517. * invalidate_range(), so this adds no additional overhead
  518. * (besides the pointer check).
  519. *
  520. * We skip the call to invalidate_range() when we know it is safe,
  521. * i.e. the call site used mmu_notifier_invalidate_range_only_end(),
  522. * which is safe when we know that a call to invalidate_range()
  523. * already happened under the page table lock.
  524. */
  525. if (!only_end && subscription->ops->invalidate_range)
  526. subscription->ops->invalidate_range(subscription,
  527. range->mm,
  528. range->start,
  529. range->end);
  530. if (subscription->ops->invalidate_range_end) {
  531. if (!mmu_notifier_range_blockable(range))
  532. non_block_start();
  533. subscription->ops->invalidate_range_end(subscription,
  534. range);
  535. if (!mmu_notifier_range_blockable(range))
  536. non_block_end();
  537. }
  538. }
  539. srcu_read_unlock(&srcu, id);
  540. }
  541. void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
  542. bool only_end)
  543. {
  544. struct mmu_notifier_subscriptions *subscriptions =
  545. range->mm->notifier_subscriptions;
  546. lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
  547. if (subscriptions->has_itree)
  548. mn_itree_inv_end(subscriptions);
  549. if (!hlist_empty(&subscriptions->list))
  550. mn_hlist_invalidate_end(subscriptions, range, only_end);
  551. lock_map_release(&__mmu_notifier_invalidate_range_start_map);
  552. }
  553. void __mmu_notifier_invalidate_range(struct mm_struct *mm,
  554. unsigned long start, unsigned long end)
  555. {
  556. struct mmu_notifier *subscription;
  557. int id;
  558. id = srcu_read_lock(&srcu);
  559. hlist_for_each_entry_rcu(subscription,
  560. &mm->notifier_subscriptions->list, hlist,
  561. srcu_read_lock_held(&srcu)) {
  562. if (subscription->ops->invalidate_range)
  563. subscription->ops->invalidate_range(subscription, mm,
  564. start, end);
  565. }
  566. srcu_read_unlock(&srcu, id);
  567. }
  568. #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
  569. static inline void mmu_notifier_write_lock(struct mm_struct *mm)
  570. {
  571. percpu_down_write(
  572. &mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
  573. }
  574. static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
  575. {
  576. percpu_up_write(
  577. &mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
  578. }
  579. #else /* CONFIG_SPECULATIVE_PAGE_FAULT */
  580. static inline void mmu_notifier_write_lock(struct mm_struct *mm) {}
  581. static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}
  582. #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
  583. static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
  584. {
  585. INIT_HLIST_HEAD(&subscriptions->list);
  586. spin_lock_init(&subscriptions->lock);
  587. subscriptions->invalidate_seq = 2;
  588. subscriptions->itree = RB_ROOT_CACHED;
  589. init_waitqueue_head(&subscriptions->wq);
  590. INIT_HLIST_HEAD(&subscriptions->deferred_list);
  591. }
  592. /*
  593. * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
  594. * write mode. A NULL subscription signals the notifier is being registered for itree
  595. * mode.
  596. */
  597. int __mmu_notifier_register(struct mmu_notifier *subscription,
  598. struct mm_struct *mm)
  599. {
  600. struct mmu_notifier_subscriptions *subscriptions = NULL;
  601. int ret;
  602. mmap_assert_write_locked(mm);
  603. BUG_ON(atomic_read(&mm->mm_users) <= 0);
  604. if (IS_ENABLED(CONFIG_LOCKDEP)) {
  605. fs_reclaim_acquire(GFP_KERNEL);
  606. lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
  607. lock_map_release(&__mmu_notifier_invalidate_range_start_map);
  608. fs_reclaim_release(GFP_KERNEL);
  609. }
  610. if (!mm->notifier_subscriptions) {
  611. /*
  612. * kmalloc cannot be called under mm_take_all_locks(), but we
  613. * know that mm->notifier_subscriptions can't change while we
  614. * hold the write side of the mmap_lock.
  615. */
  616. subscriptions = kzalloc(
  617. sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
  618. if (!subscriptions)
  619. return -ENOMEM;
  620. init_subscriptions(subscriptions);
  621. }
  622. mmu_notifier_write_lock(mm);
  623. ret = mm_take_all_locks(mm);
  624. if (unlikely(ret)) {
  625. mmu_notifier_write_unlock(mm);
  626. goto out_clean;
  627. }
  628. /*
  629. * Serialize the update against mmu_notifier_unregister. A
  630. * side note: mmu_notifier_release can't run concurrently with
  631. * us because we hold the mm_users pin (either implicitly as
  632. * current->mm or explicitly with get_task_mm() or similar).
  633. * We can't race against any other mmu notifier method either
  634. * thanks to mm_take_all_locks().
  635. *
  636. * release semantics on the initialization of the
  637. * mmu_notifier_subscriptions's contents are provided for unlocked
  638. * readers. acquire can only be used while holding the mmgrab or
  639. * mmget, and is safe because once created the
  640. * mmu_notifier_subscriptions is not freed until the mm is destroyed.
  641. * As above, users holding the mmap_lock or one of the
  642. * mm_take_all_locks() do not need to use acquire semantics.
  643. */
  644. if (subscriptions)
  645. smp_store_release(&mm->notifier_subscriptions, subscriptions);
  646. mm->notifier_subscriptions->hdr.valid = true;
  647. if (subscription) {
  648. /* Pairs with the mmdrop in mmu_notifier_unregister_* */
  649. mmgrab(mm);
  650. subscription->mm = mm;
  651. subscription->users = 1;
  652. spin_lock(&mm->notifier_subscriptions->lock);
  653. hlist_add_head_rcu(&subscription->hlist,
  654. &mm->notifier_subscriptions->list);
  655. spin_unlock(&mm->notifier_subscriptions->lock);
  656. } else
  657. mm->notifier_subscriptions->has_itree = true;
  658. mm_drop_all_locks(mm);
  659. mmu_notifier_write_unlock(mm);
  660. BUG_ON(atomic_read(&mm->mm_users) <= 0);
  661. return 0;
  662. out_clean:
  663. kfree(subscriptions);
  664. return ret;
  665. }
  666. EXPORT_SYMBOL_GPL(__mmu_notifier_register);
  667. /**
  668. * mmu_notifier_register - Register a notifier on a mm
  669. * @subscription: The notifier to attach
  670. * @mm: The mm to attach the notifier to
  671. *
  672. * Must not hold mmap_lock nor any other VM related lock when calling
  673. * this registration function. Must also ensure mm_users can't go down
  674. * to zero while this runs to avoid races with mmu_notifier_release,
  675. * so mm has to be current->mm or the mm should be pinned safely such
  676. * as with get_task_mm(). If the mm is not current->mm, the mm_users
  677. * pin should be released by calling mmput after mmu_notifier_register
  678. * returns.
  679. *
  680. * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
  681. * unregister the notifier.
  682. *
  683. * While the caller has a mmu_notifier get, the subscription->mm pointer will remain
  684. * valid, and can be converted to an active mm pointer via mmget_not_zero().
  685. */
  686. int mmu_notifier_register(struct mmu_notifier *subscription,
  687. struct mm_struct *mm)
  688. {
  689. int ret;
  690. mmap_write_lock(mm);
  691. ret = __mmu_notifier_register(subscription, mm);
  692. mmap_write_unlock(mm);
  693. return ret;
  694. }
  695. EXPORT_SYMBOL_GPL(mmu_notifier_register);
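/*
 * Registration sketch (illustrative; 'my_notifier', 'my_ops' and the callback
 * bodies are hypothetical): the caller provides a struct mmu_notifier whose
 * ->ops points at a static ops table, and registers it against an mm whose
 * mm_users pin it holds, e.g. current->mm:
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release                = my_release,
 *		.invalidate_range_start = my_invalidate_range_start,
 *		.invalidate_range_end   = my_invalidate_range_end,
 *	};
 *
 *	my_notifier.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_notifier, current->mm);
 *
 * Teardown is mmu_notifier_unregister(&my_notifier, mm) (or the get/put flow
 * below); only after it returns is no callback still running.
 */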
  696. static struct mmu_notifier *
  697. find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
  698. {
  699. struct mmu_notifier *subscription;
  700. spin_lock(&mm->notifier_subscriptions->lock);
  701. hlist_for_each_entry_rcu(subscription,
  702. &mm->notifier_subscriptions->list, hlist,
  703. lockdep_is_held(&mm->notifier_subscriptions->lock)) {
  704. if (subscription->ops != ops)
  705. continue;
  706. if (likely(subscription->users != UINT_MAX))
  707. subscription->users++;
  708. else
  709. subscription = ERR_PTR(-EOVERFLOW);
  710. spin_unlock(&mm->notifier_subscriptions->lock);
  711. return subscription;
  712. }
  713. spin_unlock(&mm->notifier_subscriptions->lock);
  714. return NULL;
  715. }
  716. /**
  717. * mmu_notifier_get_locked - Return the single struct mmu_notifier for
  718. * the mm & ops
  719. * @ops: The operations struct being subscribed with
  720. * @mm: The mm to attach notifiers to
  721. *
  722. * This function either allocates a new mmu_notifier via
  723. * ops->alloc_notifier(), or returns an already existing notifier on the
  724. * list. The value of the ops pointer is used to determine when two notifiers
  725. * are the same.
  726. *
  727. * Each call to mmu_notifier_get() must be paired with a call to
  728. * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
  729. *
  730. * While the caller has a mmu_notifier get, the mm pointer will remain valid,
  731. * and can be converted to an active mm pointer via mmget_not_zero().
  732. */
  733. struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
  734. struct mm_struct *mm)
  735. {
  736. struct mmu_notifier *subscription;
  737. int ret;
  738. mmap_assert_write_locked(mm);
  739. if (mm->notifier_subscriptions) {
  740. subscription = find_get_mmu_notifier(mm, ops);
  741. if (subscription)
  742. return subscription;
  743. }
  744. subscription = ops->alloc_notifier(mm);
  745. if (IS_ERR(subscription))
  746. return subscription;
  747. subscription->ops = ops;
  748. ret = __mmu_notifier_register(subscription, mm);
  749. if (ret)
  750. goto out_free;
  751. return subscription;
  752. out_free:
  753. subscription->ops->free_notifier(subscription);
  754. return ERR_PTR(ret);
  755. }
  756. EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
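/*
 * Ops sketch for the get/put flow (illustrative; 'struct my_notifier' with an
 * embedded 'mn' member and the helpers are hypothetical): alloc_notifier()
 * and free_notifier() must be provided because this path allocates and frees
 * the notifier itself:
 *
 *	static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		return p ? &p->mn : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.alloc_notifier = my_alloc_notifier,
 *		.free_notifier  = my_free_notifier,
 *		.release        = my_release,
 *	};
 *
 * Callers normally reach this through the mmu_notifier_get() wrapper, which
 * takes the mmap_lock in write mode around this function.
 */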
  757. /* this is called after the last mmu_notifier_unregister() returned */
  758. void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
  759. {
  760. BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
  761. kfree(mm->notifier_subscriptions);
  762. mm->notifier_subscriptions = LIST_POISON1; /* debug */
  763. }
  764. /*
  765. * This releases the mm_count pin automatically and frees the mm
  766. * structure if it was the last user of it. It serializes against
  767. * running mmu notifiers with SRCU and against mmu_notifier_unregister
  768. * with the unregister lock + SRCU. All sptes must be dropped before
  769. * calling mmu_notifier_unregister. ->release or any other notifier
  770. * method may be invoked concurrently with mmu_notifier_unregister,
  771. * and only after mmu_notifier_unregister has returned are we guaranteed
  772. * that ->release or any other method can't run anymore.
  773. */
  774. void mmu_notifier_unregister(struct mmu_notifier *subscription,
  775. struct mm_struct *mm)
  776. {
  777. BUG_ON(atomic_read(&mm->mm_count) <= 0);
  778. if (!hlist_unhashed(&subscription->hlist)) {
  779. /*
  780. * SRCU here will force exit_mmap to wait for ->release to
  781. * finish before freeing the pages.
  782. */
  783. int id;
  784. id = srcu_read_lock(&srcu);
  785. /*
  786. * exit_mmap will block in mmu_notifier_release to guarantee
  787. * that ->release is called before freeing the pages.
  788. */
  789. if (subscription->ops->release)
  790. subscription->ops->release(subscription, mm);
  791. srcu_read_unlock(&srcu, id);
  792. spin_lock(&mm->notifier_subscriptions->lock);
  793. /*
  794. * Can not use list_del_rcu() since __mmu_notifier_release
  795. * can delete it before we hold the lock.
  796. */
  797. hlist_del_init_rcu(&subscription->hlist);
  798. spin_unlock(&mm->notifier_subscriptions->lock);
  799. }
  800. /*
  801. * Wait for any running method to finish, of course including
  802. * ->release if it was run by mmu_notifier_release instead of us.
  803. */
  804. synchronize_srcu(&srcu);
  805. BUG_ON(atomic_read(&mm->mm_count) <= 0);
  806. mmdrop(mm);
  807. }
  808. EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
  809. static void mmu_notifier_free_rcu(struct rcu_head *rcu)
  810. {
  811. struct mmu_notifier *subscription =
  812. container_of(rcu, struct mmu_notifier, rcu);
  813. struct mm_struct *mm = subscription->mm;
  814. subscription->ops->free_notifier(subscription);
  815. /* Pairs with the get in __mmu_notifier_register() */
  816. mmdrop(mm);
  817. }
  818. /**
  819. * mmu_notifier_put - Release the reference on the notifier
  820. * @subscription: The notifier to act on
  821. *
  822. * This function must be paired with each mmu_notifier_get(); it releases the
  823. * reference obtained by the get. If this is the last reference then the process
  824. * to free the notifier will be run asynchronously.
  825. *
  826. * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
  827. * when the mm_struct is destroyed. Instead free_notifier is always called to
  828. * release any resources held by the user.
  829. *
  830. * As ops->release is not guaranteed to be called, the user must ensure that
  831. * all sptes are dropped, and no new sptes can be established before
  832. * mmu_notifier_put() is called.
  833. *
  834. * This function can be called from the ops->release callback, however the
  835. * caller must still ensure it is called pairwise with mmu_notifier_get().
  836. *
  837. * Modules calling this function must call mmu_notifier_synchronize() in
  838. * their __exit functions to ensure the async work is completed.
  839. */
  840. void mmu_notifier_put(struct mmu_notifier *subscription)
  841. {
  842. struct mm_struct *mm = subscription->mm;
  843. spin_lock(&mm->notifier_subscriptions->lock);
  844. if (WARN_ON(!subscription->users) || --subscription->users)
  845. goto out_unlock;
  846. hlist_del_init_rcu(&subscription->hlist);
  847. spin_unlock(&mm->notifier_subscriptions->lock);
  848. call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
  849. return;
  850. out_unlock:
  851. spin_unlock(&mm->notifier_subscriptions->lock);
  852. }
  853. EXPORT_SYMBOL_GPL(mmu_notifier_put);
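/*
 * Pairing sketch (illustrative; 'my_ops' and 'my_exit' are hypothetical):
 * every successful get is matched by exactly one put, and a module using this
 * flow flushes the deferred SRCU free on unload:
 *
 *	subscription = mmu_notifier_get(&my_ops, current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	...
 *	mmu_notifier_put(subscription);
 *
 *	static void __exit my_exit(void)
 *	{
 *		mmu_notifier_synchronize();
 *	}
 */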
  854. static int __mmu_interval_notifier_insert(
  855. struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
  856. struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
  857. unsigned long length, const struct mmu_interval_notifier_ops *ops)
  858. {
  859. interval_sub->mm = mm;
  860. interval_sub->ops = ops;
  861. RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
  862. interval_sub->interval_tree.start = start;
  863. /*
  864. * Note that the representation of the intervals in the interval tree
  865. * considers the ending point as contained in the interval.
  866. */
  867. if (length == 0 ||
  868. check_add_overflow(start, length - 1,
  869. &interval_sub->interval_tree.last))
  870. return -EOVERFLOW;
  871. /* Must call with a mmget() held */
  872. if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
  873. return -EINVAL;
  874. /* pairs with mmdrop in mmu_interval_notifier_remove() */
  875. mmgrab(mm);
  876. /*
  877. * If some invalidate_range_start/end region is going on in parallel
  878. * we don't know what VA ranges are affected, so we must assume this
  879. * new range is included.
  880. *
  881. * If the itree is invalidating then we are not allowed to change
  882. * it. Retrying until invalidation is done is tricky due to the
  883. * possibility for live lock, instead defer the add to
  884. * mn_itree_inv_end() so this algorithm is deterministic.
  885. *
  886. * In all cases the value for the interval_sub->invalidate_seq should be
  887. * odd, see mmu_interval_read_begin()
  888. */
  889. spin_lock(&subscriptions->lock);
  890. if (subscriptions->active_invalidate_ranges) {
  891. if (mn_itree_is_invalidating(subscriptions))
  892. hlist_add_head(&interval_sub->deferred_item,
  893. &subscriptions->deferred_list);
  894. else {
  895. subscriptions->invalidate_seq |= 1;
  896. interval_tree_insert(&interval_sub->interval_tree,
  897. &subscriptions->itree);
  898. }
  899. interval_sub->invalidate_seq = subscriptions->invalidate_seq;
  900. } else {
  901. WARN_ON(mn_itree_is_invalidating(subscriptions));
  902. /*
  903. * The starting seq for a subscription not under invalidation
  904. * should be odd, not equal to the current invalidate_seq and
  905. * invalidate_seq should not 'wrap' to the new seq any time
  906. * soon.
  907. */
  908. interval_sub->invalidate_seq =
  909. subscriptions->invalidate_seq - 1;
  910. interval_tree_insert(&interval_sub->interval_tree,
  911. &subscriptions->itree);
  912. }
  913. spin_unlock(&subscriptions->lock);
  914. return 0;
  915. }
  916. /**
  917. * mmu_interval_notifier_insert - Insert an interval notifier
  918. * @interval_sub: Interval subscription to register
  919. * @start: Starting virtual address to monitor
  920. * @length: Length of the range to monitor
  921. * @mm: mm_struct to attach to
  922. * @ops: Interval notifier operations to be called on matching events
  923. *
  924. * This function subscribes the interval notifier for notifications from the
  925. * mm. Upon return the ops related to mmu_interval_notifier will be called
  926. * whenever an event that intersects with the given range occurs.
  927. *
  928. * Upon return the interval notifier may not be present in the interval tree yet.
  929. * The caller must use the normal interval notifier read flow via
  930. * mmu_interval_read_begin() to establish SPTEs for this range.
  931. */
  932. int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
  933. struct mm_struct *mm, unsigned long start,
  934. unsigned long length,
  935. const struct mmu_interval_notifier_ops *ops)
  936. {
  937. struct mmu_notifier_subscriptions *subscriptions;
  938. int ret;
  939. might_lock(&mm->mmap_lock);
  940. subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
  941. if (!subscriptions || !subscriptions->has_itree) {
  942. ret = mmu_notifier_register(NULL, mm);
  943. if (ret)
  944. return ret;
  945. subscriptions = mm->notifier_subscriptions;
  946. }
  947. return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
  948. start, length, ops);
  949. }
  950. EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
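/*
 * Insertion sketch (illustrative; 'my_sub', 'my_interval_ops' and
 * my_invalidate() are hypothetical): the subscription covers
 * [start, start + length - 1] and its invalidate() callback starts firing for
 * overlapping events once registered:
 *
 *	static const struct mmu_interval_notifier_ops my_interval_ops = {
 *		.invalidate = my_invalidate,	// must call mmu_interval_set_seq()
 *	};
 *
 *	ret = mmu_interval_notifier_insert(&my_sub, current->mm,
 *					   addr, size, &my_interval_ops);
 *
 * A zero return does not mean any SPTEs are valid; they must be established
 * through the mmu_interval_read_begin()/mmu_interval_read_retry() flow shown
 * earlier, and mmu_interval_notifier_remove() tears the subscription down.
 */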
  951. int mmu_interval_notifier_insert_locked(
  952. struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
  953. unsigned long start, unsigned long length,
  954. const struct mmu_interval_notifier_ops *ops)
  955. {
  956. struct mmu_notifier_subscriptions *subscriptions =
  957. mm->notifier_subscriptions;
  958. int ret;
  959. mmap_assert_write_locked(mm);
  960. if (!subscriptions || !subscriptions->has_itree) {
  961. ret = __mmu_notifier_register(NULL, mm);
  962. if (ret)
  963. return ret;
  964. subscriptions = mm->notifier_subscriptions;
  965. }
  966. return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
  967. start, length, ops);
  968. }
  969. EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
  970. static bool
  971. mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
  972. unsigned long seq)
  973. {
  974. bool ret;
  975. spin_lock(&subscriptions->lock);
  976. ret = subscriptions->invalidate_seq != seq;
  977. spin_unlock(&subscriptions->lock);
  978. return ret;
  979. }
  980. /**
  981. * mmu_interval_notifier_remove - Remove an interval notifier
  982. * @interval_sub: Interval subscription to unregister
  983. *
  984. * This function must be paired with mmu_interval_notifier_insert(). It cannot
  985. * be called from any ops callback.
  986. *
  987. * Once this returns ops callbacks are no longer running on other CPUs and
  988. * will not be called in future.
  989. */
  990. void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
  991. {
  992. struct mm_struct *mm = interval_sub->mm;
  993. struct mmu_notifier_subscriptions *subscriptions =
  994. mm->notifier_subscriptions;
  995. unsigned long seq = 0;
  996. might_sleep();
  997. spin_lock(&subscriptions->lock);
  998. if (mn_itree_is_invalidating(subscriptions)) {
  999. /*
  1000. * remove is being called after insert put this subscription on the
  1001. * deferred list, but before the deferred list was processed.
  1002. */
  1003. if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
  1004. hlist_del(&interval_sub->deferred_item);
  1005. } else {
  1006. hlist_add_head(&interval_sub->deferred_item,
  1007. &subscriptions->deferred_list);
  1008. seq = subscriptions->invalidate_seq;
  1009. }
  1010. } else {
  1011. WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
  1012. interval_tree_remove(&interval_sub->interval_tree,
  1013. &subscriptions->itree);
  1014. }
  1015. spin_unlock(&subscriptions->lock);
  1016. /*
  1017. * The possible sleep on progress in the invalidation requires the
  1018. * caller not hold any locks held by invalidation callbacks.
  1019. */
  1020. lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
  1021. lock_map_release(&__mmu_notifier_invalidate_range_start_map);
  1022. if (seq)
  1023. wait_event(subscriptions->wq,
  1024. mmu_interval_seq_released(subscriptions, seq));
  1025. /* pairs with mmgrab in mmu_interval_notifier_insert() */
  1026. mmdrop(mm);
  1027. }
  1028. EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
  1029. /**
  1030. * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
  1031. *
  1032. * This function ensures that all outstanding async SRCU work from
  1033. * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
  1034. * associated with an unused mmu_notifier will no longer be called.
  1035. *
  1036. * Before calling this, the caller must ensure that all of its mmu_notifiers have been
  1037. * fully released via mmu_notifier_put().
  1038. *
  1039. * Modules using the mmu_notifier_put() API should call this in their __exit
  1040. * function to avoid module unloading races.
  1041. */
  1042. void mmu_notifier_synchronize(void)
  1043. {
  1044. synchronize_srcu(&srcu);
  1045. }
  1046. EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
  1047. bool
  1048. mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
  1049. {
  1050. if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
  1051. return false;
  1052. /* Return true if the vma still has the read flag set. */
  1053. return range->vma->vm_flags & VM_READ;
  1054. }
  1055. EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
  1056. #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
  1057. bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
  1058. {
  1059. struct mmu_notifier_subscriptions *subscriptions;
  1060. struct percpu_rw_semaphore_atomic *sem;
  1061. subscriptions = kzalloc(
  1062. sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
  1063. if (!subscriptions)
  1064. return false;
  1065. sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
  1066. if (!sem) {
  1067. kfree(subscriptions);
  1068. return false;
  1069. }
  1070. percpu_init_rwsem(&sem->rw_sem);
  1071. init_subscriptions(subscriptions);
  1072. subscriptions->has_itree = true;
  1073. subscriptions->hdr.valid = false;
  1074. subscriptions->hdr.mmu_notifier_lock = sem;
  1075. mm->notifier_subscriptions = subscriptions;
  1076. return true;
  1077. }
  1078. void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
  1079. {
  1080. percpu_rwsem_async_destroy(
  1081. mm->notifier_subscriptions->hdr.mmu_notifier_lock);
  1082. kfree(mm->notifier_subscriptions);
  1083. mm->notifier_subscriptions = NULL;
  1084. }
  1085. #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */