/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
        /*
         * The full memory barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}
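
/*
 * Illustrative sketch (not part of this header): a typical mmgrab()/mmdrop()
 * pairing for a worker that must outlive the owning task but only needs the
 * mm_struct itself, not its address space. The surrounding context
 * (struct my_worker, my_worker_fn) is hypothetical.
 *
 *      static void my_worker_fn(struct my_worker *w)
 *      {
 *              struct mm_struct *mm = w->mm;   // pinned with mmgrab() at setup
 *
 *              // The mm_struct cannot be freed here, but its address space
 *              // may already be gone; take an mm_users reference before
 *              // touching it.
 *              if (mmget_not_zero(mm)) {
 *                      // ... operate on the address space ...
 *                      mmput(mm);
 *              }
 *              mmdrop(mm);     // drop the mm_count reference from mmgrab()
 *      }
 */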

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);

#ifdef CONFIG_MMU
/* Same as mmput(), but defers the slow path to an async context. Can also
 * be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);

/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
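
/*
 * Illustrative sketch (not part of this header): accessing another task's
 * address space via get_task_mm(). The do_something_with() helper and the
 * error code are hypothetical; the get_task_mm()/mmput() pairing is the
 * pattern of interest.
 *
 *      static int inspect_task_mm(struct task_struct *task)
 *      {
 *              struct mm_struct *mm = get_task_mm(task);
 *
 *              if (!mm)
 *                      return -ESRCH;  // kernel thread or task already exiting
 *              do_something_with(mm);
 *              mmput(mm);              // release the mm_users reference
 *              return 0;
 *      }
 */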

/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr) (TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                               unsigned long len, unsigned long pgoff,
                               unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
#endif
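
/*
 * Illustrative sketch (not part of this header): an architecture wanting a
 * non-default mmap ceiling can define its own arch_get_mmap_end() and
 * arch_get_mmap_base() in its own headers (e.g. <asm/processor.h>) so the
 * fallback #ifndef definitions above are skipped. The 64MB reservation shown
 * here is purely hypothetical, not any real architecture's policy.
 *
 *      // in arch/<arch>/include/asm/processor.h (hypothetical)
 *      #define arch_get_mmap_end(addr)         (TASK_SIZE - SZ_64M)
 *      #define arch_get_mmap_base(addr, base)  (base)
 */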

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * need RCU to access ->real_parent if CLONE_VM was used along with
         * CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be a false negative. But we do not care, if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done &&
                        rcu_dereference(tsk->real_parent)->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        unsigned int pflags = READ_ONCE(current->flags);

        if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it takes precedence.
                 */
                if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;
        }
        return flags;
}
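
/*
 * Illustrative sketch (not part of this header): how the scoped PF_ flags
 * combine with current_gfp_context(). Inside a memalloc_nofs_save() scope, a
 * nominal GFP_KERNEL request is effectively downgraded to GFP_NOFS.
 *
 *      unsigned int nofs_flags = memalloc_nofs_save();
 *      gfp_t effective = current_gfp_context(GFP_KERNEL);
 *      // effective now has __GFP_FS cleared, i.e. it behaves like GFP_NOFS
 *      memalloc_nofs_restore(nofs_flags);
 */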

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
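
/*
 * Illustrative sketch (not part of this header): a block-layer style path
 * that must not recurse into the I/O layer through reclaim. The function name
 * and the allocation shown are hypothetical; the save/restore pairing is the
 * pattern of interest.
 *
 *      static void *alloc_in_io_path(size_t size)
 *      {
 *              unsigned int noio_flags = memalloc_noio_save();
 *              void *p = kmalloc(size, GFP_KERNEL);    // reclaim behaves as GFP_NOIO here
 *
 *              memalloc_noio_restore(noio_flags);      // scopes may nest; restore saved state
 *              return p;
 *      }
 */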

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
        current->flags |= PF_MEMALLOC_NOFS;
        return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
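
/*
 * Illustrative sketch (not part of this header): a filesystem holding a
 * transaction open, where recursing into filesystem reclaim could deadlock.
 * The my_fs_* types and helpers are hypothetical; the save/restore pairing
 * around the critical section is what the kernel-doc above requires.
 *
 *      static int my_fs_do_transaction(struct my_fs_info *fsi)
 *      {
 *              unsigned int nofs_flags = memalloc_nofs_save();
 *              int ret;
 *
 *              // GFP_KERNEL allocations in here will not recurse into
 *              // filesystem reclaim.
 *              ret = my_fs_commit(fsi);
 *
 *              memalloc_nofs_restore(nofs_flags);
 *              return ret;
 *      }
 */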

/*
 * Enter a scope in which allocations behave as if PF_MEMALLOC were set:
 * they may dip into memory reserves and will not recurse into reclaim.
 * Pair with memalloc_noreclaim_restore() using the returned flags.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
/*
 * Enter a scope in which movable allocations are kept out of CMA areas.
 * Pair with memalloc_nocma_restore() using the returned flags.
 */
static inline unsigned int memalloc_nocma_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
        current->flags |= PF_MEMALLOC_NOCMA;
        return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
        return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (in_interrupt()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;
        }

        return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        return NULL;
}
#endif
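
/*
 * Illustrative sketch (not part of this header): charging an allocation done
 * on behalf of another cgroup. The target_memcg and size variables are
 * hypothetical; saving and restoring the previous active memcg is the pattern
 * the NOTE above requires.
 *
 *      struct mem_cgroup *old_memcg = set_active_memcg(target_memcg);
 *      void *buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);  // charged to target_memcg
 *
 *      set_active_memcg(old_memcg);    // end the remote charging scope
 */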

#ifdef CONFIG_MEMBARRIER
enum {
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY                = (1U << 0),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED                      = (1U << 1),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY                 = (1U << 2),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED                       = (1U << 3),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY      = (1U << 4),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE            = (1U << 5),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY           = (1U << 6),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ                 = (1U << 7),
};

enum {
        MEMBARRIER_FLAG_SYNC_CORE       = (1U << 0),
        MEMBARRIER_FLAG_RSEQ            = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
        if (current->mm != mm)
                return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
        sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */