membarrier.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */
#include "sched.h"

/*
 * Bitmask made from an OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
#endif

#ifdef CONFIG_RSEQ
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
#endif

#define MEMBARRIER_CMD_BITMASK \
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static void ipi_sync_core(void *info)
{
	/*
	 * The smp_mb() in membarrier after all the IPIs is supposed to
	 * ensure that memory accesses on remote CPUs that occur before
	 * the IPI become visible to membarrier()'s caller -- see
	 * scenario B in the big comment at the top of this file.
	 *
	 * A sync_core() would provide this guarantee, but
	 * sync_core_before_usermode() might end up being deferred until
	 * after membarrier()'s smp_mb().
	 */
	smp_mb();	/* IPIs should be serializing but paranoid. */

	sync_core_before_usermode();
}

static void ipi_rseq(void *info)
{
	/*
	 * Ensure that all stores done by the calling thread are visible
	 * to the current task before the current task resumes. We could
	 * probably optimize this away on most architectures, but by the
	 * time we've already sent an IPI, the cost of the extra smp_mb()
	 * is negligible.
	 */
	smp_mb();
	rseq_preempt(current);
}
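ipi_rseq() above is the handler that runs on a target CPU when user space issues the rseq flavour of the private expedited command. For orientation, here is a minimal user-space sketch of that calling side; it is illustrative only (not part of this kernel file) and assumes <linux/membarrier.h>, the raw syscall(2) interface, and a kernel built with CONFIG_RSEQ:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

/* Thin wrapper; glibc does not provide a dedicated membarrier() wrapper. */
static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* One-time registration for the rseq flavour. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0))
		perror("MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ");

	/*
	 * Restart any rseq critical section of this process currently
	 * running on CPU 2 only (MEMBARRIER_CMD_FLAG_CPU + cpu_id),
	 * instead of interrupting every CPU running this mm.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
		       MEMBARRIER_CMD_FLAG_CPU, 2))
		perror("MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ");
	return 0;
}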
static void ipi_sync_rq_state(void *info)
{
	struct mm_struct *mm = (struct mm_struct *) info;

	if (current->mm != mm)
		return;
	this_cpu_write(runqueues.membarrier_state,
		       atomic_read(&mm->membarrier_state));
	/*
	 * Issue a memory barrier after setting
	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
	 * guarantee that no memory access following registration is reordered
	 * before registration.
	 */
	smp_mb();
}

void membarrier_exec_mmap(struct mm_struct *mm)
{
	/*
	 * Issue a memory barrier before clearing membarrier_state to
	 * guarantee that no memory access prior to exec is reordered after
	 * clearing this state.
	 */
	smp_mb();
	atomic_set(&mm->membarrier_state, 0);
	/*
	 * Keep the runqueue membarrier_state in sync with this mm
	 * membarrier_state.
	 */
	this_cpu_write(runqueues.membarrier_state, 0);
}

static int membarrier_global_expedited(void)
{
	int cpu;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;

		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
		      MEMBARRIER_STATE_GLOBAL_EXPEDITED))
			continue;

		/*
		 * Skip the CPU if it runs a kernel thread. The scheduler
		 * leaves the prior task mm in place as an optimization when
		 * scheduling a kthread.
		 */
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p->flags & PF_KTHREAD)
			continue;

		__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}
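The loop above only IPIs CPUs whose runqueue carries MEMBARRIER_STATE_GLOBAL_EXPEDITED, i.e. CPUs currently running a task of a process that registered for the global expedited command. A minimal user-space sketch of that register-then-use sequence follows; it is illustrative only, not part of the kernel source, and assumes the raw syscall(2) interface:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/*
	 * Registration sets MEMBARRIER_STATE_GLOBAL_EXPEDITED on this
	 * process's mm (see membarrier_register_global_expedited()
	 * further down in this file), making its CPUs targets of the
	 * expedited command.
	 */
	if (membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0, 0))
		perror("MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED");

	/*
	 * Each call IPIs the CPUs currently running threads of registered
	 * processes and returns once they have all executed a full barrier.
	 */
	if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0, 0))
		perror("MEMBARRIER_CMD_GLOBAL_EXPEDITED");
	return 0;
}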
static int membarrier_private_expedited(int flags, int cpu_id)
{
	cpumask_var_t tmpmask;
	struct mm_struct *mm = current->mm;
	smp_call_func_t ipi_func = ipi_mb;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
		ipi_func = ipi_sync_core;
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
			return -EPERM;
		ipi_func = ipi_rseq;
	} else {
		WARN_ON_ONCE(flags);
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
	    (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpus_read_lock();

	if (cpu_id >= 0) {
		struct task_struct *p;

		if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
			goto out;
		rcu_read_lock();
		p = rcu_dereference(cpu_rq(cpu_id)->curr);
		if (!p || p->mm != mm) {
			rcu_read_unlock();
			goto out;
		}
		rcu_read_unlock();
	} else {
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			struct task_struct *p;

			p = rcu_dereference(cpu_rq(cpu)->curr);
			if (p && p->mm == mm)
				__cpumask_set_cpu(cpu, tmpmask);
		}
		rcu_read_unlock();
	}

	if (cpu_id >= 0) {
		/*
		 * smp_call_function_single() will call ipi_func() if cpu_id
		 * is the calling CPU.
		 */
		smp_call_function_single(cpu_id, ipi_func, NULL, 1);
	} else {
		/*
		 * For regular membarrier, we can save a few cycles by
		 * skipping the current cpu -- we're about to do smp_mb()
		 * below, and if we migrate to a different cpu, this cpu
		 * and the new cpu will execute a full barrier in the
		 * scheduler.
		 *
		 * For SYNC_CORE, we do need a barrier on the current cpu --
		 * otherwise, if we are migrated and replaced by a different
		 * task in the same mm just before, during, or after
		 * membarrier, we will end up with some thread in the mm
		 * running without a core sync.
		 *
		 * For RSEQ, don't rseq_preempt() the caller. User code
		 * is not supposed to issue syscalls at all from inside an
		 * rseq critical section.
		 */
		if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
			preempt_disable();
			smp_call_function_many(tmpmask, ipi_func, NULL, true);
			preempt_enable();
		} else {
			on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
		}
	}

out:
	if (cpu_id < 0)
		free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */

	return 0;
}
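The SYNC_CORE branch handled above is aimed at processes that rewrite their own instructions (for example JIT compilers): after storing new instructions, every sibling thread must execute a core-serializing instruction before it may run them. A rough user-space sketch of one such pattern follows; it is illustrative only, and patch_code()/publish_entry() are hypothetical helpers standing in for the application's own machinery:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helpers assumed to exist elsewhere: patch_code() rewrites
 * instructions in an executable mapping, publish_entry() release-stores the
 * new entry point for other threads to pick up. */
extern void patch_code(void);
extern void publish_entry(void);

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

/* Called once at startup, before any code patching happens. */
static int jit_init(void)
{
	return membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
			  0, 0);
}

static void jit_update(void)
{
	patch_code();
	/*
	 * Ensure every thread of this process executes a core-serializing
	 * instruction before it can observe and run the freshly written
	 * instructions.
	 */
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
	publish_entry();
}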
static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
	int membarrier_state = atomic_read(&mm->membarrier_state);
	cpumask_var_t tmpmask;
	int cpu;

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
		this_cpu_write(runqueues.membarrier_state, membarrier_state);

		/*
		 * For single mm user, we can simply issue a memory barrier
		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
		 * mm and in the current runqueue to guarantee that no memory
		 * access following registration is reordered before
		 * registration.
		 */
		smp_mb();
		return 0;
	}

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * For mm with multiple users, we need to ensure all future
	 * scheduler executions will observe @mm's new membarrier
	 * state.
	 */
	synchronize_rcu();

	/*
	 * For each cpu runqueue, if the task's mm matches @mm, ensure that
	 * all @mm's membarrier state set bits are also set in the runqueue's
	 * membarrier state. This ensures that a runqueue scheduling
	 * between threads which are users of @mm has its membarrier state
	 * updated.
	 */
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *p;

		p = rcu_dereference(rq->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ret;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
	    ret;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY;
	} else {
		WARN_ON_ONCE(flags);
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
		return 0;
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
	if (flags & MEMBARRIER_FLAG_RSEQ)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ;
	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);

	return 0;
}
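Since the *_READY bits tested by membarrier_private_expedited() are only ever set here, an unregistered process gets -EPERM from the expedited commands. A small user-space sketch of the expected sequence (query the command mask, register once, then use) is shown below; it is illustrative only and uses the raw syscall(2) interface:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

	if (mask < 0 || !(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
		return 1;	/* command not supported on this kernel */

	/* Using the command before registering yields EPERM ... */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) &&
	    errno == EPERM)
		fprintf(stderr, "not registered yet, as expected\n");

	/* ... so register once, typically at process start. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		return 1;

	/* From now on this issues barriers on all threads of this mm. */
	return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) ? 1 : 0;
}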
/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:    Takes command values defined in enum membarrier_cmd.
 * @flags:  Currently needs to be 0 for all commands other than
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
 *          case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
 *          contains the CPU on which to interrupt (= restart)
 *          the RSEQ critical section.
 * @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
 *          the RSEQ critical section should be interrupted (@cmd must be
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
 * always return the same value until reboot. In addition, it can return
 * -ENOMEM if there is not enough memory available to perform the system
 * call.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb()   sys_membarrier()
 *        barrier()          X           X              O
 *        smp_mb()           X           O              O
 *        sys_membarrier()   O           O              O
 */
SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
{
	switch (cmd) {
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU))
			return -EINVAL;
		break;
	default:
		if (unlikely(flags))
			return -EINVAL;
	}

	if (!(flags & MEMBARRIER_CMD_FLAG_CPU))
		cpu_id = -1;

	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
	default:
		return -EINVAL;
	}
}
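The ordering table in the comment above is what enables the asymmetric fast-path/slow-path pattern: hot-path threads pair a plain compiler barrier with a sys_membarrier() issued on the cold path, which then provides the same ordering as if both sides had used smp_mb(). A condensed user-space sketch of that pairing, loosely following the example in the membarrier(2) man page, is shown below (illustrative only; it assumes the process has already registered with MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile int fast_flag, slow_flag;

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

/* Hot path, executed frequently: only a compiler barrier. */
static void fast_path(int *reads_slow)
{
	fast_flag = 1;
	__asm__ __volatile__ ("" : : : "memory");	/* barrier() */
	*reads_slow = slow_flag;
}

/* Cold path, executed rarely: the membarrier() call pairs with the
 * compiler barrier above, per the ordering table in the kernel comment. */
static void slow_path(int *reads_fast)
{
	slow_flag = 1;
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
	*reads_fast = fast_flag;
}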