sem.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/ipc/sem.c
  4. * Copyright (C) 1992 Krishna Balasubramanian
  5. * Copyright (C) 1995 Eric Schenk, Bruno Haible
  6. *
  7. * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  8. *
  9. * SMP-threaded, sysctl's added
  10. * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  11. * Enforced range limit on SEM_UNDO
  12. * (c) 2001 Red Hat Inc
  13. * Lockless wakeup
  14. * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  15. * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  16. * Further wakeup optimizations, documentation
  17. * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  18. *
  19. * support for audit of ipc object properties and permission changes
  20. * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  21. *
  22. * namespaces support
  23. * OpenVZ, SWsoft Inc.
  24. * Pavel Emelianov <xemul@openvz.org>
  25. *
  26. * Implementation notes: (May 2010)
  27. * This file implements System V semaphores.
  28. *
  29. * User space visible behavior:
  30. * - FIFO ordering for semop() operations (just FIFO, not starvation
  31. * protection)
  32. * - multiple semaphore operations that alter the same semaphore in
  33. * one semop() are handled.
  34. * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
  35. * SETALL calls.
  36. * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  37. * - undo adjustments at process exit are limited to 0..SEMVMX.
  38. * - namespaces are supported.
  39. * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  40. * to /proc/sys/kernel/sem.
  41. * - statistics about the usage are reported in /proc/sysvipc/sem.
  42. *
  43. * Internals:
  44. * - scalability:
  45. * - all global variables are read-mostly.
  46. * - semop() calls and semctl(RMID) are synchronized by RCU.
  47. * - most operations do write operations (actually: spin_lock calls) to
  48. * the per-semaphore array structure.
  49. * Thus: Perfect SMP scaling between independent semaphore arrays.
  50. * If multiple semaphores in one array are used, then cache line
  51. * thrashing on the semaphore array spinlock will limit the scaling.
  52. * - semncnt and semzcnt are calculated on demand in count_semcnt()
  53. * - the task that performs a successful semop() scans the list of all
  54. * sleeping tasks and completes any pending operations that can be fulfilled.
  55. * Semaphores are actively given to waiting tasks (necessary for FIFO).
  56. * (see update_queue())
  57. * - To improve the scalability, the actual wake-up calls are performed after
  58. * dropping all locks. (see wake_up_sem_queue_prepare())
  59. * - All work is done by the waker, the woken up task does not have to do
  60. * anything - not even acquiring a lock or dropping a refcount.
  61. * - A woken up task may not even touch the semaphore array anymore, it may
  62. * have been destroyed already by a semctl(RMID).
  63. * - UNDO values are stored in an array (one per process and per
  64. * semaphore array, lazily allocated). For backwards compatibility, multiple
  65. * modes for the UNDO variables are supported (per process, per thread)
  66. * (see copy_semundo, CLONE_SYSVSEM)
  67. * - There are two lists of the pending operations: a per-array list
  68. * and per-semaphore list (stored in the array). This allows achieving FIFO
  69. * ordering without always scanning all pending operations.
  70. * The worst-case behavior is nevertheless O(N^2) for N wakeups.
  71. */
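For reference, a minimal user-space sketch of the behavior documented above (illustration only, not part of the kernel source; error handling omitted): a single-semaphore set, an increment carrying SEM_UNDO, and a wait-for-zero operation that cannot proceed.

	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	int main(void)
	{
		struct sembuf inc  = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
		struct sembuf zero = { .sem_num = 0, .sem_op = 0, .sem_flg = IPC_NOWAIT };
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);	/* nsems = 1 */

		semop(id, &inc, 1);	/* semval 0 -> 1, undo adjustment recorded */
		semop(id, &zero, 1);	/* would block: semval != 0, so IPC_NOWAIT fails with EAGAIN */
		semctl(id, 0, IPC_RMID);	/* destroy the set */
		return 0;
	}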
  72. #include <linux/compat.h>
  73. #include <linux/slab.h>
  74. #include <linux/spinlock.h>
  75. #include <linux/init.h>
  76. #include <linux/proc_fs.h>
  77. #include <linux/time.h>
  78. #include <linux/security.h>
  79. #include <linux/syscalls.h>
  80. #include <linux/audit.h>
  81. #include <linux/capability.h>
  82. #include <linux/seq_file.h>
  83. #include <linux/rwsem.h>
  84. #include <linux/nsproxy.h>
  85. #include <linux/ipc_namespace.h>
  86. #include <linux/sched/wake_q.h>
  87. #include <linux/nospec.h>
  88. #include <linux/rhashtable.h>
  89. #include <linux/uaccess.h>
  90. #include "util.h"
  91. /* One semaphore structure for each semaphore in the system. */
  92. struct sem {
  93. int semval; /* current value */
  94. /*
  95. * PID of the process that last modified the semaphore. For
  96. * Linux, specifically these are:
  97. * - semop
  98. * - semctl, via SETVAL and SETALL.
  99. * - at task exit when performing undo adjustments (see exit_sem).
  100. */
  101. struct pid *sempid;
  102. spinlock_t lock; /* spinlock for fine-grained semtimedop */
  103. struct list_head pending_alter; /* pending single-sop operations */
  104. /* that alter the semaphore */
  105. struct list_head pending_const; /* pending single-sop operations */
  106. /* that do not alter the semaphore*/
  107. time64_t sem_otime; /* candidate for sem_otime */
  108. } ____cacheline_aligned_in_smp;
  109. /* One sem_array data structure for each set of semaphores in the system. */
  110. struct sem_array {
  111. struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
  112. time64_t sem_ctime; /* create/last semctl() time */
  113. struct list_head pending_alter; /* pending operations */
  114. /* that alter the array */
  115. struct list_head pending_const; /* pending complex operations */
  116. /* that do not alter semvals */
  117. struct list_head list_id; /* undo requests on this array */
  118. int sem_nsems; /* no. of semaphores in array */
  119. int complex_count; /* pending complex operations */
  120. unsigned int use_global_lock;/* >0: global lock required */
  121. struct sem sems[];
  122. } __randomize_layout;
  123. /* One queue for each sleeping process in the system. */
  124. struct sem_queue {
  125. struct list_head list; /* queue of pending operations */
  126. struct task_struct *sleeper; /* this process */
  127. struct sem_undo *undo; /* undo structure */
  128. struct pid *pid; /* process id of requesting process */
  129. int status; /* completion status of operation */
  130. struct sembuf *sops; /* array of pending operations */
  131. struct sembuf *blocking; /* the operation that blocked */
  132. int nsops; /* number of operations */
  133. bool alter; /* does *sops alter the array? */
  134. bool dupsop; /* sops on more than one sem_num */
  135. };
  136. /* Each task has a list of undo requests. They are executed automatically
  137. * when the process exits.
  138. */
  139. struct sem_undo {
  140. struct list_head list_proc; /* per-process list: *
  141. * all undos from one process
  142. * rcu protected */
  143. struct rcu_head rcu; /* rcu struct for sem_undo */
  144. struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
  145. struct list_head list_id; /* per semaphore array list:
  146. * all undos for one array */
  147. int semid; /* semaphore set identifier */
  148. short *semadj; /* array of adjustments */
  149. /* one per semaphore */
  150. };
  151. /* sem_undo_list controls shared access to the list of sem_undo structures
  152. * that may be shared by all tasks in a CLONE_SYSVSEM task group.
  153. */
  154. struct sem_undo_list {
  155. refcount_t refcnt;
  156. spinlock_t lock;
  157. struct list_head list_proc;
  158. };
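As an informal reading aid (not part of the original file), the structures above relate roughly as follows for one semaphore array:

	/*
	 * sem_undo_list (per CLONE_SYSVSEM group)      sem_array
	 *   list_proc --> sem_undo                       list_id <-- sem_undo.list_id
	 *                   semadj[0..sem_nsems-1]       sems[0..sem_nsems-1]
	 *                                                  pending_alter --> sem_queue ...
	 *                                                  pending_const --> sem_queue ...
	 *
	 * One sem_queue exists per sleeping semop(); it carries the sops[]
	 * array, the sleeping task and a pointer to the task's sem_undo.
	 */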
  159. #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
  160. static int newary(struct ipc_namespace *, struct ipc_params *);
  161. static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
  162. #ifdef CONFIG_PROC_FS
  163. static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  164. #endif
  165. #define SEMMSL_FAST 256 /* 512 bytes on stack */
  166. #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
  167. /*
  168. * Switching from the mode suitable for simple ops
  169. * to the mode for complex ops is costly. Therefore:
  170. * use some hysteresis
  171. */
  172. #define USE_GLOBAL_LOCK_HYSTERESIS 10
  173. /*
  174. * Locking:
  175. * a) global sem_lock() for read/write
  176. * sem_undo.id_next,
  177. * sem_array.complex_count,
  178. * sem_array.pending{_alter,_const},
  179. * sem_array.sem_undo
  180. *
  181. * b) global or semaphore sem_lock() for read/write:
  182. * sem_array.sems[i].pending_{const,alter}:
  183. *
  184. * c) special:
  185. * sem_undo_list.list_proc:
  186. * * undo_list->lock for write
  187. * * rcu for read
  188. * use_global_lock:
  189. * * global sem_lock() for write
  190. * * either local or global sem_lock() for read.
  191. *
  192. * Memory ordering:
  193. * Most ordering is enforced by using spin_lock() and spin_unlock().
  194. *
  195. * Exceptions:
  196. * 1) use_global_lock: (SEM_BARRIER_1)
  197. * Setting it from non-zero to 0 is a RELEASE, this is ensured by
  198. * using smp_store_release(): Immediately after setting it to 0,
  199. * a simple op can start.
  200. * Testing if it is non-zero is an ACQUIRE, this is ensured by using
  201. * smp_load_acquire().
  202. * Setting it from 0 to non-zero must be ordered with regard to
  203. * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
  204. * is inside a spin_lock() and after a write from 0 to non-zero a
  205. * spin_lock()+spin_unlock() is done.
  206. *
  207. * 2) queue.status: (SEM_BARRIER_2)
  208. * Initialization is done while holding sem_lock(), so no further barrier is
  209. * required.
  210. * Setting it to a result code is a RELEASE, this is ensured by both a
  211. * smp_store_release() (for case a) and while holding sem_lock()
  212. * (for case b).
  213. * The ACQUIRE when reading the result code without holding sem_lock() is
  214. * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
  215. * (case a above).
  216. * Reading the result code while holding sem_lock() needs no further barriers,
  217. * the locks inside sem_lock() enforce ordering (case b above)
  218. *
  219. * 3) current->state:
  220. * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
  221. * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
  222. * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
  223. * when holding sem_lock(), no further barriers are required.
  224. *
  225. * See also ipc/mqueue.c for more details on the covered races.
  226. */
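The SEM_BARRIER_1 pairing above can be condensed into a two-CPU sketch (informal; the code lives in complexmode_tryleave() and the sem_lock() fast path further down):

	/*
	 * CPU 1: complexmode_tryleave()           CPU 2: sem_lock(), simple op
	 * -----------------------------           ----------------------------
	 * ... updates under the global lock ...   spin_lock(&sem->lock);
	 * smp_store_release(                      if (!smp_load_acquire(
	 *         &sma->use_global_lock, 0);              &sma->use_global_lock))
	 *                                             take the fast path: all writes
	 *                                             made before the RELEASE are
	 *                                             visible before the simple op runs.
	 */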
  227. #define sc_semmsl sem_ctls[0]
  228. #define sc_semmns sem_ctls[1]
  229. #define sc_semopm sem_ctls[2]
  230. #define sc_semmni sem_ctls[3]
  231. void sem_init_ns(struct ipc_namespace *ns)
  232. {
  233. ns->sc_semmsl = SEMMSL;
  234. ns->sc_semmns = SEMMNS;
  235. ns->sc_semopm = SEMOPM;
  236. ns->sc_semmni = SEMMNI;
  237. ns->used_sems = 0;
  238. ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
  239. }
  240. #ifdef CONFIG_IPC_NS
  241. void sem_exit_ns(struct ipc_namespace *ns)
  242. {
  243. free_ipcs(ns, &sem_ids(ns), freeary);
  244. idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
  245. rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
  246. }
  247. #endif
  248. void __init sem_init(void)
  249. {
  250. sem_init_ns(&init_ipc_ns);
  251. ipc_init_proc_interface("sysvipc/sem",
  252. " key semid perms nsems uid gid cuid cgid otime ctime\n",
  253. IPC_SEM_IDS, sysvipc_sem_proc_show);
  254. }
  255. /**
  256. * unmerge_queues - unmerge queues, if possible.
  257. * @sma: semaphore array
  258. *
  259. * The function unmerges the wait queues if complex_count is 0.
  260. * It must be called prior to dropping the global semaphore array lock.
  261. */
  262. static void unmerge_queues(struct sem_array *sma)
  263. {
  264. struct sem_queue *q, *tq;
  265. /* complex operations still around? */
  266. if (sma->complex_count)
  267. return;
  268. /*
  269. * We will switch back to simple mode.
  270. * Move all pending operations back into the per-semaphore
  271. * queues.
  272. */
  273. list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
  274. struct sem *curr;
  275. curr = &sma->sems[q->sops[0].sem_num];
  276. list_add_tail(&q->list, &curr->pending_alter);
  277. }
  278. INIT_LIST_HEAD(&sma->pending_alter);
  279. }
  280. /**
  281. * merge_queues - merge single semop queues into global queue
  282. * @sma: semaphore array
  283. *
  284. * This function merges all per-semaphore queues into the global queue.
  285. * It is necessary to achieve FIFO ordering for the pending single-sop
  286. * operations when a multi-semop operation must sleep.
  287. * Only the alter operations must be moved, the const operations can stay.
  288. */
  289. static void merge_queues(struct sem_array *sma)
  290. {
  291. int i;
  292. for (i = 0; i < sma->sem_nsems; i++) {
  293. struct sem *sem = &sma->sems[i];
  294. list_splice_init(&sem->pending_alter, &sma->pending_alter);
  295. }
  296. }
  297. static void sem_rcu_free(struct rcu_head *head)
  298. {
  299. struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
  300. struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
  301. security_sem_free(&sma->sem_perm);
  302. kvfree(sma);
  303. }
  304. /*
  305. * Enter the mode suitable for non-simple operations:
  306. * Caller must own sem_perm.lock.
  307. */
  308. static void complexmode_enter(struct sem_array *sma)
  309. {
  310. int i;
  311. struct sem *sem;
  312. if (sma->use_global_lock > 0) {
  313. /*
  314. * We are already in global lock mode.
  315. * Nothing to do, just reset the
  316. * counter until we return to simple mode.
  317. */
  318. sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
  319. return;
  320. }
  321. sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
  322. for (i = 0; i < sma->sem_nsems; i++) {
  323. sem = &sma->sems[i];
  324. spin_lock(&sem->lock);
  325. spin_unlock(&sem->lock);
  326. }
  327. }
  328. /*
  329. * Try to leave the mode that disallows simple operations:
  330. * Caller must own sem_perm.lock.
  331. */
  332. static void complexmode_tryleave(struct sem_array *sma)
  333. {
  334. if (sma->complex_count) {
  335. /* Complex ops are sleeping.
  336. * We must stay in complex mode
  337. */
  338. return;
  339. }
  340. if (sma->use_global_lock == 1) {
  341. /* See SEM_BARRIER_1 for purpose/pairing */
  342. smp_store_release(&sma->use_global_lock, 0);
  343. } else {
  344. sma->use_global_lock--;
  345. }
  346. }
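Taken together, complexmode_enter() and complexmode_tryleave() implement the hysteresis announced at USE_GLOBAL_LOCK_HYSTERESIS; an informal trace, assuming no further complex ops arrive:

	/*
	 * complex semop()                  -> use_global_lock = 10 (global lock mode)
	 * each later global-lock release
	 * with no complex op pending       -> use_global_lock 9, 8, ..., 1
	 * release while the counter is 1   -> smp_store_release(use_global_lock, 0);
	 *                                     simple ops go back to per-semaphore locks
	 */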
  347. #define SEM_GLOBAL_LOCK (-1)
  348. /*
  349. * If the request contains only one semaphore operation, and there are
  350. * no complex transactions pending, lock only the semaphore involved.
  351. * Otherwise, lock the entire semaphore array, since we either have
  352. * multiple semaphores in our own semops, or we need to look at
  353. * semaphores from other pending complex operations.
  354. */
  355. static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
  356. int nsops)
  357. {
  358. struct sem *sem;
  359. int idx;
  360. if (nsops != 1) {
  361. /* Complex operation - acquire a full lock */
  362. ipc_lock_object(&sma->sem_perm);
  363. /* Prevent parallel simple ops */
  364. complexmode_enter(sma);
  365. return SEM_GLOBAL_LOCK;
  366. }
  367. /*
  368. * Only one semaphore affected - try to optimize locking.
  369. * Optimized locking is possible if no complex operation
  370. * is either enqueued or processed right now.
  371. *
  372. * Both facts are tracked by use_global_lock.
  373. */
  374. idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
  375. sem = &sma->sems[idx];
  376. /*
  377. * Initial check for use_global_lock. Just an optimization,
  378. * no locking, no memory barrier.
  379. */
  380. if (!sma->use_global_lock) {
  381. /*
  382. * It appears that no complex operation is around.
  383. * Acquire the per-semaphore lock.
  384. */
  385. spin_lock(&sem->lock);
  386. /* see SEM_BARRIER_1 for purpose/pairing */
  387. if (!smp_load_acquire(&sma->use_global_lock)) {
  388. /* fast path successful! */
  389. return sops->sem_num;
  390. }
  391. spin_unlock(&sem->lock);
  392. }
  393. /* slow path: acquire the full lock */
  394. ipc_lock_object(&sma->sem_perm);
  395. if (sma->use_global_lock == 0) {
  396. /*
  397. * The use_global_lock mode ended while we waited for
  398. * sma->sem_perm.lock. Thus we must switch to locking
  399. * with sem->lock.
  400. * Unlike in the fast path, there is no need to recheck
  401. * sma->use_global_lock after we have acquired sem->lock:
  402. * We own sma->sem_perm.lock, thus use_global_lock cannot
  403. * change.
  404. */
  405. spin_lock(&sem->lock);
  406. ipc_unlock_object(&sma->sem_perm);
  407. return sops->sem_num;
  408. } else {
  409. /*
  410. * Not a false alarm, thus continue to use the global lock
  411. * mode. No need for complexmode_enter(), this was done by
  412. * the caller that has set use_global_lock to non-zero.
  413. */
  414. return SEM_GLOBAL_LOCK;
  415. }
  416. }
  417. static inline void sem_unlock(struct sem_array *sma, int locknum)
  418. {
  419. if (locknum == SEM_GLOBAL_LOCK) {
  420. unmerge_queues(sma);
  421. complexmode_tryleave(sma);
  422. ipc_unlock_object(&sma->sem_perm);
  423. } else {
  424. struct sem *sem = &sma->sems[locknum];
  425. spin_unlock(&sem->lock);
  426. }
  427. }
  428. /*
  429. * sem_lock_(check_) routines are called in the paths where the rwsem
  430. * is not held.
  431. *
  432. * The caller holds the RCU read lock.
  433. */
  434. static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
  435. {
  436. struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
  437. if (IS_ERR(ipcp))
  438. return ERR_CAST(ipcp);
  439. return container_of(ipcp, struct sem_array, sem_perm);
  440. }
  441. static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
  442. int id)
  443. {
  444. struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
  445. if (IS_ERR(ipcp))
  446. return ERR_CAST(ipcp);
  447. return container_of(ipcp, struct sem_array, sem_perm);
  448. }
  449. static inline void sem_lock_and_putref(struct sem_array *sma)
  450. {
  451. sem_lock(sma, NULL, -1);
  452. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  453. }
  454. static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  455. {
  456. ipc_rmid(&sem_ids(ns), &s->sem_perm);
  457. }
  458. static struct sem_array *sem_alloc(size_t nsems)
  459. {
  460. struct sem_array *sma;
  461. if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
  462. return NULL;
  463. sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
  464. if (unlikely(!sma))
  465. return NULL;
  466. return sma;
  467. }
  468. /**
  469. * newary - Create a new semaphore set
  470. * @ns: namespace
  471. * @params: ptr to the structure that contains key, semflg and nsems
  472. *
  473. * Called with sem_ids.rwsem held (as a writer)
  474. */
  475. static int newary(struct ipc_namespace *ns, struct ipc_params *params)
  476. {
  477. int retval;
  478. struct sem_array *sma;
  479. key_t key = params->key;
  480. int nsems = params->u.nsems;
  481. int semflg = params->flg;
  482. int i;
  483. if (!nsems)
  484. return -EINVAL;
  485. if (ns->used_sems + nsems > ns->sc_semmns)
  486. return -ENOSPC;
  487. sma = sem_alloc(nsems);
  488. if (!sma)
  489. return -ENOMEM;
  490. sma->sem_perm.mode = (semflg & S_IRWXUGO);
  491. sma->sem_perm.key = key;
  492. sma->sem_perm.security = NULL;
  493. retval = security_sem_alloc(&sma->sem_perm);
  494. if (retval) {
  495. kvfree(sma);
  496. return retval;
  497. }
  498. for (i = 0; i < nsems; i++) {
  499. INIT_LIST_HEAD(&sma->sems[i].pending_alter);
  500. INIT_LIST_HEAD(&sma->sems[i].pending_const);
  501. spin_lock_init(&sma->sems[i].lock);
  502. }
  503. sma->complex_count = 0;
  504. sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
  505. INIT_LIST_HEAD(&sma->pending_alter);
  506. INIT_LIST_HEAD(&sma->pending_const);
  507. INIT_LIST_HEAD(&sma->list_id);
  508. sma->sem_nsems = nsems;
  509. sma->sem_ctime = ktime_get_real_seconds();
  510. /* ipc_addid() locks sma upon success. */
  511. retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
  512. if (retval < 0) {
  513. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  514. return retval;
  515. }
  516. ns->used_sems += nsems;
  517. sem_unlock(sma, -1);
  518. rcu_read_unlock();
  519. return sma->sem_perm.id;
  520. }
  521. /*
  522. * Called with sem_ids.rwsem and ipcp locked.
  523. */
  524. static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
  525. {
  526. struct sem_array *sma;
  527. sma = container_of(ipcp, struct sem_array, sem_perm);
  528. if (params->u.nsems > sma->sem_nsems)
  529. return -EINVAL;
  530. return 0;
  531. }
  532. long ksys_semget(key_t key, int nsems, int semflg)
  533. {
  534. struct ipc_namespace *ns;
  535. static const struct ipc_ops sem_ops = {
  536. .getnew = newary,
  537. .associate = security_sem_associate,
  538. .more_checks = sem_more_checks,
  539. };
  540. struct ipc_params sem_params;
  541. ns = current->nsproxy->ipc_ns;
  542. if (nsems < 0 || nsems > ns->sc_semmsl)
  543. return -EINVAL;
  544. sem_params.key = key;
  545. sem_params.flg = semflg;
  546. sem_params.u.nsems = nsems;
  547. return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
  548. }
  549. SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  550. {
  551. return ksys_semget(key, nsems, semflg);
  552. }
  553. /**
  554. * perform_atomic_semop[_slow] - Attempt to perform semaphore
  555. * operations on a given array.
  556. * @sma: semaphore array
  557. * @q: struct sem_queue that describes the operation
  558. *
  559. * Whether the caller blocks is determined as follows, based on the value
  560. * of the semaphore operation (sem_op):
  561. *
  562. * (1) >0 never blocks.
  563. * (2) 0 (wait-for-zero operation): blocks if semval is non-zero.
  564. * (3) <0 blocks if decrementing semval would make it smaller than zero.
  565. *
  566. * Returns 0 if the operation was possible.
  567. * Returns 1 if the operation is impossible, the caller must sleep.
  568. * Returns <0 for error codes.
  569. */
  570. static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
  571. {
  572. int result, sem_op, nsops;
  573. struct pid *pid;
  574. struct sembuf *sop;
  575. struct sem *curr;
  576. struct sembuf *sops;
  577. struct sem_undo *un;
  578. sops = q->sops;
  579. nsops = q->nsops;
  580. un = q->undo;
  581. for (sop = sops; sop < sops + nsops; sop++) {
  582. int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
  583. curr = &sma->sems[idx];
  584. sem_op = sop->sem_op;
  585. result = curr->semval;
  586. if (!sem_op && result)
  587. goto would_block;
  588. result += sem_op;
  589. if (result < 0)
  590. goto would_block;
  591. if (result > SEMVMX)
  592. goto out_of_range;
  593. if (sop->sem_flg & SEM_UNDO) {
  594. int undo = un->semadj[sop->sem_num] - sem_op;
  595. /* Exceeding the undo range is an error. */
  596. if (undo < (-SEMAEM - 1) || undo > SEMAEM)
  597. goto out_of_range;
  598. un->semadj[sop->sem_num] = undo;
  599. }
  600. curr->semval = result;
  601. }
  602. sop--;
  603. pid = q->pid;
  604. while (sop >= sops) {
  605. ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
  606. sop--;
  607. }
  608. return 0;
  609. out_of_range:
  610. result = -ERANGE;
  611. goto undo;
  612. would_block:
  613. q->blocking = sop;
  614. if (sop->sem_flg & IPC_NOWAIT)
  615. result = -EAGAIN;
  616. else
  617. result = 1;
  618. undo:
  619. sop--;
  620. while (sop >= sops) {
  621. sem_op = sop->sem_op;
  622. sma->sems[sop->sem_num].semval -= sem_op;
  623. if (sop->sem_flg & SEM_UNDO)
  624. un->semadj[sop->sem_num] += sem_op;
  625. sop--;
  626. }
  627. return result;
  628. }
  629. static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
  630. {
  631. int result, sem_op, nsops;
  632. struct sembuf *sop;
  633. struct sem *curr;
  634. struct sembuf *sops;
  635. struct sem_undo *un;
  636. sops = q->sops;
  637. nsops = q->nsops;
  638. un = q->undo;
  639. if (unlikely(q->dupsop))
  640. return perform_atomic_semop_slow(sma, q);
  641. /*
  642. * We scan the semaphore set twice, first to ensure that the entire
  643. * operation can succeed, therefore avoiding any pointless writes
  644. * to shared memory and having to undo such changes in order to block
  645. * until the operations can go through.
  646. */
  647. for (sop = sops; sop < sops + nsops; sop++) {
  648. int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
  649. curr = &sma->sems[idx];
  650. sem_op = sop->sem_op;
  651. result = curr->semval;
  652. if (!sem_op && result)
  653. goto would_block; /* wait-for-zero */
  654. result += sem_op;
  655. if (result < 0)
  656. goto would_block;
  657. if (result > SEMVMX)
  658. return -ERANGE;
  659. if (sop->sem_flg & SEM_UNDO) {
  660. int undo = un->semadj[sop->sem_num] - sem_op;
  661. /* Exceeding the undo range is an error. */
  662. if (undo < (-SEMAEM - 1) || undo > SEMAEM)
  663. return -ERANGE;
  664. }
  665. }
  666. for (sop = sops; sop < sops + nsops; sop++) {
  667. curr = &sma->sems[sop->sem_num];
  668. sem_op = sop->sem_op;
  669. result = curr->semval;
  670. if (sop->sem_flg & SEM_UNDO) {
  671. int undo = un->semadj[sop->sem_num] - sem_op;
  672. un->semadj[sop->sem_num] = undo;
  673. }
  674. curr->semval += sem_op;
  675. ipc_update_pid(&curr->sempid, q->pid);
  676. }
  677. return 0;
  678. would_block:
  679. q->blocking = sop;
  680. return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
  681. }
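A small worked example of the semadj bookkeeping in the two functions above (informal; the accumulated adjustment is added back to semval when the task exits, clamped to 0..SEMVMX as noted in the header comment):

	/*
	 * semop(+3, SEM_UNDO): semval 0 -> 3,  semadj = 0 - (+3) = -3
	 * semop(-1, SEM_UNDO): semval 3 -> 2,  semadj = -3 - (-1) = -2
	 * task exits:          semval 2 + (-2) = 0, the net effect is undone
	 */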
  682. static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
  683. struct wake_q_head *wake_q)
  684. {
  685. struct task_struct *sleeper;
  686. sleeper = get_task_struct(q->sleeper);
  687. /* see SEM_BARRIER_2 for purpose/pairing */
  688. smp_store_release(&q->status, error);
  689. wake_q_add_safe(wake_q, sleeper);
  690. }
  691. static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
  692. {
  693. list_del(&q->list);
  694. if (q->nsops > 1)
  695. sma->complex_count--;
  696. }
  697. /** check_restart(sma, q)
  698. * @sma: semaphore array
  699. * @q: the operation that just completed
  700. *
  701. * update_queue is O(N^2) when it restarts scanning the whole queue of
  702. * waiting operations. Therefore this function checks if the restart is
  703. * really necessary. It is called after a previously waiting operation
  704. * modified the array.
  705. * Note that wait-for-zero operations are handled without restart.
  706. */
  707. static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
  708. {
  709. /* pending complex alter operations are too difficult to analyse */
  710. if (!list_empty(&sma->pending_alter))
  711. return 1;
  712. /* we were a sleeping complex operation. Too difficult */
  713. if (q->nsops > 1)
  714. return 1;
  715. /* It is impossible that someone waits for the new value:
  716. * - complex operations always restart.
  717. * - wait-for-zero operations are handled separately.
  718. * - q is a previously sleeping simple operation that
  719. * altered the array. It must be a decrement, because
  720. * simple increments never sleep.
  721. * - If there are older (higher priority) decrements
  722. * in the queue, then they have observed the original
  723. * semval value and couldn't proceed. The operation
  724. * decremented the value - thus they won't proceed either.
  725. */
  726. return 0;
  727. }
  728. /**
  729. * wake_const_ops - wake up non-alter tasks
  730. * @sma: semaphore array.
  731. * @semnum: semaphore that was modified.
  732. * @wake_q: lockless wake-queue head.
  733. *
  734. * wake_const_ops must be called after a semaphore in a semaphore array
  735. * was set to 0. If complex const operations are pending, wake_const_ops must
  736. * be called with semnum = -1, as well as with the number of each modified
  737. * semaphore.
  738. * The tasks that must be woken up are added to @wake_q. The return code
  739. * is stored in q->status.
  740. * The function returns 1 if at least one operation was completed successfully.
  741. */
  742. static int wake_const_ops(struct sem_array *sma, int semnum,
  743. struct wake_q_head *wake_q)
  744. {
  745. struct sem_queue *q, *tmp;
  746. struct list_head *pending_list;
  747. int semop_completed = 0;
  748. if (semnum == -1)
  749. pending_list = &sma->pending_const;
  750. else
  751. pending_list = &sma->sems[semnum].pending_const;
  752. list_for_each_entry_safe(q, tmp, pending_list, list) {
  753. int error = perform_atomic_semop(sma, q);
  754. if (error > 0)
  755. continue;
  756. /* operation completed, remove from queue & wakeup */
  757. unlink_queue(sma, q);
  758. wake_up_sem_queue_prepare(q, error, wake_q);
  759. if (error == 0)
  760. semop_completed = 1;
  761. }
  762. return semop_completed;
  763. }
  764. /**
  765. * do_smart_wakeup_zero - wakeup all wait for zero tasks
  766. * @sma: semaphore array
  767. * @sops: operations that were performed
  768. * @nsops: number of operations
  769. * @wake_q: lockless wake-queue head
  770. *
  771. * Checks all required queues for wait-for-zero operations, based
  772. * on the actual changes that were performed on the semaphore array.
  773. * The function returns 1 if at least one operation was completed successfully.
  774. */
  775. static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
  776. int nsops, struct wake_q_head *wake_q)
  777. {
  778. int i;
  779. int semop_completed = 0;
  780. int got_zero = 0;
  781. /* first: the per-semaphore queues, if known */
  782. if (sops) {
  783. for (i = 0; i < nsops; i++) {
  784. int num = sops[i].sem_num;
  785. if (sma->sems[num].semval == 0) {
  786. got_zero = 1;
  787. semop_completed |= wake_const_ops(sma, num, wake_q);
  788. }
  789. }
  790. } else {
  791. /*
  792. * No sops means modified semaphores not known.
  793. * Assume all were changed.
  794. */
  795. for (i = 0; i < sma->sem_nsems; i++) {
  796. if (sma->sems[i].semval == 0) {
  797. got_zero = 1;
  798. semop_completed |= wake_const_ops(sma, i, wake_q);
  799. }
  800. }
  801. }
  802. /*
  803. * If one of the modified semaphores got 0,
  804. * then check the global queue, too.
  805. */
  806. if (got_zero)
  807. semop_completed |= wake_const_ops(sma, -1, wake_q);
  808. return semop_completed;
  809. }
  810. /**
  811. * update_queue - look for tasks that can be completed.
  812. * @sma: semaphore array.
  813. * @semnum: semaphore that was modified.
  814. * @wake_q: lockless wake-queue head.
  815. *
  816. * update_queue must be called after a semaphore in a semaphore array
  817. * was modified. If multiple semaphores were modified, update_queue must
  818. * be called with semnum = -1, as well as with the number of each modified
  819. * semaphore.
  820. * The tasks that must be woken up are added to @wake_q. The return code
  821. * is stored in q->status.
  822. * The function internally checks if const operations can now succeed.
  823. *
  824. * The function returns 1 if at least one semop was completed successfully.
  825. */
  826. static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
  827. {
  828. struct sem_queue *q, *tmp;
  829. struct list_head *pending_list;
  830. int semop_completed = 0;
  831. if (semnum == -1)
  832. pending_list = &sma->pending_alter;
  833. else
  834. pending_list = &sma->sems[semnum].pending_alter;
  835. again:
  836. list_for_each_entry_safe(q, tmp, pending_list, list) {
  837. int error, restart;
  838. /* If we are scanning the single sop, per-semaphore list of
  839. * one semaphore and that semaphore is 0, then it is not
  840. * necessary to scan further: simple increments
  841. * that affect only one entry succeed immediately and cannot
  842. * be in the per semaphore pending queue, and decrements
  843. * cannot be successful if the value is already 0.
  844. */
  845. if (semnum != -1 && sma->sems[semnum].semval == 0)
  846. break;
  847. error = perform_atomic_semop(sma, q);
  848. /* Does q->sleeper still need to sleep? */
  849. if (error > 0)
  850. continue;
  851. unlink_queue(sma, q);
  852. if (error) {
  853. restart = 0;
  854. } else {
  855. semop_completed = 1;
  856. do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
  857. restart = check_restart(sma, q);
  858. }
  859. wake_up_sem_queue_prepare(q, error, wake_q);
  860. if (restart)
  861. goto again;
  862. }
  863. return semop_completed;
  864. }
  865. /**
  866. * set_semotime - set sem_otime
  867. * @sma: semaphore array
  868. * @sops: operations that modified the array, may be NULL
  869. *
  870. * sem_otime is replicated to avoid cache line thrashing.
  871. * This function sets one instance to the current time.
  872. */
  873. static void set_semotime(struct sem_array *sma, struct sembuf *sops)
  874. {
  875. if (sops == NULL) {
  876. sma->sems[0].sem_otime = ktime_get_real_seconds();
  877. } else {
  878. sma->sems[sops[0].sem_num].sem_otime =
  879. ktime_get_real_seconds();
  880. }
  881. }
  882. /**
  883. * do_smart_update - optimized update_queue
  884. * @sma: semaphore array
  885. * @sops: operations that were performed
  886. * @nsops: number of operations
  887. * @otime: force setting otime
  888. * @wake_q: lockless wake-queue head
  889. *
  890. * do_smart_update() does the required calls to update_queue and wakeup_zero,
  891. * based on the actual changes that were performed on the semaphore array.
  892. * Note that the function does not do the actual wake-up: the caller is
  893. * responsible for calling wake_up_q().
  894. * It is safe to perform this call after dropping all locks.
  895. */
  896. static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
  897. int otime, struct wake_q_head *wake_q)
  898. {
  899. int i;
  900. otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
  901. if (!list_empty(&sma->pending_alter)) {
  902. /* semaphore array uses the global queue - just process it. */
  903. otime |= update_queue(sma, -1, wake_q);
  904. } else {
  905. if (!sops) {
  906. /*
  907. * No sops, thus the modified semaphores are not
  908. * known. Check all.
  909. */
  910. for (i = 0; i < sma->sem_nsems; i++)
  911. otime |= update_queue(sma, i, wake_q);
  912. } else {
  913. /*
  914. * Check the semaphores that were increased:
  915. * - No complex ops, thus all sleeping ops are
  916. * decreases.
  917. * - if we decreased the value, then any sleeping
  918. * semaphore ops won't be able to run: If the
  919. * previous value was too small, then the new
  920. * value will be too small, too.
  921. */
  922. for (i = 0; i < nsops; i++) {
  923. if (sops[i].sem_op > 0) {
  924. otime |= update_queue(sma,
  925. sops[i].sem_num, wake_q);
  926. }
  927. }
  928. }
  929. }
  930. if (otime)
  931. set_semotime(sma, sops);
  932. }
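The calling convention documented for do_smart_update() shows up at the call sites later in this file roughly as follows (sketch):

	/*
	 *	DEFINE_WAKE_Q(wake_q);
	 *	sem_lock(sma, ...);
	 *	... modify semaphore values ...
	 *	do_smart_update(sma, sops, nsops, 0, &wake_q);
	 *	sem_unlock(sma, ...);
	 *	rcu_read_unlock();
	 *	wake_up_q(&wake_q);      actual wake-ups, after all locks are dropped
	 */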
  933. /*
  934. * check_qop: Test if a queued operation sleeps on the semaphore semnum
  935. */
  936. static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
  937. bool count_zero)
  938. {
  939. struct sembuf *sop = q->blocking;
  940. /*
  941. * Linux always (since 0.99.10) reported a task as sleeping on all
  942. * semaphores. This violates SUS, therefore it was changed to the
  943. * standard compliant behavior.
  944. * Give the administrators a chance to notice that an application
  945. * might misbehave because it relies on the Linux behavior.
  946. */
  947. pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
  948. "The task %s (%d) triggered the difference, watch for misbehavior.\n",
  949. current->comm, task_pid_nr(current));
  950. if (sop->sem_num != semnum)
  951. return 0;
  952. if (count_zero && sop->sem_op == 0)
  953. return 1;
  954. if (!count_zero && sop->sem_op < 0)
  955. return 1;
  956. return 0;
  957. }
  958. /* The following counts are associated with each semaphore:
  959. * semncnt number of tasks waiting on semval being nonzero
  960. * semzcnt number of tasks waiting on semval being zero
  961. *
  962. * Per definition, a task waits only on the semaphore of the first semop
  963. * that cannot proceed, even if additional operations would block, too.
  964. */
  965. static int count_semcnt(struct sem_array *sma, ushort semnum,
  966. bool count_zero)
  967. {
  968. struct list_head *l;
  969. struct sem_queue *q;
  970. int semcnt;
  971. semcnt = 0;
  972. /* First: check the simple operations. They are easy to evaluate */
  973. if (count_zero)
  974. l = &sma->sems[semnum].pending_const;
  975. else
  976. l = &sma->sems[semnum].pending_alter;
  977. list_for_each_entry(q, l, list) {
  978. /* all tasks on a per-semaphore list sleep on exactly
  979. * that semaphore
  980. */
  981. semcnt++;
  982. }
  983. /* Then: check the complex operations. */
  984. list_for_each_entry(q, &sma->pending_alter, list) {
  985. semcnt += check_qop(sma, semnum, q, count_zero);
  986. }
  987. if (count_zero) {
  988. list_for_each_entry(q, &sma->pending_const, list) {
  989. semcnt += check_qop(sma, semnum, q, count_zero);
  990. }
  991. }
  992. return semcnt;
  993. }
  994. /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
  995. * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
  996. * remains locked on exit.
  997. */
  998. static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
  999. {
  1000. struct sem_undo *un, *tu;
  1001. struct sem_queue *q, *tq;
  1002. struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
  1003. int i;
  1004. DEFINE_WAKE_Q(wake_q);
  1005. /* Free the existing undo structures for this semaphore set. */
  1006. ipc_assert_locked_object(&sma->sem_perm);
  1007. list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
  1008. list_del(&un->list_id);
  1009. spin_lock(&un->ulp->lock);
  1010. un->semid = -1;
  1011. list_del_rcu(&un->list_proc);
  1012. spin_unlock(&un->ulp->lock);
  1013. kfree_rcu(un, rcu);
  1014. }
  1015. /* Wake up all pending processes and let them fail with EIDRM. */
  1016. list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
  1017. unlink_queue(sma, q);
  1018. wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
  1019. }
  1020. list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
  1021. unlink_queue(sma, q);
  1022. wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
  1023. }
  1024. for (i = 0; i < sma->sem_nsems; i++) {
  1025. struct sem *sem = &sma->sems[i];
  1026. list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
  1027. unlink_queue(sma, q);
  1028. wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
  1029. }
  1030. list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
  1031. unlink_queue(sma, q);
  1032. wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
  1033. }
  1034. ipc_update_pid(&sem->sempid, NULL);
  1035. }
  1036. /* Remove the semaphore set from the IDR */
  1037. sem_rmid(ns, sma);
  1038. sem_unlock(sma, -1);
  1039. rcu_read_unlock();
  1040. wake_up_q(&wake_q);
  1041. ns->used_sems -= sma->sem_nsems;
  1042. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1043. }
  1044. static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
  1045. {
  1046. switch (version) {
  1047. case IPC_64:
  1048. return copy_to_user(buf, in, sizeof(*in));
  1049. case IPC_OLD:
  1050. {
  1051. struct semid_ds out;
  1052. memset(&out, 0, sizeof(out));
  1053. ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
  1054. out.sem_otime = in->sem_otime;
  1055. out.sem_ctime = in->sem_ctime;
  1056. out.sem_nsems = in->sem_nsems;
  1057. return copy_to_user(buf, &out, sizeof(out));
  1058. }
  1059. default:
  1060. return -EINVAL;
  1061. }
  1062. }
  1063. static time64_t get_semotime(struct sem_array *sma)
  1064. {
  1065. int i;
  1066. time64_t res;
  1067. res = sma->sems[0].sem_otime;
  1068. for (i = 1; i < sma->sem_nsems; i++) {
  1069. time64_t to = sma->sems[i].sem_otime;
  1070. if (to > res)
  1071. res = to;
  1072. }
  1073. return res;
  1074. }
  1075. static int semctl_stat(struct ipc_namespace *ns, int semid,
  1076. int cmd, struct semid64_ds *semid64)
  1077. {
  1078. struct sem_array *sma;
  1079. time64_t semotime;
  1080. int err;
  1081. memset(semid64, 0, sizeof(*semid64));
  1082. rcu_read_lock();
  1083. if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
  1084. sma = sem_obtain_object(ns, semid);
  1085. if (IS_ERR(sma)) {
  1086. err = PTR_ERR(sma);
  1087. goto out_unlock;
  1088. }
  1089. } else { /* IPC_STAT */
  1090. sma = sem_obtain_object_check(ns, semid);
  1091. if (IS_ERR(sma)) {
  1092. err = PTR_ERR(sma);
  1093. goto out_unlock;
  1094. }
  1095. }
  1096. /* see comment for SHM_STAT_ANY */
  1097. if (cmd == SEM_STAT_ANY)
  1098. audit_ipc_obj(&sma->sem_perm);
  1099. else {
  1100. err = -EACCES;
  1101. if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
  1102. goto out_unlock;
  1103. }
  1104. err = security_sem_semctl(&sma->sem_perm, cmd);
  1105. if (err)
  1106. goto out_unlock;
  1107. ipc_lock_object(&sma->sem_perm);
  1108. if (!ipc_valid_object(&sma->sem_perm)) {
  1109. ipc_unlock_object(&sma->sem_perm);
  1110. err = -EIDRM;
  1111. goto out_unlock;
  1112. }
  1113. kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
  1114. semotime = get_semotime(sma);
  1115. semid64->sem_otime = semotime;
  1116. semid64->sem_ctime = sma->sem_ctime;
  1117. #ifndef CONFIG_64BIT
  1118. semid64->sem_otime_high = semotime >> 32;
  1119. semid64->sem_ctime_high = sma->sem_ctime >> 32;
  1120. #endif
  1121. semid64->sem_nsems = sma->sem_nsems;
  1122. if (cmd == IPC_STAT) {
  1123. /*
  1124. * As defined in SUS:
  1125. * Return 0 on success
  1126. */
  1127. err = 0;
  1128. } else {
  1129. /*
  1130. * SEM_STAT and SEM_STAT_ANY (both Linux specific)
  1131. * Return the full id, including the sequence number
  1132. */
  1133. err = sma->sem_perm.id;
  1134. }
  1135. ipc_unlock_object(&sma->sem_perm);
  1136. out_unlock:
  1137. rcu_read_unlock();
  1138. return err;
  1139. }
  1140. static int semctl_info(struct ipc_namespace *ns, int semid,
  1141. int cmd, void __user *p)
  1142. {
  1143. struct seminfo seminfo;
  1144. int max_idx;
  1145. int err;
  1146. err = security_sem_semctl(NULL, cmd);
  1147. if (err)
  1148. return err;
  1149. memset(&seminfo, 0, sizeof(seminfo));
  1150. seminfo.semmni = ns->sc_semmni;
  1151. seminfo.semmns = ns->sc_semmns;
  1152. seminfo.semmsl = ns->sc_semmsl;
  1153. seminfo.semopm = ns->sc_semopm;
  1154. seminfo.semvmx = SEMVMX;
  1155. seminfo.semmnu = SEMMNU;
  1156. seminfo.semmap = SEMMAP;
  1157. seminfo.semume = SEMUME;
  1158. down_read(&sem_ids(ns).rwsem);
  1159. if (cmd == SEM_INFO) {
  1160. seminfo.semusz = sem_ids(ns).in_use;
  1161. seminfo.semaem = ns->used_sems;
  1162. } else {
  1163. seminfo.semusz = SEMUSZ;
  1164. seminfo.semaem = SEMAEM;
  1165. }
  1166. max_idx = ipc_get_maxidx(&sem_ids(ns));
  1167. up_read(&sem_ids(ns).rwsem);
  1168. if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
  1169. return -EFAULT;
  1170. return (max_idx < 0) ? 0 : max_idx;
  1171. }
  1172. static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
  1173. int val)
  1174. {
  1175. struct sem_undo *un;
  1176. struct sem_array *sma;
  1177. struct sem *curr;
  1178. int err;
  1179. DEFINE_WAKE_Q(wake_q);
  1180. if (val > SEMVMX || val < 0)
  1181. return -ERANGE;
  1182. rcu_read_lock();
  1183. sma = sem_obtain_object_check(ns, semid);
  1184. if (IS_ERR(sma)) {
  1185. rcu_read_unlock();
  1186. return PTR_ERR(sma);
  1187. }
  1188. if (semnum < 0 || semnum >= sma->sem_nsems) {
  1189. rcu_read_unlock();
  1190. return -EINVAL;
  1191. }
  1192. if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
  1193. rcu_read_unlock();
  1194. return -EACCES;
  1195. }
  1196. err = security_sem_semctl(&sma->sem_perm, SETVAL);
  1197. if (err) {
  1198. rcu_read_unlock();
  1199. return -EACCES;
  1200. }
  1201. sem_lock(sma, NULL, -1);
  1202. if (!ipc_valid_object(&sma->sem_perm)) {
  1203. sem_unlock(sma, -1);
  1204. rcu_read_unlock();
  1205. return -EIDRM;
  1206. }
  1207. semnum = array_index_nospec(semnum, sma->sem_nsems);
  1208. curr = &sma->sems[semnum];
  1209. ipc_assert_locked_object(&sma->sem_perm);
  1210. list_for_each_entry(un, &sma->list_id, list_id)
  1211. un->semadj[semnum] = 0;
  1212. curr->semval = val;
  1213. ipc_update_pid(&curr->sempid, task_tgid(current));
  1214. sma->sem_ctime = ktime_get_real_seconds();
  1215. /* maybe some queued-up processes were waiting for this */
  1216. do_smart_update(sma, NULL, 0, 0, &wake_q);
  1217. sem_unlock(sma, -1);
  1218. rcu_read_unlock();
  1219. wake_up_q(&wake_q);
  1220. return 0;
  1221. }
  1222. static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
  1223. int cmd, void __user *p)
  1224. {
  1225. struct sem_array *sma;
  1226. struct sem *curr;
  1227. int err, nsems;
  1228. ushort fast_sem_io[SEMMSL_FAST];
  1229. ushort *sem_io = fast_sem_io;
  1230. DEFINE_WAKE_Q(wake_q);
  1231. rcu_read_lock();
  1232. sma = sem_obtain_object_check(ns, semid);
  1233. if (IS_ERR(sma)) {
  1234. rcu_read_unlock();
  1235. return PTR_ERR(sma);
  1236. }
  1237. nsems = sma->sem_nsems;
  1238. err = -EACCES;
  1239. if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
  1240. goto out_rcu_wakeup;
  1241. err = security_sem_semctl(&sma->sem_perm, cmd);
  1242. if (err)
  1243. goto out_rcu_wakeup;
  1244. err = -EACCES;
  1245. switch (cmd) {
  1246. case GETALL:
  1247. {
  1248. ushort __user *array = p;
  1249. int i;
  1250. sem_lock(sma, NULL, -1);
  1251. if (!ipc_valid_object(&sma->sem_perm)) {
  1252. err = -EIDRM;
  1253. goto out_unlock;
  1254. }
  1255. if (nsems > SEMMSL_FAST) {
  1256. if (!ipc_rcu_getref(&sma->sem_perm)) {
  1257. err = -EIDRM;
  1258. goto out_unlock;
  1259. }
  1260. sem_unlock(sma, -1);
  1261. rcu_read_unlock();
  1262. sem_io = kvmalloc_array(nsems, sizeof(ushort),
  1263. GFP_KERNEL);
  1264. if (sem_io == NULL) {
  1265. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1266. return -ENOMEM;
  1267. }
  1268. rcu_read_lock();
  1269. sem_lock_and_putref(sma);
  1270. if (!ipc_valid_object(&sma->sem_perm)) {
  1271. err = -EIDRM;
  1272. goto out_unlock;
  1273. }
  1274. }
  1275. for (i = 0; i < sma->sem_nsems; i++)
  1276. sem_io[i] = sma->sems[i].semval;
  1277. sem_unlock(sma, -1);
  1278. rcu_read_unlock();
  1279. err = 0;
  1280. if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
  1281. err = -EFAULT;
  1282. goto out_free;
  1283. }
  1284. case SETALL:
  1285. {
  1286. int i;
  1287. struct sem_undo *un;
  1288. if (!ipc_rcu_getref(&sma->sem_perm)) {
  1289. err = -EIDRM;
  1290. goto out_rcu_wakeup;
  1291. }
  1292. rcu_read_unlock();
  1293. if (nsems > SEMMSL_FAST) {
  1294. sem_io = kvmalloc_array(nsems, sizeof(ushort),
  1295. GFP_KERNEL);
  1296. if (sem_io == NULL) {
  1297. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1298. return -ENOMEM;
  1299. }
  1300. }
  1301. if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
  1302. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1303. err = -EFAULT;
  1304. goto out_free;
  1305. }
  1306. for (i = 0; i < nsems; i++) {
  1307. if (sem_io[i] > SEMVMX) {
  1308. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1309. err = -ERANGE;
  1310. goto out_free;
  1311. }
  1312. }
  1313. rcu_read_lock();
  1314. sem_lock_and_putref(sma);
  1315. if (!ipc_valid_object(&sma->sem_perm)) {
  1316. err = -EIDRM;
  1317. goto out_unlock;
  1318. }
  1319. for (i = 0; i < nsems; i++) {
  1320. sma->sems[i].semval = sem_io[i];
  1321. ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
  1322. }
  1323. ipc_assert_locked_object(&sma->sem_perm);
  1324. list_for_each_entry(un, &sma->list_id, list_id) {
  1325. for (i = 0; i < nsems; i++)
  1326. un->semadj[i] = 0;
  1327. }
  1328. sma->sem_ctime = ktime_get_real_seconds();
  1329. /* maybe some queued-up processes were waiting for this */
  1330. do_smart_update(sma, NULL, 0, 0, &wake_q);
  1331. err = 0;
  1332. goto out_unlock;
  1333. }
1334. /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
  1335. }
  1336. err = -EINVAL;
  1337. if (semnum < 0 || semnum >= nsems)
  1338. goto out_rcu_wakeup;
  1339. sem_lock(sma, NULL, -1);
  1340. if (!ipc_valid_object(&sma->sem_perm)) {
  1341. err = -EIDRM;
  1342. goto out_unlock;
  1343. }
  1344. semnum = array_index_nospec(semnum, nsems);
  1345. curr = &sma->sems[semnum];
  1346. switch (cmd) {
  1347. case GETVAL:
  1348. err = curr->semval;
  1349. goto out_unlock;
  1350. case GETPID:
  1351. err = pid_vnr(curr->sempid);
  1352. goto out_unlock;
  1353. case GETNCNT:
  1354. err = count_semcnt(sma, semnum, 0);
  1355. goto out_unlock;
  1356. case GETZCNT:
  1357. err = count_semcnt(sma, semnum, 1);
  1358. goto out_unlock;
  1359. }
  1360. out_unlock:
  1361. sem_unlock(sma, -1);
  1362. out_rcu_wakeup:
  1363. rcu_read_unlock();
  1364. wake_up_q(&wake_q);
  1365. out_free:
  1366. if (sem_io != fast_sem_io)
  1367. kvfree(sem_io);
  1368. return err;
  1369. }
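/*
 * Convert a userspace semid_ds into the kernel's struct semid64_ds for
 * IPC_SET.  IPC_64 copies the structure as-is; IPC_OLD only carries over
 * uid, gid and mode from the legacy layout.
 */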
  1370. static inline unsigned long
  1371. copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
  1372. {
  1373. switch (version) {
  1374. case IPC_64:
  1375. if (copy_from_user(out, buf, sizeof(*out)))
  1376. return -EFAULT;
  1377. return 0;
  1378. case IPC_OLD:
  1379. {
  1380. struct semid_ds tbuf_old;
  1381. if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
  1382. return -EFAULT;
  1383. out->sem_perm.uid = tbuf_old.sem_perm.uid;
  1384. out->sem_perm.gid = tbuf_old.sem_perm.gid;
  1385. out->sem_perm.mode = tbuf_old.sem_perm.mode;
  1386. return 0;
  1387. }
  1388. default:
  1389. return -EINVAL;
  1390. }
  1391. }
  1392. /*
  1393. * This function handles some semctl commands which require the rwsem
  1394. * to be held in write mode.
1395. * NOTE: the caller must not hold any locks; the rwsem is taken inside this function.
  1396. */
  1397. static int semctl_down(struct ipc_namespace *ns, int semid,
  1398. int cmd, struct semid64_ds *semid64)
  1399. {
  1400. struct sem_array *sma;
  1401. int err;
  1402. struct kern_ipc_perm *ipcp;
  1403. down_write(&sem_ids(ns).rwsem);
  1404. rcu_read_lock();
  1405. ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
  1406. &semid64->sem_perm, 0);
  1407. if (IS_ERR(ipcp)) {
  1408. err = PTR_ERR(ipcp);
  1409. goto out_unlock1;
  1410. }
  1411. sma = container_of(ipcp, struct sem_array, sem_perm);
  1412. err = security_sem_semctl(&sma->sem_perm, cmd);
  1413. if (err)
  1414. goto out_unlock1;
  1415. switch (cmd) {
  1416. case IPC_RMID:
  1417. sem_lock(sma, NULL, -1);
  1418. /* freeary unlocks the ipc object and rcu */
  1419. freeary(ns, ipcp);
  1420. goto out_up;
  1421. case IPC_SET:
  1422. sem_lock(sma, NULL, -1);
  1423. err = ipc_update_perm(&semid64->sem_perm, ipcp);
  1424. if (err)
  1425. goto out_unlock0;
  1426. sma->sem_ctime = ktime_get_real_seconds();
  1427. break;
  1428. default:
  1429. err = -EINVAL;
  1430. goto out_unlock1;
  1431. }
  1432. out_unlock0:
  1433. sem_unlock(sma, -1);
  1434. out_unlock1:
  1435. rcu_read_unlock();
  1436. out_up:
  1437. up_write(&sem_ids(ns).rwsem);
  1438. return err;
  1439. }
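/*
 * Common entry point for the semctl() syscall: dispatch on cmd to the
 * info/stat helpers, semctl_main(), semctl_setval() or semctl_down().
 * @version selects the IPC_64 vs. IPC_OLD layout used when copying
 * semid_ds to or from userspace.
 */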
  1440. static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
  1441. {
  1442. struct ipc_namespace *ns;
  1443. void __user *p = (void __user *)arg;
  1444. struct semid64_ds semid64;
  1445. int err;
  1446. if (semid < 0)
  1447. return -EINVAL;
  1448. ns = current->nsproxy->ipc_ns;
  1449. switch (cmd) {
  1450. case IPC_INFO:
  1451. case SEM_INFO:
  1452. return semctl_info(ns, semid, cmd, p);
  1453. case IPC_STAT:
  1454. case SEM_STAT:
  1455. case SEM_STAT_ANY:
  1456. err = semctl_stat(ns, semid, cmd, &semid64);
  1457. if (err < 0)
  1458. return err;
  1459. if (copy_semid_to_user(p, &semid64, version))
  1460. err = -EFAULT;
  1461. return err;
  1462. case GETALL:
  1463. case GETVAL:
  1464. case GETPID:
  1465. case GETNCNT:
  1466. case GETZCNT:
  1467. case SETALL:
  1468. return semctl_main(ns, semid, semnum, cmd, p);
  1469. case SETVAL: {
  1470. int val;
  1471. #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
  1472. /* big-endian 64bit */
  1473. val = arg >> 32;
  1474. #else
  1475. /* 32bit or little-endian 64bit */
  1476. val = arg;
  1477. #endif
  1478. return semctl_setval(ns, semid, semnum, val);
  1479. }
  1480. case IPC_SET:
  1481. if (copy_semid_from_user(&semid64, p, version))
  1482. return -EFAULT;
  1483. fallthrough;
  1484. case IPC_RMID:
  1485. return semctl_down(ns, semid, cmd, &semid64);
  1486. default:
  1487. return -EINVAL;
  1488. }
  1489. }
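/*
 * The native semctl() syscall always uses the IPC_64 layout; the "old"
 * variant below extracts the layout version from the cmd bits via
 * ipc_parse_version().
 */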
  1490. SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
  1491. {
  1492. return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
  1493. }
  1494. #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
  1495. long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
  1496. {
  1497. int version = ipc_parse_version(&cmd);
  1498. return ksys_semctl(semid, semnum, cmd, arg, version);
  1499. }
  1500. SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
  1501. {
  1502. return ksys_old_semctl(semid, semnum, cmd, arg);
  1503. }
  1504. #endif
  1505. #ifdef CONFIG_COMPAT
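/*
 * Legacy 32-bit layout of struct semid_ds as seen by compat tasks; the
 * copy helpers below only ever touch sem_perm, the timestamps and
 * sem_nsems.
 */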
  1506. struct compat_semid_ds {
  1507. struct compat_ipc_perm sem_perm;
  1508. old_time32_t sem_otime;
  1509. old_time32_t sem_ctime;
  1510. compat_uptr_t sem_base;
  1511. compat_uptr_t sem_pending;
  1512. compat_uptr_t sem_pending_last;
  1513. compat_uptr_t undo;
  1514. unsigned short sem_nsems;
  1515. };
  1516. static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
  1517. int version)
  1518. {
  1519. memset(out, 0, sizeof(*out));
  1520. if (version == IPC_64) {
  1521. struct compat_semid64_ds __user *p = buf;
  1522. return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
  1523. } else {
  1524. struct compat_semid_ds __user *p = buf;
  1525. return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
  1526. }
  1527. }
  1528. static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
  1529. int version)
  1530. {
  1531. if (version == IPC_64) {
  1532. struct compat_semid64_ds v;
  1533. memset(&v, 0, sizeof(v));
  1534. to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
  1535. v.sem_otime = lower_32_bits(in->sem_otime);
  1536. v.sem_otime_high = upper_32_bits(in->sem_otime);
  1537. v.sem_ctime = lower_32_bits(in->sem_ctime);
  1538. v.sem_ctime_high = upper_32_bits(in->sem_ctime);
  1539. v.sem_nsems = in->sem_nsems;
  1540. return copy_to_user(buf, &v, sizeof(v));
  1541. } else {
  1542. struct compat_semid_ds v;
  1543. memset(&v, 0, sizeof(v));
  1544. to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
  1545. v.sem_otime = in->sem_otime;
  1546. v.sem_ctime = in->sem_ctime;
  1547. v.sem_nsems = in->sem_nsems;
  1548. return copy_to_user(buf, &v, sizeof(v));
  1549. }
  1550. }
  1551. static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
  1552. {
  1553. void __user *p = compat_ptr(arg);
  1554. struct ipc_namespace *ns;
  1555. struct semid64_ds semid64;
  1556. int err;
  1557. ns = current->nsproxy->ipc_ns;
  1558. if (semid < 0)
  1559. return -EINVAL;
  1560. switch (cmd & (~IPC_64)) {
  1561. case IPC_INFO:
  1562. case SEM_INFO:
  1563. return semctl_info(ns, semid, cmd, p);
  1564. case IPC_STAT:
  1565. case SEM_STAT:
  1566. case SEM_STAT_ANY:
  1567. err = semctl_stat(ns, semid, cmd, &semid64);
  1568. if (err < 0)
  1569. return err;
  1570. if (copy_compat_semid_to_user(p, &semid64, version))
  1571. err = -EFAULT;
  1572. return err;
  1573. case GETVAL:
  1574. case GETPID:
  1575. case GETNCNT:
  1576. case GETZCNT:
  1577. case GETALL:
  1578. case SETALL:
  1579. return semctl_main(ns, semid, semnum, cmd, p);
  1580. case SETVAL:
  1581. return semctl_setval(ns, semid, semnum, arg);
  1582. case IPC_SET:
  1583. if (copy_compat_semid_from_user(&semid64, p, version))
  1584. return -EFAULT;
  1585. fallthrough;
  1586. case IPC_RMID:
  1587. return semctl_down(ns, semid, cmd, &semid64);
  1588. default:
  1589. return -EINVAL;
  1590. }
  1591. }
  1592. COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
  1593. {
  1594. return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
  1595. }
  1596. #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
  1597. long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
  1598. {
  1599. int version = compat_ipc_parse_version(&cmd);
  1600. return compat_ksys_semctl(semid, semnum, cmd, arg, version);
  1601. }
  1602. COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
  1603. {
  1604. return compat_ksys_old_semctl(semid, semnum, cmd, arg);
  1605. }
  1606. #endif
  1607. #endif
1608. /* If the task doesn't already have an undo_list, then allocate one
  1609. * here. We guarantee there is only one thread using this undo list,
  1610. * and current is THE ONE
  1611. *
  1612. * If this allocation and assignment succeeds, but later
  1613. * portions of this code fail, there is no need to free the sem_undo_list.
  1614. * Just let it stay associated with the task, and it'll be freed later
  1615. * at exit time.
  1616. *
  1617. * This can block, so callers must hold no locks.
  1618. */
  1619. static inline int get_undo_list(struct sem_undo_list **undo_listp)
  1620. {
  1621. struct sem_undo_list *undo_list;
  1622. undo_list = current->sysvsem.undo_list;
  1623. if (!undo_list) {
  1624. undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
  1625. if (undo_list == NULL)
  1626. return -ENOMEM;
  1627. spin_lock_init(&undo_list->lock);
  1628. refcount_set(&undo_list->refcnt, 1);
  1629. INIT_LIST_HEAD(&undo_list->list_proc);
  1630. current->sysvsem.undo_list = undo_list;
  1631. }
  1632. *undo_listp = undo_list;
  1633. return 0;
  1634. }
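/*
 * Find the sem_undo for @semid on a task's undo list.  Callers hold either
 * ulp->lock or the RCU read lock; the lockdep condition passed to
 * list_for_each_entry_rcu() below reflects that.
 */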
  1635. static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
  1636. {
  1637. struct sem_undo *un;
  1638. list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
  1639. spin_is_locked(&ulp->lock)) {
  1640. if (un->semid == semid)
  1641. return un;
  1642. }
  1643. return NULL;
  1644. }
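/*
 * Like __lookup_undo(), but moves a found entry to the front of the list,
 * so that repeated lookups of the same semid stay cheap.  The caller must
 * hold ulp->lock.
 */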
  1645. static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
  1646. {
  1647. struct sem_undo *un;
  1648. assert_spin_locked(&ulp->lock);
  1649. un = __lookup_undo(ulp, semid);
  1650. if (un) {
  1651. list_del_rcu(&un->list_proc);
  1652. list_add_rcu(&un->list_proc, &ulp->list_proc);
  1653. }
  1654. return un;
  1655. }
  1656. /**
  1657. * find_alloc_undo - lookup (and if not present create) undo array
  1658. * @ns: namespace
  1659. * @semid: semaphore array id
  1660. *
  1661. * The function looks up (and if not present creates) the undo structure.
  1662. * The size of the undo structure depends on the size of the semaphore
  1663. * array, thus the alloc path is not that straightforward.
1664. * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1665. * performs an rcu_read_lock().
  1666. */
  1667. static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
  1668. {
  1669. struct sem_array *sma;
  1670. struct sem_undo_list *ulp;
  1671. struct sem_undo *un, *new;
  1672. int nsems, error;
  1673. error = get_undo_list(&ulp);
  1674. if (error)
  1675. return ERR_PTR(error);
  1676. rcu_read_lock();
  1677. spin_lock(&ulp->lock);
  1678. un = lookup_undo(ulp, semid);
  1679. spin_unlock(&ulp->lock);
  1680. if (likely(un != NULL))
  1681. goto out;
  1682. /* no undo structure around - allocate one. */
  1683. /* step 1: figure out the size of the semaphore array */
  1684. sma = sem_obtain_object_check(ns, semid);
  1685. if (IS_ERR(sma)) {
  1686. rcu_read_unlock();
  1687. return ERR_CAST(sma);
  1688. }
  1689. nsems = sma->sem_nsems;
  1690. if (!ipc_rcu_getref(&sma->sem_perm)) {
  1691. rcu_read_unlock();
  1692. un = ERR_PTR(-EIDRM);
  1693. goto out;
  1694. }
  1695. rcu_read_unlock();
  1696. /* step 2: allocate new undo structure */
  1697. new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
  1698. if (!new) {
  1699. ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
  1700. return ERR_PTR(-ENOMEM);
  1701. }
  1702. /* step 3: Acquire the lock on semaphore array */
  1703. rcu_read_lock();
  1704. sem_lock_and_putref(sma);
  1705. if (!ipc_valid_object(&sma->sem_perm)) {
  1706. sem_unlock(sma, -1);
  1707. rcu_read_unlock();
  1708. kfree(new);
  1709. un = ERR_PTR(-EIDRM);
  1710. goto out;
  1711. }
  1712. spin_lock(&ulp->lock);
  1713. /*
  1714. * step 4: check for races: did someone else allocate the undo struct?
  1715. */
  1716. un = lookup_undo(ulp, semid);
  1717. if (un) {
  1718. kfree(new);
  1719. goto success;
  1720. }
  1721. /* step 5: initialize & link new undo structure */
  1722. new->semadj = (short *) &new[1];
  1723. new->ulp = ulp;
  1724. new->semid = semid;
  1725. assert_spin_locked(&ulp->lock);
  1726. list_add_rcu(&new->list_proc, &ulp->list_proc);
  1727. ipc_assert_locked_object(&sma->sem_perm);
  1728. list_add(&new->list_id, &sma->list_id);
  1729. un = new;
  1730. success:
  1731. spin_unlock(&ulp->lock);
  1732. sem_unlock(sma, -1);
  1733. out:
  1734. return un;
  1735. }
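/*
 * do_semtimedop - common implementation behind semop() and semtimedop().
 * Copies in the sembuf array, resolves SEM_UNDO structures via
 * find_alloc_undo(), and either performs the whole operation atomically or
 * queues the task and sleeps (optionally with a timeout) until it can.
 */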
  1736. static long do_semtimedop(int semid, struct sembuf __user *tsops,
  1737. unsigned nsops, const struct timespec64 *timeout)
  1738. {
  1739. int error = -EINVAL;
  1740. struct sem_array *sma;
  1741. struct sembuf fast_sops[SEMOPM_FAST];
  1742. struct sembuf *sops = fast_sops, *sop;
  1743. struct sem_undo *un;
  1744. int max, locknum;
  1745. bool undos = false, alter = false, dupsop = false;
  1746. struct sem_queue queue;
  1747. unsigned long dup = 0, jiffies_left = 0;
  1748. struct ipc_namespace *ns;
  1749. ns = current->nsproxy->ipc_ns;
  1750. if (nsops < 1 || semid < 0)
  1751. return -EINVAL;
  1752. if (nsops > ns->sc_semopm)
  1753. return -E2BIG;
  1754. if (nsops > SEMOPM_FAST) {
  1755. sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
  1756. if (sops == NULL)
  1757. return -ENOMEM;
  1758. }
  1759. if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
  1760. error = -EFAULT;
  1761. goto out_free;
  1762. }
  1763. if (timeout) {
  1764. if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
  1765. timeout->tv_nsec >= 1000000000L) {
  1766. error = -EINVAL;
  1767. goto out_free;
  1768. }
  1769. jiffies_left = timespec64_to_jiffies(timeout);
  1770. }
  1771. max = 0;
  1772. for (sop = sops; sop < sops + nsops; sop++) {
  1773. unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
  1774. if (sop->sem_num >= max)
  1775. max = sop->sem_num;
  1776. if (sop->sem_flg & SEM_UNDO)
  1777. undos = true;
  1778. if (dup & mask) {
  1779. /*
  1780. * There was a previous alter access that appears
  1781. * to have accessed the same semaphore, thus use
  1782. * the dupsop logic. "appears", because the detection
  1783. * can only check % BITS_PER_LONG.
  1784. */
  1785. dupsop = true;
  1786. }
  1787. if (sop->sem_op != 0) {
  1788. alter = true;
  1789. dup |= mask;
  1790. }
  1791. }
  1792. if (undos) {
  1793. /* On success, find_alloc_undo takes the rcu_read_lock */
  1794. un = find_alloc_undo(ns, semid);
  1795. if (IS_ERR(un)) {
  1796. error = PTR_ERR(un);
  1797. goto out_free;
  1798. }
  1799. } else {
  1800. un = NULL;
  1801. rcu_read_lock();
  1802. }
  1803. sma = sem_obtain_object_check(ns, semid);
  1804. if (IS_ERR(sma)) {
  1805. rcu_read_unlock();
  1806. error = PTR_ERR(sma);
  1807. goto out_free;
  1808. }
  1809. error = -EFBIG;
  1810. if (max >= sma->sem_nsems) {
  1811. rcu_read_unlock();
  1812. goto out_free;
  1813. }
  1814. error = -EACCES;
  1815. if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
  1816. rcu_read_unlock();
  1817. goto out_free;
  1818. }
  1819. error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
  1820. if (error) {
  1821. rcu_read_unlock();
  1822. goto out_free;
  1823. }
  1824. error = -EIDRM;
  1825. locknum = sem_lock(sma, sops, nsops);
  1826. /*
  1827. * We eventually might perform the following check in a lockless
  1828. * fashion, considering ipc_valid_object() locking constraints.
  1829. * If nsops == 1 and there is no contention for sem_perm.lock, then
  1830. * only a per-semaphore lock is held and it's OK to proceed with the
  1831. * check below. More details on the fine grained locking scheme
  1832. * entangled here and why it's RMID race safe on comments at sem_lock()
  1833. */
  1834. if (!ipc_valid_object(&sma->sem_perm))
  1835. goto out_unlock_free;
  1836. /*
  1837. * semid identifiers are not unique - find_alloc_undo may have
1838. * allocated an undo structure that was then invalidated by an RMID,
1839. * and now a new array has received the same id. Check and fail.
1840. * This case can be detected by checking un->semid. The existence of
  1841. * "un" itself is guaranteed by rcu.
  1842. */
  1843. if (un && un->semid == -1)
  1844. goto out_unlock_free;
  1845. queue.sops = sops;
  1846. queue.nsops = nsops;
  1847. queue.undo = un;
  1848. queue.pid = task_tgid(current);
  1849. queue.alter = alter;
  1850. queue.dupsop = dupsop;
  1851. error = perform_atomic_semop(sma, &queue);
1852. if (error == 0) { /* non-blocking successful path */
  1853. DEFINE_WAKE_Q(wake_q);
  1854. /*
  1855. * If the operation was successful, then do
  1856. * the required updates.
  1857. */
  1858. if (alter)
  1859. do_smart_update(sma, sops, nsops, 1, &wake_q);
  1860. else
  1861. set_semotime(sma, sops);
  1862. sem_unlock(sma, locknum);
  1863. rcu_read_unlock();
  1864. wake_up_q(&wake_q);
  1865. goto out_free;
  1866. }
  1867. if (error < 0) /* non-blocking error path */
  1868. goto out_unlock_free;
  1869. /*
  1870. * We need to sleep on this operation, so we put the current
  1871. * task into the pending queue and go to sleep.
  1872. */
  1873. if (nsops == 1) {
  1874. struct sem *curr;
  1875. int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
  1876. curr = &sma->sems[idx];
  1877. if (alter) {
  1878. if (sma->complex_count) {
  1879. list_add_tail(&queue.list,
  1880. &sma->pending_alter);
  1881. } else {
  1882. list_add_tail(&queue.list,
  1883. &curr->pending_alter);
  1884. }
  1885. } else {
  1886. list_add_tail(&queue.list, &curr->pending_const);
  1887. }
  1888. } else {
  1889. if (!sma->complex_count)
  1890. merge_queues(sma);
  1891. if (alter)
  1892. list_add_tail(&queue.list, &sma->pending_alter);
  1893. else
  1894. list_add_tail(&queue.list, &sma->pending_const);
  1895. sma->complex_count++;
  1896. }
  1897. do {
  1898. /* memory ordering ensured by the lock in sem_lock() */
  1899. WRITE_ONCE(queue.status, -EINTR);
  1900. queue.sleeper = current;
  1901. /* memory ordering is ensured by the lock in sem_lock() */
  1902. __set_current_state(TASK_INTERRUPTIBLE);
  1903. sem_unlock(sma, locknum);
  1904. rcu_read_unlock();
  1905. if (timeout)
  1906. jiffies_left = schedule_timeout(jiffies_left);
  1907. else
  1908. schedule();
  1909. /*
  1910. * fastpath: the semop has completed, either successfully or
1911. * not; which of the two, from the syscall's point of view, is quite
1912. * irrelevant to us at this point: we're done.
  1913. *
  1914. * We _do_ care, nonetheless, about being awoken by a signal or
  1915. * spuriously. The queue.status is checked again in the
  1916. * slowpath (aka after taking sem_lock), such that we can detect
  1917. * scenarios where we were awakened externally, during the
  1918. * window between wake_q_add() and wake_up_q().
  1919. */
  1920. error = READ_ONCE(queue.status);
  1921. if (error != -EINTR) {
  1922. /* see SEM_BARRIER_2 for purpose/pairing */
  1923. smp_acquire__after_ctrl_dep();
  1924. goto out_free;
  1925. }
  1926. rcu_read_lock();
  1927. locknum = sem_lock(sma, sops, nsops);
  1928. if (!ipc_valid_object(&sma->sem_perm))
  1929. goto out_unlock_free;
  1930. /*
1931. * No necessity for any barrier: We are protected by sem_lock()
  1932. */
  1933. error = READ_ONCE(queue.status);
  1934. /*
  1935. * If queue.status != -EINTR we are woken up by another process.
  1936. * Leave without unlink_queue(), but with sem_unlock().
  1937. */
  1938. if (error != -EINTR)
  1939. goto out_unlock_free;
  1940. /*
  1941. * If an interrupt occurred we have to clean up the queue.
  1942. */
  1943. if (timeout && jiffies_left == 0)
  1944. error = -EAGAIN;
  1945. } while (error == -EINTR && !signal_pending(current)); /* spurious */
  1946. unlink_queue(sma, &queue);
  1947. out_unlock_free:
  1948. sem_unlock(sma, locknum);
  1949. rcu_read_unlock();
  1950. out_free:
  1951. if (sops != fast_sops)
  1952. kvfree(sops);
  1953. return error;
  1954. }
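/*
 * Illustrative userspace call (not part of this file): a single "P"
 * operation with undo tracking might look like
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semtimedop(semid, &op, 1, NULL);
 *
 * and reaches do_semtimedop() via the wrappers below.
 */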
  1955. long ksys_semtimedop(int semid, struct sembuf __user *tsops,
  1956. unsigned int nsops, const struct __kernel_timespec __user *timeout)
  1957. {
  1958. if (timeout) {
  1959. struct timespec64 ts;
  1960. if (get_timespec64(&ts, timeout))
  1961. return -EFAULT;
  1962. return do_semtimedop(semid, tsops, nsops, &ts);
  1963. }
  1964. return do_semtimedop(semid, tsops, nsops, NULL);
  1965. }
  1966. SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
  1967. unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
  1968. {
  1969. return ksys_semtimedop(semid, tsops, nsops, timeout);
  1970. }
  1971. #ifdef CONFIG_COMPAT_32BIT_TIME
  1972. long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
  1973. unsigned int nsops,
  1974. const struct old_timespec32 __user *timeout)
  1975. {
  1976. if (timeout) {
  1977. struct timespec64 ts;
  1978. if (get_old_timespec32(&ts, timeout))
  1979. return -EFAULT;
  1980. return do_semtimedop(semid, tsems, nsops, &ts);
  1981. }
  1982. return do_semtimedop(semid, tsems, nsops, NULL);
  1983. }
  1984. SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
  1985. unsigned int, nsops,
  1986. const struct old_timespec32 __user *, timeout)
  1987. {
  1988. return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
  1989. }
  1990. #endif
  1991. SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
  1992. unsigned, nsops)
  1993. {
  1994. return do_semtimedop(semid, tsops, nsops, NULL);
  1995. }
  1996. /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  1997. * parent and child tasks.
  1998. */
  1999. int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
  2000. {
  2001. struct sem_undo_list *undo_list;
  2002. int error;
  2003. if (clone_flags & CLONE_SYSVSEM) {
  2004. error = get_undo_list(&undo_list);
  2005. if (error)
  2006. return error;
  2007. refcount_inc(&undo_list->refcnt);
  2008. tsk->sysvsem.undo_list = undo_list;
  2009. } else
  2010. tsk->sysvsem.undo_list = NULL;
  2011. return 0;
  2012. }
  2013. /*
2014. * Add semadj values to semaphores and free undo structures.
2015. * Undo structures are not freed when semaphore arrays are destroyed,
2016. * so some of them may be out of date.
2017. * IMPLEMENTATION NOTE: There is some confusion over whether the
2018. * set of adjustments should be applied in an atomic manner or not:
2019. * that is, if we are attempting to decrement the semval, should we
2020. * queue up and wait until we can do so legally?
  2021. * The original implementation attempted to do this (queue and wait).
  2022. * The current implementation does not do so. The POSIX standard
  2023. * and SVID should be consulted to determine what behavior is mandated.
  2024. */
  2025. void exit_sem(struct task_struct *tsk)
  2026. {
  2027. struct sem_undo_list *ulp;
  2028. ulp = tsk->sysvsem.undo_list;
  2029. if (!ulp)
  2030. return;
  2031. tsk->sysvsem.undo_list = NULL;
  2032. if (!refcount_dec_and_test(&ulp->refcnt))
  2033. return;
  2034. for (;;) {
  2035. struct sem_array *sma;
  2036. struct sem_undo *un;
  2037. int semid, i;
  2038. DEFINE_WAKE_Q(wake_q);
  2039. cond_resched();
  2040. rcu_read_lock();
  2041. un = list_entry_rcu(ulp->list_proc.next,
  2042. struct sem_undo, list_proc);
  2043. if (&un->list_proc == &ulp->list_proc) {
  2044. /*
  2045. * We must wait for freeary() before freeing this ulp,
2046. * in case we raced with the last sem_undo. There is a small
2047. * window in which we could exit while freeary() has not yet
2048. * finished unlocking the sem_undo_list.
  2049. */
  2050. spin_lock(&ulp->lock);
  2051. spin_unlock(&ulp->lock);
  2052. rcu_read_unlock();
  2053. break;
  2054. }
  2055. spin_lock(&ulp->lock);
  2056. semid = un->semid;
  2057. spin_unlock(&ulp->lock);
  2058. /* exit_sem raced with IPC_RMID, nothing to do */
  2059. if (semid == -1) {
  2060. rcu_read_unlock();
  2061. continue;
  2062. }
  2063. sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
  2064. /* exit_sem raced with IPC_RMID, nothing to do */
  2065. if (IS_ERR(sma)) {
  2066. rcu_read_unlock();
  2067. continue;
  2068. }
  2069. sem_lock(sma, NULL, -1);
  2070. /* exit_sem raced with IPC_RMID, nothing to do */
  2071. if (!ipc_valid_object(&sma->sem_perm)) {
  2072. sem_unlock(sma, -1);
  2073. rcu_read_unlock();
  2074. continue;
  2075. }
  2076. un = __lookup_undo(ulp, semid);
  2077. if (un == NULL) {
  2078. /* exit_sem raced with IPC_RMID+semget() that created
  2079. * exactly the same semid. Nothing to do.
  2080. */
  2081. sem_unlock(sma, -1);
  2082. rcu_read_unlock();
  2083. continue;
  2084. }
  2085. /* remove un from the linked lists */
  2086. ipc_assert_locked_object(&sma->sem_perm);
  2087. list_del(&un->list_id);
  2088. spin_lock(&ulp->lock);
  2089. list_del_rcu(&un->list_proc);
  2090. spin_unlock(&ulp->lock);
  2091. /* perform adjustments registered in un */
  2092. for (i = 0; i < sma->sem_nsems; i++) {
  2093. struct sem *semaphore = &sma->sems[i];
  2094. if (un->semadj[i]) {
  2095. semaphore->semval += un->semadj[i];
  2096. /*
  2097. * Range checks of the new semaphore value,
2098. * not defined by SUS:
  2099. * - Some unices ignore the undo entirely
  2100. * (e.g. HP UX 11i 11.22, Tru64 V5.1)
  2101. * - some cap the value (e.g. FreeBSD caps
  2102. * at 0, but doesn't enforce SEMVMX)
  2103. *
  2104. * Linux caps the semaphore value, both at 0
  2105. * and at SEMVMX.
  2106. *
  2107. * Manfred <manfred@colorfullife.com>
  2108. */
  2109. if (semaphore->semval < 0)
  2110. semaphore->semval = 0;
  2111. if (semaphore->semval > SEMVMX)
  2112. semaphore->semval = SEMVMX;
  2113. ipc_update_pid(&semaphore->sempid, task_tgid(current));
  2114. }
  2115. }
  2116. /* maybe some queued-up processes were waiting for this */
  2117. do_smart_update(sma, NULL, 0, 1, &wake_q);
  2118. sem_unlock(sma, -1);
  2119. rcu_read_unlock();
  2120. wake_up_q(&wake_q);
  2121. kfree_rcu(un, rcu);
  2122. }
  2123. kfree(ulp);
  2124. }
  2125. #ifdef CONFIG_PROC_FS
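/*
 * One line per semaphore array in /proc/sysvipc/sem, in the order
 * key, id, perms, nsems, uid, gid, cuid, cgid, otime, ctime
 * (matching the seq_printf() format string below).
 */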
  2126. static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
  2127. {
  2128. struct user_namespace *user_ns = seq_user_ns(s);
  2129. struct kern_ipc_perm *ipcp = it;
  2130. struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
  2131. time64_t sem_otime;
  2132. /*
  2133. * The proc interface isn't aware of sem_lock(), it calls
  2134. * ipc_lock_object() directly (in sysvipc_find_ipc).
  2135. * In order to stay compatible with sem_lock(), we must
  2136. * enter / leave complex_mode.
  2137. */
  2138. complexmode_enter(sma);
  2139. sem_otime = get_semotime(sma);
  2140. seq_printf(s,
  2141. "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
  2142. sma->sem_perm.key,
  2143. sma->sem_perm.id,
  2144. sma->sem_perm.mode,
  2145. sma->sem_nsems,
  2146. from_kuid_munged(user_ns, sma->sem_perm.uid),
  2147. from_kgid_munged(user_ns, sma->sem_perm.gid),
  2148. from_kuid_munged(user_ns, sma->sem_perm.cuid),
  2149. from_kgid_munged(user_ns, sma->sem_perm.cgid),
  2150. sem_otime,
  2151. sma->sem_ctime);
  2152. complexmode_tryleave(sma);
  2153. return 0;
  2154. }
  2155. #endif