rwsem.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* kernel/rwsem.c: R/W semaphores, public implementation
  3. *
  4. * Written by David Howells (dhowells@redhat.com).
  5. * Derived from asm-i386/semaphore.h
  6. *
  7. * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
  8. * and Michel Lespinasse <walken@google.com>
  9. *
  10. * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
  11. * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
  12. *
  13. * Rwsem count bit fields re-definition and rwsem rearchitecture by
  14. * Waiman Long <longman@redhat.com> and
  15. * Peter Zijlstra <peterz@infradead.org>.
  16. */
  17. #include <linux/types.h>
  18. #include <linux/kernel.h>
  19. #include <linux/sched.h>
  20. #include <linux/sched/rt.h>
  21. #include <linux/sched/task.h>
  22. #include <linux/sched/debug.h>
  23. #include <linux/sched/wake_q.h>
  24. #include <linux/sched/signal.h>
  25. #include <linux/sched/clock.h>
  26. #include <linux/export.h>
  27. #include <linux/rwsem.h>
  28. #include <linux/atomic.h>
  29. #include "lock_events.h"
  30. #include <trace/hooks/rwsem.h>
  31. #include <trace/hooks/dtask.h>
  32. /*
  33. * The least significant 3 bits of the owner value have the following
  34. * meanings when set.
  35. * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
  36. * - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
  37. * - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
  38. *
  39. * When the rwsem is either owned by an anonymous writer, or it is
  40. * reader-owned, but a spinning writer has timed out, both nonspinnable
  41. * bits will be set to disable optimistic spinning by readers and writers.
  42. * In the latter case, the last unlocking reader should then check the
  43. * writer nonspinnable bit and clear it only to give writers preference
  44. * to acquire the lock via optimistic spinning, but not readers. Similar
  45. * action is also done in the reader slowpath.
  46. * When a writer acquires a rwsem, it puts its task_struct pointer
  47. * into the owner field. It is cleared after an unlock.
  48. *
  49. * When a reader acquires a rwsem, it will also put its task_struct
  50. * pointer into the owner field with the RWSEM_READER_OWNED bit set.
  51. * On unlock, the owner field will largely be left untouched. So
  52. * for a free or reader-owned rwsem, the owner value may contain
  53. * information about the last reader that acquired the rwsem.
  54. *
  55. * That information may be helpful in debugging cases where the system
  56. * seems to hang on a reader owned rwsem especially if only one reader
  57. * is involved. Ideally we would like to track all the readers that own
  58. * a rwsem, but the overhead is simply too big.
  59. *
  60. * Reader optimistic spinning is helpful when the reader critical section
  61. * is short and there aren't that many readers around. It makes readers
  62. * relatively more preferred than writers. When a writer times out spinning
  63. * on a reader-owned lock and sets the nonspinnable bits, there are two main
  64. * reasons for that.
  65. *
  66. * 1) The reader critical section is long, perhaps the task sleeps after
  67. * acquiring the read lock.
  68. * 2) There are just too many readers contending the lock causing it to
  69. * take a while to service all of them.
  70. *
  71. * In the former case, a long reader critical section will impede the progress
  72. * of writers, which is usually more important for system performance. In
  73. * the latter case, reader optimistic spinning tends to make the reader
  74. * groups that contain readers that acquire the lock together smaller
  75. * leading to more of them. That may hurt performance in some cases. In
  76. * other words, the setting of nonspinnable bits indicates that reader
  77. * optimistic spinning may not be helpful for those workloads that cause
  78. * it.
  79. *
  80. * Therefore, any writers that had observed the setting of the writer
  81. * nonspinnable bit for a given rwsem after they fail to acquire the lock
  82. * via optimistic spinning will set the reader nonspinnable bit once they
  83. * acquire the write lock. Similarly, readers that observe the setting
  84. * of reader nonspinnable bit at slowpath entry will set the reader
  85. * nonspinnable bits when they acquire the read lock via the wakeup path.
  86. *
  87. * Once the reader nonspinnable bit is on, it will only be reset when
  88. * a writer is able to acquire the rwsem in the fast path or somehow a
  89. * reader or writer in the slowpath doesn't observe the nonspinnable bit.
  90. *
  91. * This is to discourage reader optimistic spinning on that particular
  92. * rwsem and make writers more preferred. This adaptive disabling of reader
  93. * optimistic spinning will alleviate the negative side effect of this
  94. * feature.
  95. */
  96. #define RWSEM_READER_OWNED (1UL << 0)
  97. #define RWSEM_RD_NONSPINNABLE (1UL << 1)
  98. #define RWSEM_WR_NONSPINNABLE (1UL << 2)
  99. #define RWSEM_NONSPINNABLE (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
  100. #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
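/*
 * [Editor's illustrative sketch, not part of rwsem.c] A small stand-alone
 * user-space program showing how a task pointer and the three owner flag
 * bits above share a single word, mirroring what __rwsem_set_reader_owned()
 * and rwsem_owner_flags() do further down. The fake_task type and the
 * printed output are made up for the example.
 */
#include <stdio.h>

#define EX_READER_OWNED		(1UL << 0)
#define EX_RD_NONSPINNABLE	(1UL << 1)
#define EX_WR_NONSPINNABLE	(1UL << 2)
#define EX_OWNER_FLAGS_MASK	(EX_READER_OWNED | EX_RD_NONSPINNABLE | EX_WR_NONSPINNABLE)

/* Aligned to 8 bytes so the low 3 bits of its address are guaranteed free. */
struct fake_task { int pid; } __attribute__((aligned(8)));

int main(void)
{
	struct fake_task t = { .pid = 42 };

	/* Pack: pointer in the upper bits, flags in bits 0-2. */
	unsigned long owner = (unsigned long)&t | EX_READER_OWNED | EX_RD_NONSPINNABLE;

	/* Unpack: mask off the flag bits to recover the pointer, and vice versa. */
	struct fake_task *task = (struct fake_task *)(owner & ~EX_OWNER_FLAGS_MASK);
	unsigned long flags = owner & EX_OWNER_FLAGS_MASK;

	printf("pid=%d reader_owned=%d rd_nonspinnable=%d\n",
	       task->pid, !!(flags & EX_READER_OWNED), !!(flags & EX_RD_NONSPINNABLE));
	return 0;
}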
  101. #ifdef CONFIG_DEBUG_RWSEMS
  102. # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
  103. if (!debug_locks_silent && \
  104. WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
  105. #c, atomic_long_read(&(sem)->count), \
  106. (unsigned long) sem->magic, \
  107. atomic_long_read(&(sem)->owner), (long)current, \
  108. list_empty(&(sem)->wait_list) ? "" : "not ")) \
  109. debug_locks_off(); \
  110. } while (0)
  111. #else
  112. # define DEBUG_RWSEMS_WARN_ON(c, sem)
  113. #endif
  114. /*
  115. * On 64-bit architectures, the bit definitions of the count are:
  116. *
  117. * Bit 0 - writer locked bit
  118. * Bit 1 - waiters present bit
  119. * Bit 2 - lock handoff bit
  120. * Bits 3-7 - reserved
  121. * Bits 8-62 - 55-bit reader count
  122. * Bit 63 - read fail bit
  123. *
  124. * On 32-bit architectures, the bit definitions of the count are:
  125. *
  126. * Bit 0 - writer locked bit
  127. * Bit 1 - waiters present bit
  128. * Bit 2 - lock handoff bit
  129. * Bits 3-7 - reserved
  130. * Bits 8-30 - 23-bit reader count
  131. * Bit 31 - read fail bit
  132. *
  133. * It is not likely that the most significant bit (read fail bit) will ever
  134. * be set. This guard bit is still checked anyway in the down_read() fastpath
  135. * just in case we need to use up more of the reader bits for other purposes
  136. * in the future.
  137. *
  138. * atomic_long_fetch_add() is used to obtain reader lock, whereas
  139. * atomic_long_cmpxchg() will be used to obtain writer lock.
  140. *
  141. * There are three places where the lock handoff bit may be set or cleared.
  142. * 1) rwsem_mark_wake() for readers.
  143. * 2) rwsem_try_write_lock() for writers.
  144. * 3) Error path of rwsem_down_write_slowpath().
  145. *
  146. * For all the above cases, wait_lock will be held. A writer must also
  147. * be the first one in the wait_list to be eligible for setting the handoff
  148. * bit. So concurrent setting/clearing of handoff bit is not possible.
  149. */
  150. #define RWSEM_WRITER_LOCKED (1UL << 0)
  151. #define RWSEM_FLAG_WAITERS (1UL << 1)
  152. #define RWSEM_FLAG_HANDOFF (1UL << 2)
  153. #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
  154. #define RWSEM_READER_SHIFT 8
  155. #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
  156. #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
  157. #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
  158. #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
  159. #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
  160. RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
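/*
 * [Editor's illustrative sketch, not part of rwsem.c] Decoding a raw count
 * value with the bit layout defined above. The sample value is invented for
 * the example: on a 64-bit build it represents three readers holding the
 * lock with at least one waiter queued behind them.
 */
#include <stdio.h>

#define EX_WRITER_LOCKED	(1UL << 0)
#define EX_FLAG_WAITERS		(1UL << 1)
#define EX_FLAG_HANDOFF		(1UL << 2)
#define EX_READER_SHIFT		8

int main(void)
{
	unsigned long count = (3UL << EX_READER_SHIFT) | EX_FLAG_WAITERS;

	printf("writer locked : %d\n", !!(count & EX_WRITER_LOCKED));
	printf("waiters       : %d\n", !!(count & EX_FLAG_WAITERS));
	printf("handoff       : %d\n", !!(count & EX_FLAG_HANDOFF));
	printf("reader count  : %lu\n", count >> EX_READER_SHIFT);
	return 0;
}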
  161. /*
  162. * All writes to owner are protected by WRITE_ONCE() to make sure that
  163. * store tearing can't happen as optimistic spinners may read and use
  164. * the owner value concurrently without lock. Read from owner, however,
  165. * may not need READ_ONCE() as long as the pointer value is only used
  166. * for comparison and isn't being dereferenced.
  167. */
  168. static inline void rwsem_set_owner(struct rw_semaphore *sem)
  169. {
  170. atomic_long_set(&sem->owner, (long)current);
  171. trace_android_vh_rwsem_set_owner(sem);
  172. }
  173. static inline void rwsem_clear_owner(struct rw_semaphore *sem)
  174. {
  175. atomic_long_set(&sem->owner, 0);
  176. }
  177. /*
  178. * Test the flags in the owner field.
  179. */
  180. static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
  181. {
  182. return atomic_long_read(&sem->owner) & flags;
  183. }
  184. /*
  185. * The task_struct pointer of the last owning reader will be left in
  186. * the owner field.
  187. *
  188. * Note that the owner value just indicates the task has owned the rwsem
  189. * previously; it may not be the real owner or one of the real owners
  190. * anymore when that field is examined, so take it with a grain of salt.
  191. *
  192. * The reader non-spinnable bit is preserved.
  193. */
  194. static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
  195. struct task_struct *owner)
  196. {
  197. unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
  198. (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
  199. atomic_long_set(&sem->owner, val);
  200. }
  201. static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
  202. {
  203. __rwsem_set_reader_owned(sem, current);
  204. trace_android_vh_rwsem_set_reader_owned(sem);
  205. }
  206. /*
  207. * Return true if the rwsem is owned by a reader.
  208. */
  209. static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
  210. {
  211. #ifdef CONFIG_DEBUG_RWSEMS
  212. /*
  213. * Check the count to see if it is write-locked.
  214. */
  215. long count = atomic_long_read(&sem->count);
  216. if (count & RWSEM_WRITER_MASK)
  217. return false;
  218. #endif
  219. return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
  220. }
  221. #ifdef CONFIG_DEBUG_RWSEMS
  222. /*
  223. * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
  224. * is a task pointer in owner of a reader-owned rwsem, it will be the
  225. * real owner or one of the real owners. The only exception is when the
  226. * unlock is done by up_read_non_owner().
  227. */
  228. static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
  229. {
  230. unsigned long val = atomic_long_read(&sem->owner);
  231. while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
  232. if (atomic_long_try_cmpxchg(&sem->owner, &val,
  233. val & RWSEM_OWNER_FLAGS_MASK))
  234. return;
  235. }
  236. }
  237. #else
  238. static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
  239. {
  240. }
  241. #endif
  242. /*
  243. * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
  244. * remains set. Otherwise, the operation will be aborted.
  245. */
  246. static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
  247. {
  248. unsigned long owner = atomic_long_read(&sem->owner);
  249. do {
  250. if (!(owner & RWSEM_READER_OWNED))
  251. break;
  252. if (owner & RWSEM_NONSPINNABLE)
  253. break;
  254. } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
  255. owner | RWSEM_NONSPINNABLE));
  256. }
  257. static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
  258. {
  259. long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
  260. if (WARN_ON_ONCE(cnt < 0))
  261. rwsem_set_nonspinnable(sem);
  262. return !(cnt & RWSEM_READ_FAILED_MASK);
  263. }
  264. /*
  265. * Return just the real task structure pointer of the owner
  266. */
  267. static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
  268. {
  269. return (struct task_struct *)
  270. (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
  271. }
  272. /*
  273. * Return the real task structure pointer of the owner and the embedded
  274. * flags in the owner. pflags must be non-NULL.
  275. */
  276. static inline struct task_struct *
  277. rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
  278. {
  279. unsigned long owner = atomic_long_read(&sem->owner);
  280. *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
  281. return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
  282. }
  283. /*
  284. * Guide to the rw_semaphore's count field.
  285. *
  286. * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
  287. * by a writer.
  288. *
  289. * The lock is owned by readers when
  290. * (1) the RWSEM_WRITER_LOCKED isn't set in count,
  291. * (2) some of the reader bits are set in count, and
  292. * (3) the owner field has the RWSEM_READER_OWNED bit set.
  293. *
  294. * Having some reader bits set is not enough to guarantee a reader-owned
  295. * lock as the readers may be in the process of backing out from the count
  296. * and a writer has just released the lock. So another writer may steal
  297. * the lock immediately after that.
  298. */
  299. /*
  300. * Initialize an rwsem:
  301. */
  302. void __init_rwsem(struct rw_semaphore *sem, const char *name,
  303. struct lock_class_key *key)
  304. {
  305. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  306. /*
  307. * Make sure we are not reinitializing a held semaphore:
  308. */
  309. debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  310. lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
  311. #endif
  312. #ifdef CONFIG_DEBUG_RWSEMS
  313. sem->magic = sem;
  314. #endif
  315. atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
  316. raw_spin_lock_init(&sem->wait_lock);
  317. INIT_LIST_HEAD(&sem->wait_list);
  318. atomic_long_set(&sem->owner, 0L);
  319. #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
  320. osq_lock_init(&sem->osq);
  321. #endif
  322. trace_android_vh_rwsem_init(sem);
  323. }
  324. EXPORT_SYMBOL(__init_rwsem);
  325. #define rwsem_first_waiter(sem) \
  326. list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
  327. enum rwsem_wake_type {
  328. RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
  329. RWSEM_WAKE_READERS, /* Wake readers only */
  330. RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
  331. };
  332. enum writer_wait_state {
  333. WRITER_NOT_FIRST, /* Writer is not first in wait list */
  334. WRITER_FIRST, /* Writer is first in wait list */
  335. WRITER_HANDOFF /* Writer is first & handoff needed */
  336. };
  337. /*
  338. * The typical HZ value is either 250 or 1000. So set the minimum waiting
  339. * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
  340. * queue before initiating the handoff protocol.
  341. */
  342. #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
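/*
 * [Editor's illustrative sketch, not part of rwsem.c] What the
 * RWSEM_WAIT_TIMEOUT expression above works out to for a few common HZ
 * values: the wait is always at least one jiffy and at least roughly 4ms.
 */
#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int hz_vals[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_vals) / sizeof(hz_vals[0]); i++) {
		int hz = hz_vals[i];
		int timeout = EX_DIV_ROUND_UP(hz, 250);

		printf("HZ=%4d -> timeout = %d jiffies (%d ms)\n",
		       hz, timeout, timeout * 1000 / hz);
	}
	return 0;
}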
  343. /*
  344. * Magic number to batch-wakeup waiting readers, even when writers are
  345. * also present in the queue. This both limits the amount of work the
  346. * waking thread must do and also prevents any potential counter overflow,
  347. * however unlikely.
  348. */
  349. #define MAX_READERS_WAKEUP 0x100
  350. /*
  351. * handle the lock release when there are processes blocked on it that can now run
  352. * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
  353. * have been set.
  354. * - there must be someone on the queue
  355. * - the wait_lock must be held by the caller
  356. * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
  357. * to actually wakeup the blocked task(s) and drop the reference count,
  358. * preferably when the wait_lock is released
  359. * - woken process blocks are discarded from the list after having task zeroed
  360. * - writers are only marked woken if downgrading is false
  361. */
  362. static void rwsem_mark_wake(struct rw_semaphore *sem,
  363. enum rwsem_wake_type wake_type,
  364. struct wake_q_head *wake_q)
  365. {
  366. struct rwsem_waiter *waiter, *tmp;
  367. long oldcount, woken = 0, adjustment = 0;
  368. struct list_head wlist;
  369. lockdep_assert_held(&sem->wait_lock);
  370. /*
  371. * Take a peek at the queue head waiter such that we can determine
  372. * the wakeup(s) to perform.
  373. */
  374. waiter = rwsem_first_waiter(sem);
  375. if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
  376. if (wake_type == RWSEM_WAKE_ANY) {
  377. /*
  378. * Mark writer at the front of the queue for wakeup.
  379. * Until the task is actually awoken later by
  380. * the caller, other writers are able to steal it.
  381. * Readers, on the other hand, will block as they
  382. * will notice the queued writer.
  383. */
  384. wake_q_add(wake_q, waiter->task);
  385. lockevent_inc(rwsem_wake_writer);
  386. }
  387. return;
  388. }
  389. /*
  390. * No reader wakeup if there are too many of them already.
  391. */
  392. if (unlikely(atomic_long_read(&sem->count) < 0))
  393. return;
  394. /*
  395. * Writers might steal the lock before we grant it to the next reader.
  396. * We prefer to do the first reader grant before counting readers
  397. * so we can bail out early if a writer stole the lock.
  398. */
  399. if (wake_type != RWSEM_WAKE_READ_OWNED) {
  400. struct task_struct *owner;
  401. adjustment = RWSEM_READER_BIAS;
  402. oldcount = atomic_long_fetch_add(adjustment, &sem->count);
  403. if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
  404. /*
  405. * When we've been waiting "too" long (for writers
  406. * to give up the lock), request a HANDOFF to
  407. * force the issue.
  408. */
  409. if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
  410. time_after(jiffies, waiter->timeout)) {
  411. adjustment -= RWSEM_FLAG_HANDOFF;
  412. lockevent_inc(rwsem_rlock_handoff);
  413. }
  414. atomic_long_add(-adjustment, &sem->count);
  415. return;
  416. }
  417. /*
  418. * Set it to reader-owned to give spinners an early
  419. * indication that readers now have the lock.
  420. * The reader nonspinnable bit seen at slowpath entry of
  421. * the reader is copied over.
  422. */
  423. owner = waiter->task;
  424. if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
  425. owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
  426. lockevent_inc(rwsem_opt_norspin);
  427. }
  428. __rwsem_set_reader_owned(sem, owner);
  429. }
  430. /*
  431. * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
  432. * queue. We know that woken will be at least 1 as we accounted
  433. * for above. Note we increment the 'active part' of the count by the
  434. * number of readers before waking any processes up.
  435. *
  436. * This is an adaptation of the phase-fair R/W locks where at the
  437. * reader phase (first waiter is a reader), all readers are eligible
  438. * to acquire the lock at the same time irrespective of their order
  439. * in the queue. The writers acquire the lock according to their
  440. * order in the queue.
  441. *
  442. * We have to do wakeup in 2 passes to prevent the possibility that
  443. * the reader count may be decremented before it is incremented. It
  444. * is because the to-be-woken waiter may not have slept yet. So it
  445. * may see waiter->task cleared, finish its critical section and
  446. * do an unlock before the reader count increment.
  447. *
  448. * 1) Collect the read-waiters in a separate list, count them and
  449. * fully increment the reader count in rwsem.
  450. * 2) For each waiter in the new list, clear waiter->task and
  451. * put them into wake_q to be woken up later.
  452. */
  453. INIT_LIST_HEAD(&wlist);
  454. list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
  455. if (waiter->type == RWSEM_WAITING_FOR_WRITE)
  456. continue;
  457. woken++;
  458. list_move_tail(&waiter->list, &wlist);
  459. trace_android_vh_rwsem_mark_wake_readers(sem, waiter);
  460. /*
  461. * Limit # of readers that can be woken up per wakeup call.
  462. */
  463. if (woken >= MAX_READERS_WAKEUP)
  464. break;
  465. }
  466. adjustment = woken * RWSEM_READER_BIAS - adjustment;
  467. lockevent_cond_inc(rwsem_wake_reader, woken);
  468. if (list_empty(&sem->wait_list)) {
  469. /* hit end of list above */
  470. adjustment -= RWSEM_FLAG_WAITERS;
  471. }
  472. /*
  473. * When we've woken a reader, we no longer need to force writers
  474. * to give up the lock and we can clear HANDOFF.
  475. */
  476. if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
  477. adjustment -= RWSEM_FLAG_HANDOFF;
  478. if (adjustment)
  479. atomic_long_add(adjustment, &sem->count);
  480. /* 2nd pass */
  481. list_for_each_entry_safe(waiter, tmp, &wlist, list) {
  482. struct task_struct *tsk;
  483. tsk = waiter->task;
  484. get_task_struct(tsk);
  485. /*
  486. * Ensure calling get_task_struct() before setting the reader
  487. * waiter to nil such that rwsem_down_read_slowpath() cannot
  488. * race with do_exit() by always holding a reference count
  489. * to the task to wakeup.
  490. */
  491. smp_store_release(&waiter->task, NULL);
  492. /*
  493. * Ensure issuing the wakeup (either by us or someone else)
  494. * after setting the reader waiter to nil.
  495. */
  496. wake_q_add_safe(wake_q, tsk);
  497. }
  498. }
  499. /*
  500. * This function must be called with the sem->wait_lock held to prevent
  501. * race conditions between checking the rwsem wait list and setting the
  502. * sem->count accordingly.
  503. *
  504. * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
  505. * bit is set or the lock is acquired with handoff bit cleared.
  506. */
  507. static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
  508. enum writer_wait_state wstate)
  509. {
  510. long count, new;
  511. lockdep_assert_held(&sem->wait_lock);
  512. count = atomic_long_read(&sem->count);
  513. do {
  514. bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
  515. if (has_handoff && wstate == WRITER_NOT_FIRST)
  516. return false;
  517. new = count;
  518. if (count & RWSEM_LOCK_MASK) {
  519. if (has_handoff || (wstate != WRITER_HANDOFF))
  520. return false;
  521. new |= RWSEM_FLAG_HANDOFF;
  522. } else {
  523. new |= RWSEM_WRITER_LOCKED;
  524. new &= ~RWSEM_FLAG_HANDOFF;
  525. if (list_is_singular(&sem->wait_list))
  526. new &= ~RWSEM_FLAG_WAITERS;
  527. }
  528. } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
  529. /*
  530. * We have either acquired the lock with handoff bit cleared or
  531. * set the handoff bit.
  532. */
  533. if (new & RWSEM_FLAG_HANDOFF)
  534. return false;
  535. rwsem_set_owner(sem);
  536. return true;
  537. }
  538. #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
  539. /*
  540. * Try to acquire read lock before the reader is put on wait queue.
  541. * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
  542. * is ongoing.
  543. */
  544. static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
  545. {
  546. long count = atomic_long_read(&sem->count);
  547. if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
  548. return false;
  549. count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
  550. if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
  551. rwsem_set_reader_owned(sem);
  552. lockevent_inc(rwsem_opt_rlock);
  553. return true;
  554. }
  555. /* Back out the change */
  556. atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
  557. return false;
  558. }
  559. /*
  560. * Try to acquire write lock before the writer has been put on wait queue.
  561. */
  562. static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
  563. {
  564. long count = atomic_long_read(&sem->count);
  565. while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
  566. if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
  567. count | RWSEM_WRITER_LOCKED)) {
  568. rwsem_set_owner(sem);
  569. lockevent_inc(rwsem_opt_wlock);
  570. return true;
  571. }
  572. }
  573. return false;
  574. }
  575. static inline bool owner_on_cpu(struct task_struct *owner)
  576. {
  577. /*
  578. * Due to lock holder preemption, we skip spinning if the
  579. * task is not on a CPU or its CPU is preempted
  580. */
  581. return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
  582. }
  583. static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
  584. unsigned long nonspinnable)
  585. {
  586. struct task_struct *owner;
  587. unsigned long flags;
  588. bool ret = true;
  589. if (need_resched()) {
  590. lockevent_inc(rwsem_opt_fail);
  591. return false;
  592. }
  593. preempt_disable();
  594. rcu_read_lock();
  595. owner = rwsem_owner_flags(sem, &flags);
  596. /*
  597. * Don't check the read-owner as the entry may be stale.
  598. */
  599. if ((flags & nonspinnable) ||
  600. (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
  601. ret = false;
  602. rcu_read_unlock();
  603. preempt_enable();
  604. lockevent_cond_inc(rwsem_opt_fail, !ret);
  605. return ret;
  606. }
  607. /*
  608. * The rwsem_spin_on_owner() function returns the following 4 values
  609. * depending on the lock owner state.
  610. * OWNER_NULL : owner is currently NULL
  611. * OWNER_WRITER: when owner changes and is a writer
  612. * OWNER_READER: when owner changes and the new owner may be a reader.
  613. * OWNER_NONSPINNABLE:
  614. * when optimistic spinning has to stop because either the
  615. * owner stops running, is unknown, or its timeslice has
  616. * been used up.
  617. */
  618. enum owner_state {
  619. OWNER_NULL = 1 << 0,
  620. OWNER_WRITER = 1 << 1,
  621. OWNER_READER = 1 << 2,
  622. OWNER_NONSPINNABLE = 1 << 3,
  623. };
  624. #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
  625. static inline enum owner_state
  626. rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
  627. {
  628. if (flags & nonspinnable)
  629. return OWNER_NONSPINNABLE;
  630. if (flags & RWSEM_READER_OWNED)
  631. return OWNER_READER;
  632. return owner ? OWNER_WRITER : OWNER_NULL;
  633. }
  634. static noinline enum owner_state
  635. rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
  636. {
  637. struct task_struct *new, *owner;
  638. unsigned long flags, new_flags;
  639. enum owner_state state;
  640. owner = rwsem_owner_flags(sem, &flags);
  641. state = rwsem_owner_state(owner, flags, nonspinnable);
  642. if (state != OWNER_WRITER)
  643. return state;
  644. rcu_read_lock();
  645. for (;;) {
  646. /*
  647. * When a waiting writer sets the handoff flag, it may spin
  648. * on the owner as well. Once that writer acquires the lock,
  649. * we can spin on it. So we don't need to quit even when the
  650. * handoff bit is set.
  651. */
  652. new = rwsem_owner_flags(sem, &new_flags);
  653. if ((new != owner) || (new_flags != flags)) {
  654. state = rwsem_owner_state(new, new_flags, nonspinnable);
  655. break;
  656. }
  657. /*
  658. * Ensure we emit the owner->on_cpu, dereference _after_
  659. * checking sem->owner still matches owner, if that fails,
  660. * owner might point to free()d memory, if it still matches,
  661. * the rcu_read_lock() ensures the memory stays valid.
  662. */
  663. barrier();
  664. if (need_resched() || !owner_on_cpu(owner)) {
  665. state = OWNER_NONSPINNABLE;
  666. break;
  667. }
  668. cpu_relax();
  669. }
  670. rcu_read_unlock();
  671. return state;
  672. }
  673. /*
  674. * Calculate reader-owned rwsem spinning threshold for writer
  675. *
  676. * The more readers own the rwsem, the longer it will take for them to
  677. * wind down and free the rwsem. So the empirical formula used to
  678. * determine the actual spinning time limit here is:
  679. *
  680. * Spinning threshold = (10 + nr_readers/2)us
  681. *
  682. * The limit is capped to a maximum of 25us (30 readers). This is just
  683. * a heuristic and is subject to change in the future.
  684. */
  685. static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
  686. {
  687. long count = atomic_long_read(&sem->count);
  688. int readers = count >> RWSEM_READER_SHIFT;
  689. u64 delta;
  690. if (readers > 30)
  691. readers = 30;
  692. delta = (20 + readers) * NSEC_PER_USEC / 2;
  693. return sched_clock() + delta;
  694. }
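/*
 * [Editor's illustrative sketch, not part of rwsem.c] The spin budget the
 * formula above yields for a few reader counts. delta = (20 + readers) *
 * NSEC_PER_USEC / 2 is the (10 + readers/2)us heuristic, saturating at
 * 25us once 30 or more readers hold the lock.
 */
#include <stdio.h>

#define EX_NSEC_PER_USEC	1000ULL

static unsigned long long ex_rspin_delta(int readers)
{
	if (readers > 30)
		readers = 30;
	return (20 + readers) * EX_NSEC_PER_USEC / 2;
}

int main(void)
{
	int samples[] = { 1, 4, 16, 30, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%3d readers -> spin budget %llu ns\n",
		       samples[i], ex_rspin_delta(samples[i]));
	return 0;
}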
  695. static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
  696. {
  697. bool taken = false;
  698. int prev_owner_state = OWNER_NULL;
  699. int loop = 0;
  700. u64 rspin_threshold = 0;
  701. unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
  702. : RWSEM_RD_NONSPINNABLE;
  703. preempt_disable();
  704. /* sem->wait_lock should not be held when doing optimistic spinning */
  705. if (!osq_lock(&sem->osq))
  706. goto done;
  707. /*
  708. * Optimistically spin on the owner field and attempt to acquire the
  709. * lock whenever the owner changes. Spinning will be stopped when:
  710. * 1) the owning writer isn't running; or
  711. * 2) readers own the lock and spinning time has exceeded the limit.
  712. */
  713. for (;;) {
  714. enum owner_state owner_state;
  715. owner_state = rwsem_spin_on_owner(sem, nonspinnable);
  716. if (!(owner_state & OWNER_SPINNABLE))
  717. break;
  718. /*
  719. * Try to acquire the lock
  720. */
  721. taken = wlock ? rwsem_try_write_lock_unqueued(sem)
  722. : rwsem_try_read_lock_unqueued(sem);
  723. if (taken)
  724. break;
  725. /*
  726. * Time-based reader-owned rwsem optimistic spinning
  727. */
  728. if (wlock && (owner_state == OWNER_READER)) {
  729. /*
  730. * Re-initialize rspin_threshold every time when
  731. * the owner state changes from non-reader to reader.
  732. * This allows a writer to steal the lock in between
  733. * 2 reader phases and have the threshold reset at
  734. * the beginning of the 2nd reader phase.
  735. */
  736. if (prev_owner_state != OWNER_READER) {
  737. if (rwsem_test_oflags(sem, nonspinnable))
  738. break;
  739. rspin_threshold = rwsem_rspin_threshold(sem);
  740. loop = 0;
  741. }
  742. /*
  743. * Check time threshold once every 16 iterations to
  744. * avoid calling sched_clock() too frequently so
  745. * as to reduce the average latency between the times
  746. * when the lock becomes free and when the spinner
  747. * is ready to do a trylock.
  748. */
  749. else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
  750. rwsem_set_nonspinnable(sem);
  751. lockevent_inc(rwsem_opt_nospin);
  752. break;
  753. }
  754. }
  755. /*
  756. * An RT task cannot do optimistic spinning if it cannot
  757. * be sure the lock holder is running or live-lock may
  758. * happen if the current task and the lock holder happen
  759. * to run on the same CPU. However, aborting optimistic
  760. * spinning while a NULL owner is detected may miss some
  761. * opportunity where spinning can continue without causing
  762. * a problem.
  763. *
  764. * There are 2 possible cases where an RT task may be able
  765. * to continue spinning.
  766. *
  767. * 1) The lock owner is in the process of releasing the
  768. * lock, sem->owner is cleared but the lock has not
  769. * been released yet.
  770. * 2) The lock was free and owner cleared, but another
  771. * task just comes in and acquires the lock before
  772. * we try to get it. The new owner may be a spinnable
  773. * writer.
  774. *
  775. * To take advantage of the two scenarios listed above, the RT
  776. * task is made to retry one more time to see if it can
  777. * acquire the lock or continue spinning on the new owning
  778. * writer. Of course, if the time lag is long enough or the
  779. * new owner is not a writer or spinnable, the RT task will
  780. * quit spinning.
  781. *
  782. * If the owner is a writer, the need_resched() check is
  783. * done inside rwsem_spin_on_owner(). If the owner is not
  784. * a writer, the need_resched() check needs to be done here.
  785. */
  786. if (owner_state != OWNER_WRITER) {
  787. if (need_resched())
  788. break;
  789. if (rt_task(current) &&
  790. (prev_owner_state != OWNER_WRITER))
  791. break;
  792. }
  793. prev_owner_state = owner_state;
  794. /*
  795. * The cpu_relax() call is a compiler barrier which forces
  796. * everything in this loop to be re-loaded. We don't need
  797. * memory barriers as we'll eventually observe the right
  798. * values at the cost of a few extra spins.
  799. */
  800. cpu_relax();
  801. }
  802. osq_unlock(&sem->osq);
  803. done:
  804. preempt_enable();
  805. lockevent_cond_inc(rwsem_opt_fail, !taken);
  806. return taken;
  807. }
  808. /*
  809. * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
  810. * only be called when the reader count reaches 0.
  811. *
  812. * This gives writers a better chance to acquire the rwsem first before
  813. * readers when the rwsem was being held by readers for a relatively long
  814. * period of time. A race can happen where an optimistic spinner may have
  815. * just stolen the rwsem and set the owner, but just clearing the
  816. * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
  817. */
  818. static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
  819. {
  820. if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
  821. atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
  822. }
  823. /*
  824. * This function is called when the reader fails to acquire the lock via
  825. * optimistic spinning. In this case we will still attempt to do a trylock
  826. * if comparing the current rwsem state with the state at slowpath entry
  827. * indicates that the reader is still in a valid reader phase.
  828. * This happens when the following conditions are true:
  829. *
  830. * 1) The lock is currently reader owned, and
  831. * 2) The lock was previously not reader-owned or the last read owner has changed.
  832. *
  833. * In the former case, we have transitioned from a writer phase to a
  834. * reader-phase while spinning. In the latter case, it means the reader
  835. * phase hasn't ended when we entered the optimistic spinning loop. In
  836. * both cases, the reader is eligible to acquire the lock. This is the
  837. * secondary path where a read lock is acquired optimistically.
  838. *
  839. * The reader non-spinnable bit wasn't set at the time of entry, or we
  840. * would not be here at all.
  841. */
  842. static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
  843. unsigned long last_rowner)
  844. {
  845. unsigned long owner = atomic_long_read(&sem->owner);
  846. if (!(owner & RWSEM_READER_OWNED))
  847. return false;
  848. if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
  849. rwsem_try_read_lock_unqueued(sem)) {
  850. lockevent_inc(rwsem_opt_rlock2);
  851. lockevent_add(rwsem_opt_fail, -1);
  852. return true;
  853. }
  854. return false;
  855. }
  856. #else
  857. static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
  858. unsigned long nonspinnable)
  859. {
  860. return false;
  861. }
  862. static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
  863. {
  864. return false;
  865. }
  866. static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
  867. static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
  868. unsigned long last_rowner)
  869. {
  870. return false;
  871. }
  872. static inline int
  873. rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
  874. {
  875. return 0;
  876. }
  877. #define OWNER_NULL 1
  878. #endif
  879. /*
  880. * Wait for the read lock to be granted
  881. */
  882. static struct rw_semaphore __sched *
  883. rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
  884. {
  885. long count, adjustment = -RWSEM_READER_BIAS;
  886. struct rwsem_waiter waiter;
  887. DEFINE_WAKE_Q(wake_q);
  888. bool wake = false;
  889. bool already_on_list = false;
  890. /*
  891. * Save the current read-owner of rwsem, if available, and the
  892. * reader nonspinnable bit.
  893. */
  894. waiter.last_rowner = atomic_long_read(&sem->owner);
  895. if (!(waiter.last_rowner & RWSEM_READER_OWNED))
  896. waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
  897. if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
  898. goto queue;
  899. /*
  900. * Undo read bias from down_read() and do optimistic spinning.
  901. */
  902. atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
  903. adjustment = 0;
  904. if (rwsem_optimistic_spin(sem, false)) {
  905. /* rwsem_optimistic_spin() implies ACQUIRE on success */
  906. /*
  907. * Wake up other readers in the wait list if the front
  908. * waiter is a reader.
  909. */
  910. if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
  911. raw_spin_lock_irq(&sem->wait_lock);
  912. if (!list_empty(&sem->wait_list))
  913. rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
  914. &wake_q);
  915. raw_spin_unlock_irq(&sem->wait_lock);
  916. wake_up_q(&wake_q);
  917. }
  918. return sem;
  919. } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
  920. /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
  921. return sem;
  922. }
  923. queue:
  924. waiter.task = current;
  925. waiter.type = RWSEM_WAITING_FOR_READ;
  926. waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
  927. raw_spin_lock_irq(&sem->wait_lock);
  928. if (list_empty(&sem->wait_list)) {
  929. /*
  930. * In case the wait queue is empty and the lock isn't owned
  931. * by a writer or has the handoff bit set, this reader can
  932. * exit the slowpath and return immediately as its
  933. * RWSEM_READER_BIAS has already been set in the count.
  934. */
  935. if (adjustment && !(atomic_long_read(&sem->count) &
  936. (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
  937. /* Provide lock ACQUIRE */
  938. smp_acquire__after_ctrl_dep();
  939. raw_spin_unlock_irq(&sem->wait_lock);
  940. rwsem_set_reader_owned(sem);
  941. lockevent_inc(rwsem_rlock_fast);
  942. return sem;
  943. }
  944. adjustment += RWSEM_FLAG_WAITERS;
  945. }
  946. trace_android_vh_alter_rwsem_list_add(
  947. &waiter,
  948. sem, &already_on_list);
  949. if (!already_on_list)
  950. list_add_tail(&waiter.list, &sem->wait_list);
  951. /* we're now waiting on the lock, but no longer actively locking */
  952. if (adjustment)
  953. count = atomic_long_add_return(adjustment, &sem->count);
  954. else
  955. count = atomic_long_read(&sem->count);
  956. /*
  957. * If there are no active locks, wake the front queued process(es).
  958. *
  959. * If there are no writers and we are first in the queue,
  960. * wake our own waiter to join the existing active readers !
  961. */
  962. if (!(count & RWSEM_LOCK_MASK)) {
  963. clear_wr_nonspinnable(sem);
  964. wake = true;
  965. }
  966. if (wake || (!(count & RWSEM_WRITER_MASK) &&
  967. (adjustment & RWSEM_FLAG_WAITERS)))
  968. rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
  969. trace_android_vh_rwsem_wake(sem);
  970. raw_spin_unlock_irq(&sem->wait_lock);
  971. wake_up_q(&wake_q);
  972. /* wait to be given the lock */
  973. trace_android_vh_rwsem_read_wait_start(sem);
  974. for (;;) {
  975. set_current_state(state);
  976. if (!smp_load_acquire(&waiter.task)) {
  977. /* Matches rwsem_mark_wake()'s smp_store_release(). */
  978. break;
  979. }
  980. if (signal_pending_state(state, current)) {
  981. raw_spin_lock_irq(&sem->wait_lock);
  982. if (waiter.task)
  983. goto out_nolock;
  984. raw_spin_unlock_irq(&sem->wait_lock);
  985. /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
  986. break;
  987. }
  988. schedule();
  989. lockevent_inc(rwsem_sleep_reader);
  990. }
  991. __set_current_state(TASK_RUNNING);
  992. trace_android_vh_rwsem_read_wait_finish(sem);
  993. lockevent_inc(rwsem_rlock);
  994. return sem;
  995. out_nolock:
  996. list_del(&waiter.list);
  997. if (list_empty(&sem->wait_list)) {
  998. atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
  999. &sem->count);
  1000. }
  1001. raw_spin_unlock_irq(&sem->wait_lock);
  1002. __set_current_state(TASK_RUNNING);
  1003. trace_android_vh_rwsem_read_wait_finish(sem);
  1004. lockevent_inc(rwsem_rlock_fail);
  1005. return ERR_PTR(-EINTR);
  1006. }
  1007. /*
  1008. * This function is called by a write lock owner. So the owner value
  1009. * won't get changed by others.
  1010. */
  1011. static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
  1012. bool disable)
  1013. {
  1014. if (unlikely(disable)) {
  1015. atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
  1016. lockevent_inc(rwsem_opt_norspin);
  1017. }
  1018. }
  1019. /*
  1020. * Wait until we successfully acquire the write lock
  1021. */
  1022. static struct rw_semaphore *
  1023. rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
  1024. {
  1025. long count;
  1026. bool disable_rspin;
  1027. enum writer_wait_state wstate;
  1028. struct rwsem_waiter waiter;
  1029. struct rw_semaphore *ret = sem;
  1030. DEFINE_WAKE_Q(wake_q);
  1031. bool already_on_list = false;
  1032. /* do optimistic spinning and steal lock if possible */
  1033. if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
  1034. rwsem_optimistic_spin(sem, true)) {
  1035. /* rwsem_optimistic_spin() implies ACQUIRE on success */
  1036. return sem;
  1037. }
  1038. /*
  1039. * Disable reader optimistic spinning for this rwsem after
  1040. * acquiring the write lock when the setting of the nonspinnable
  1041. * bits are observed.
  1042. */
  1043. disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
  1044. /*
  1045. * Optimistic spinning failed, proceed to the slowpath
  1046. * and block until we can acquire the sem.
  1047. */
  1048. waiter.task = current;
  1049. waiter.type = RWSEM_WAITING_FOR_WRITE;
  1050. waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
  1051. raw_spin_lock_irq(&sem->wait_lock);
  1052. /* account for this before adding a new element to the list */
  1053. wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
  1054. trace_android_vh_alter_rwsem_list_add(
  1055. &waiter,
  1056. sem, &already_on_list);
  1057. if (!already_on_list)
  1058. list_add_tail(&waiter.list, &sem->wait_list);
  1059. /* we're now waiting on the lock */
  1060. if (wstate == WRITER_NOT_FIRST) {
  1061. count = atomic_long_read(&sem->count);
  1062. /*
  1063. * If there were already threads queued before us and:
  1064. * 1) there are no active locks, wake the front
  1065. * queued process(es) as the handoff bit might be set.
  1066. * 2) there are no active writers and some readers, the lock
  1067. * must be read owned; so we try to wake any read lock
  1068. * waiters that were queued ahead of us.
  1069. */
  1070. if (count & RWSEM_WRITER_MASK)
  1071. goto wait;
  1072. rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
  1073. ? RWSEM_WAKE_READERS
  1074. : RWSEM_WAKE_ANY, &wake_q);
  1075. if (!wake_q_empty(&wake_q)) {
  1076. /*
  1077. * We want to minimize wait_lock hold time especially
  1078. * when a large number of readers are to be woken up.
  1079. */
  1080. raw_spin_unlock_irq(&sem->wait_lock);
  1081. wake_up_q(&wake_q);
  1082. wake_q_init(&wake_q); /* Used again, reinit */
  1083. raw_spin_lock_irq(&sem->wait_lock);
  1084. }
  1085. } else {
  1086. atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
  1087. }
  1088. wait:
  1089. trace_android_vh_rwsem_wake(sem);
  1090. /* wait until we successfully acquire the lock */
  1091. trace_android_vh_rwsem_write_wait_start(sem);
  1092. set_current_state(state);
  1093. for (;;) {
  1094. if (rwsem_try_write_lock(sem, wstate)) {
  1095. /* rwsem_try_write_lock() implies ACQUIRE on success */
  1096. break;
  1097. }
  1098. raw_spin_unlock_irq(&sem->wait_lock);
  1099. /*
  1100. * After setting the handoff bit and failing to acquire
  1101. * the lock, attempt to spin on owner to accelerate lock
  1102. * transfer. If the previous owner is an on-cpu writer and it
  1103. * has just released the lock, OWNER_NULL will be returned.
  1104. * In this case, we attempt to acquire the lock again
  1105. * without sleeping.
  1106. */
  1107. if (wstate == WRITER_HANDOFF &&
  1108. rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
  1109. goto trylock_again;
  1110. /* Block until there are no active lockers. */
  1111. for (;;) {
  1112. if (signal_pending_state(state, current))
  1113. goto out_nolock;
  1114. schedule();
  1115. lockevent_inc(rwsem_sleep_writer);
  1116. set_current_state(state);
  1117. /*
  1118. * If HANDOFF bit is set, unconditionally do
  1119. * a trylock.
  1120. */
  1121. if (wstate == WRITER_HANDOFF)
  1122. break;
  1123. if ((wstate == WRITER_NOT_FIRST) &&
  1124. (rwsem_first_waiter(sem) == &waiter))
  1125. wstate = WRITER_FIRST;
  1126. count = atomic_long_read(&sem->count);
  1127. if (!(count & RWSEM_LOCK_MASK))
  1128. break;
  1129. /*
  1130. * The setting of the handoff bit is deferred
  1131. * until rwsem_try_write_lock() is called.
  1132. */
  1133. if ((wstate == WRITER_FIRST) && (rt_task(current) ||
  1134. time_after(jiffies, waiter.timeout))) {
  1135. wstate = WRITER_HANDOFF;
  1136. lockevent_inc(rwsem_wlock_handoff);
  1137. break;
  1138. }
  1139. }
  1140. trylock_again:
  1141. raw_spin_lock_irq(&sem->wait_lock);
  1142. }
  1143. __set_current_state(TASK_RUNNING);
  1144. trace_android_vh_rwsem_write_wait_finish(sem);
  1145. list_del(&waiter.list);
  1146. rwsem_disable_reader_optspin(sem, disable_rspin);
  1147. raw_spin_unlock_irq(&sem->wait_lock);
  1148. lockevent_inc(rwsem_wlock);
  1149. return ret;
  1150. out_nolock:
  1151. __set_current_state(TASK_RUNNING);
  1152. trace_android_vh_rwsem_write_wait_finish(sem);
  1153. raw_spin_lock_irq(&sem->wait_lock);
  1154. list_del(&waiter.list);
  1155. if (unlikely(wstate == WRITER_HANDOFF))
  1156. atomic_long_andnot(RWSEM_FLAG_HANDOFF, &sem->count);
  1157. if (list_empty(&sem->wait_list))
  1158. atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
  1159. else
  1160. rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
  1161. raw_spin_unlock_irq(&sem->wait_lock);
  1162. wake_up_q(&wake_q);
  1163. lockevent_inc(rwsem_wlock_fail);
  1164. return ERR_PTR(-EINTR);
  1165. }
  1166. /*
  1167. * handle waking up a waiter on the semaphore
  1168. * - up_read/up_write has decremented the active part of count if we come here
  1169. */
  1170. static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
  1171. {
  1172. unsigned long flags;
  1173. DEFINE_WAKE_Q(wake_q);
  1174. raw_spin_lock_irqsave(&sem->wait_lock, flags);
  1175. if (!list_empty(&sem->wait_list))
  1176. rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
  1177. trace_android_vh_rwsem_wake_finish(sem);
  1178. raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  1179. wake_up_q(&wake_q);
  1180. return sem;
  1181. }
  1182. /*
  1183. * downgrade a write lock into a read lock
  1184. * - caller incremented waiting part of count and discovered it still negative
  1185. * - just wake up any readers at the front of the queue
  1186. */
  1187. static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  1188. {
  1189. unsigned long flags;
  1190. DEFINE_WAKE_Q(wake_q);
  1191. raw_spin_lock_irqsave(&sem->wait_lock, flags);
  1192. if (!list_empty(&sem->wait_list))
  1193. rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
  1194. raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  1195. wake_up_q(&wake_q);
  1196. return sem;
  1197. }
  1198. /*
  1199. * lock for reading
  1200. */
  1201. static inline void __down_read(struct rw_semaphore *sem)
  1202. {
  1203. if (!rwsem_read_trylock(sem)) {
  1204. rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
  1205. DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
  1206. } else {
  1207. rwsem_set_reader_owned(sem);
  1208. }
  1209. }
  1210. static inline int __down_read_interruptible(struct rw_semaphore *sem)
  1211. {
  1212. if (!rwsem_read_trylock(sem)) {
  1213. if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
  1214. return -EINTR;
  1215. DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
  1216. } else {
  1217. rwsem_set_reader_owned(sem);
  1218. }
  1219. return 0;
  1220. }
  1221. static inline int __down_read_killable(struct rw_semaphore *sem)
  1222. {
  1223. if (!rwsem_read_trylock(sem)) {
  1224. if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
  1225. return -EINTR;
  1226. DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
  1227. } else {
  1228. rwsem_set_reader_owned(sem);
  1229. }
  1230. return 0;
  1231. }
  1232. static inline int __down_read_trylock(struct rw_semaphore *sem)
  1233. {
  1234. long tmp;
  1235. DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
  1236. /*
  1237. * Optimize for the case when the rwsem is not locked at all.
  1238. */
  1239. tmp = RWSEM_UNLOCKED_VALUE;
  1240. do {
  1241. if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
  1242. tmp + RWSEM_READER_BIAS)) {
  1243. rwsem_set_reader_owned(sem);
  1244. return 1;
  1245. }
  1246. } while (!(tmp & RWSEM_READ_FAILED_MASK));
  1247. return 0;
  1248. }
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED)))
		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
	else
		rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						      RWSEM_WRITER_LOCKED))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
			return -EINTR;
	} else {
		rwsem_set_owner(sem);
	}
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	tmp = RWSEM_UNLOCKED_VALUE;
	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					    RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_wr_nonspinnable(sem);
		rwsem_wake(sem, tmp);
	}
	trace_android_vh_rwsem_up_read_end(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem, tmp);
	trace_android_vh_rwsem_up_write_end(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);
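
/*
 * Illustrative usage sketch (not part of rwsem.c): a typical read-side
 * critical section as a caller would write it. The semaphore and data names
 * below (example_sem, example_value) are assumptions made up for the example;
 * the block is guarded by #if 0 so it is never built.
 */
#if 0
static DECLARE_RWSEM(example_sem);
static int example_value;

static int example_read_value(void)
{
	int v;

	down_read(&example_sem);	/* may sleep; shared with other readers */
	v = example_value;		/* read-side critical section */
	up_read(&example_sem);

	return v;
}
#endif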
int __sched down_read_interruptible(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);
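
/*
 * Illustrative usage sketch (not part of rwsem.c): the interruptible and
 * killable variants return -EINTR when the sleep is broken by a signal (any
 * signal, or a fatal one, respectively), so callers must check the return
 * value and bail out without touching the protected data. The names
 * example_sem and example_value are assumptions for the example; the block is
 * guarded by #if 0 so it is never built.
 */
#if 0
static int example_read_value_killable(struct rw_semaphore *example_sem,
				       int *example_value, int *out)
{
	int ret;

	ret = down_read_killable(example_sem);
	if (ret)			/* -EINTR: the lock was not taken */
		return ret;

	*out = *example_value;		/* read-side critical section */
	up_read(example_sem);

	return 0;
}
#endif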
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);
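
/*
 * Illustrative usage sketch (not part of rwsem.c): the trylock never sleeps,
 * so it is a natural fit when a caller would rather skip the work than wait
 * behind a writer. The names below are assumptions for the example; the block
 * is guarded by #if 0 so it is never built.
 */
#if 0
static bool example_try_snapshot(struct rw_semaphore *example_sem,
				 int *example_value, int *out)
{
	if (!down_read_trylock(example_sem))
		return false;		/* contended; caller retries later */

	*out = *example_value;		/* read-side critical section */
	up_read(example_sem);
	return true;
}
#endif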
/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
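
/*
 * Illustrative usage sketch (not part of rwsem.c): the write-side counterpart
 * of the reader example above. Only one writer may hold the semaphore, and it
 * excludes all readers. Names are assumptions for the example; the block is
 * guarded by #if 0 so it is never built.
 */
#if 0
static void example_write_value(struct rw_semaphore *example_sem,
				int *example_value, int new_value)
{
	down_write(example_sem);	/* may sleep; exclusive access */
	*example_value = new_value;	/* write-side critical section */
	up_write(example_sem);
}
#endif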
/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);
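
/*
 * Illustrative usage sketch (not part of rwsem.c): as with the killable read
 * lock, -EINTR from down_write_killable() means the lock was never taken,
 * typically because the task received a fatal signal, and the error is
 * usually propagated straight back to the caller. Names are assumptions for
 * the example; the block is guarded by #if 0 so it is never built.
 */
#if 0
static int example_update_value(struct rw_semaphore *example_sem,
				int *example_value, int new_value)
{
	if (down_write_killable(example_sem))
		return -EINTR;		/* task is being killed; give up */

	*example_value = new_value;	/* write-side critical section */
	up_write(example_sem);

	return 0;
}
#endif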
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);
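
/*
 * Illustrative usage sketch (not part of rwsem.c): a common pattern is to
 * attempt opportunistic work with the write trylock and simply skip it when
 * the semaphore is contended, leaving the work for a later pass. Names are
 * assumptions for the example; the block is guarded by #if 0 so it is never
 * built.
 */
#if 0
static void example_try_reset(struct rw_semaphore *example_sem,
			      int *example_value)
{
	if (!down_write_trylock(example_sem))
		return;			/* busy; try again on the next pass */

	*example_value = 0;		/* write-side critical section */
	up_write(example_sem);
}
#endif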
/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	trace_android_vh_rwsem_write_finished(sem);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	trace_android_vh_rwsem_write_finished(sem);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
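
/*
 * Illustrative usage sketch (not part of rwsem.c): downgrade_write() turns an
 * exclusive hold into a shared one without a window in which the semaphore is
 * dropped, so other readers can proceed while the original owner keeps
 * reading what it just wrote, finishing with up_read(). Names are assumptions
 * for the example; the block is guarded by #if 0 so it is never built.
 */
#if 0
static int example_init_then_read(struct rw_semaphore *example_sem,
				  int *example_value)
{
	int v;

	down_write(example_sem);
	*example_value = 42;		/* exclusive: initialize the data */

	downgrade_write(example_sem);	/* now shared; readers may enter */
	v = *example_value;		/* still protected, read-side only */
	up_read(example_sem);		/* release the *read* lock */

	return v;
}
#endif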
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
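
/*
 * Illustrative usage sketch (not part of rwsem.c): the _nested variants exist
 * for lockdep. When two semaphores of the same lock class are taken in a
 * well-defined order, the inner acquisition is annotated with a non-zero
 * subclass (e.g. SINGLE_DEPTH_NESTING) so lockdep does not report a false
 * deadlock. Names are assumptions for the example; the block is guarded by
 * #if 0 so it is never built.
 */
#if 0
static void example_lock_pair(struct rw_semaphore *parent_sem,
			      struct rw_semaphore *child_sem)
{
	down_read(parent_sem);
	down_read_nested(child_sem, SINGLE_DEPTH_NESTING);

	/* ... work with both objects ... */

	up_read(child_sem);
	up_read(parent_sem);
}
#endif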
int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
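
/*
 * Illustrative usage sketch (not part of rwsem.c): the _non_owner calls are
 * for the rare case where a read lock is acquired in one context and released
 * in another (for instance after an asynchronous operation completes), so the
 * owner field cannot be used for the usual debugging checks. Names are
 * assumptions for the example; the block is guarded by #if 0 so it is never
 * built.
 */
#if 0
static void example_start_async_read(struct rw_semaphore *example_sem)
{
	/* Taken here, but the matching release happens elsewhere. */
	down_read_non_owner(example_sem);
	/* ... kick off asynchronous work that uses the protected data ... */
}

static void example_async_read_done(struct rw_semaphore *example_sem)
{
	/* Possibly running in a different task than the acquirer. */
	up_read_non_owner(example_sem);
}
#endif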
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif