/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01	/* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02	/* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03	/* No irq/preemption handling; caller serializes */
#define HWLOCK_IN_ATOMIC	0x04	/* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
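
/*
 * Illustrative sketch (assumptions, not part of this header): a board file
 * for a system with two hwspinlock banks could hand the second bank a
 * non-zero base id via this platform data. The "acme-hwspinlock" device
 * name and the id 32 are made-up example values.
 *
 *	#include <linux/platform_device.h>
 *	#include <linux/hwspinlock.h>
 *
 *	static struct hwspinlock_pdata acme_bank1_pdata = {
 *		.base_id = 32,			// bank 0 owns ids 0..31
 *	};
 *
 *	static struct platform_device acme_bank1_device = {
 *		.name = "acme-hwspinlock",
 *		.id = 1,
 *		.dev = {
 *			.platform_data = &acme_bank1_pdata,
 *		},
 *	};
 */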

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
			  unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks);
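
/*
 * Illustrative provider sketch: a minimal probe() that registers a bank of
 * locks with devm_hwspin_lock_register(). The register layout, lock count
 * and driver names are assumptions; real drivers also include
 * hwspinlock_internal.h, which defines struct hwspinlock_device,
 * struct hwspinlock_ops and the per-lock priv pointer used below.
 *
 *	static int acme_hwspinlock_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock_device *bank;
 *		void __iomem *io_base;
 *		int i, num_locks = 32;
 *
 *		io_base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(io_base))
 *			return PTR_ERR(io_base);
 *
 *		bank = devm_kzalloc(&pdev->dev,
 *				    struct_size(bank, lock, num_locks),
 *				    GFP_KERNEL);
 *		if (!bank)
 *			return -ENOMEM;
 *
 *		for (i = 0; i < num_locks; i++)
 *			bank->lock[i].priv = io_base + 0x800 + i * sizeof(u32);
 *
 *		return devm_hwspin_lock_register(&pdev->dev, bank,
 *						 &acme_hwspinlock_ops, 0,
 *						 num_locks);
 *	}
 */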

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code that uses it will still build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */
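
/*
 * Illustrative consumer sketch: resolving a lock referenced by a device
 * tree "hwlocks" phandle and requesting it with a device-managed handle.
 * The probe() function and the index 0 are assumptions for the example.
 *
 *	static int acme_client_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id;
 *
 *		id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *		if (id < 0)
 *			return id;
 *
 *		hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		// the lock is freed automatically when the driver detaches
 *		return 0;
 *	}
 */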

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved at
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (previous interrupts state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
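
/*
 * Illustrative usage sketch: a short critical section protected by
 * hwspin_trylock_irqsave()/hwspin_unlock_irqrestore(). The hwlock handle,
 * val and shared_reg are assumptions for the example.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;	// -EBUSY: another core holds the lock
 *
 *	// keep this short: interrupts and preemption are disabled here
 *	writel(val, shared_reg);
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */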

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect the routine that takes the hardware lock
 * with a mutex or spinlock to avoid deadlock; in return, this allows the
 * caller to perform time-consuming or sleepable operations while holding
 * the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
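
/*
 * Illustrative usage sketch for the raw variant: per the caution above, the
 * caller serializes its own access (a mutex here), which also makes it legal
 * to sleep while the hardware lock is held. The my_lock mutex and
 * do_slow_update() are assumptions for the example.
 *
 *	mutex_lock(&my_lock);
 *
 *	ret = hwspin_trylock_raw(hwlock);
 *	if (!ret) {
 *		do_slow_update();	// may sleep; local side is serialized
 *		hwspin_unlock_raw(hwlock);
 *	}
 *
 *	mutex_unlock(&my_lock);
 */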

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved at
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
					      unsigned int to,
					      unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect the routine that takes the hardware lock
 * with a mutex or spinlock to avoid deadlock; in return, this allows the
 * caller to perform time-consuming or sleepable operations while holding
 * the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
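
/*
 * Illustrative usage sketch: taking the lock from atomic context (e.g. an
 * interrupt handler) with a short timeout, as required above. The 3 msec
 * timeout, status and shared_mbox_reg are assumptions for the example.
 *
 *	ret = hwspin_lock_timeout_in_atomic(hwlock, 3);
 *	if (ret)
 *		return IRQ_NONE;	// -ETIMEDOUT or -EINVAL
 *
 *	writel(status, shared_mbox_reg);
 *
 *	hwspin_unlock_in_atomic(hwlock);
 */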

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
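
/*
 * Illustrative lifecycle sketch: requesting an unused lock, taking it with
 * a timeout, and releasing both the lock and the handle. The 1000 msec
 * timeout is an assumption for the example.
 *
 *	struct hwspinlock *hwlock;
 *	int ret;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	ret = hwspin_lock_timeout(hwlock, 1000);
 *	if (!ret) {
 *		// critical section: preemption is disabled here
 *		hwspin_unlock(hwlock);
 *	}
 *
 *	hwspin_lock_free(hwlock);
 *	return ret;
 */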

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */