mmc_hsq.c

// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests to pump */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns BUSY, the card may be busy now, so
	 * retry this unusual case from non-atomic context to avoid
	 * time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}
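
/*
 * Illustration (not part of this file): a host driver can provide
 * ->request_atomic() so requests can be issued directly from this pump
 * path, returning -EBUSY when the controller or card is still busy so
 * that the request is retried from the workqueue above. This is only a
 * minimal sketch of that contract; the foo_* names are hypothetical.
 *
 *	static int foo_request_atomic(struct mmc_host *mmc,
 *				      struct mmc_request *mrq)
 *	{
 *		struct foo_host *host = mmc_priv(mmc);
 *
 *		if (foo_controller_busy(host))
 *			return -EBUSY;	// hsq retries via retry_work
 *
 *		foo_send_request(host, mrq);
 *		return 0;
 *	}
 */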

static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if yes, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise we should iterate all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the slot of the completed request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
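
/*
 * Illustration (not part of this file): a host driver that has enabled the
 * software queue typically calls mmc_hsq_finalize_request() from its
 * request-completion path, and only falls back to mmc_request_done() when
 * the request was not issued through the software queue. This is a sketch
 * of how existing users (e.g. sdhci-sprd) consume the API; the foo_*
 * names are hypothetical.
 *
 *	static void foo_request_done(struct foo_host *host,
 *				     struct mmc_request *mrq)
 *	{
 *		// Finalized by the software queue? Then nothing more to do.
 *		if (mmc_hsq_finalize_request(host->mmc, mrq))
 *			return;
 *
 *		// Otherwise complete through the normal core path.
 *		mmc_request_done(host->mmc, mrq);
 *	}
 */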

static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request's tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
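
/*
 * Illustration (not part of this file): a host driver opts in to the
 * software queue at probe time by allocating a struct mmc_hsq, calling
 * mmc_hsq_init() and advertising MMC_CAP2_CQE so the core routes requests
 * through the cqe_ops installed above. A minimal sketch with hypothetical
 * foo_* names; error/unwind handling is trimmed.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *		struct mmc_hsq *hsq;
 *		int ret;
 *
 *		mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *
 *		hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *		if (!hsq)
 *			return -ENOMEM;
 *
 *		ret = mmc_hsq_init(hsq, mmc);
 *		if (ret)
 *			return ret;
 *
 *		mmc->caps2 |= MMC_CAP2_CQE;	// let the core use cqe_ops
 *		return mmc_add_host(mmc);
 *	}
 */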

void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
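
/*
 * Illustration (not part of this file): host drivers using the software
 * queue typically quiesce it around suspend/resume from their (runtime)
 * PM callbacks. A minimal sketch; the foo_* names are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		// Wait for the queue to drain and mark it disabled.
 *		mmc_hsq_suspend(mmc);
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		// Re-enable the queue so new requests can be pumped again.
 *		return mmc_hsq_resume(mmc);
 *	}
 */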

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");