// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1
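
/*
 * Algorithm backends built into the driver. Each entry is registered
 * with the crypto API at probe time and consulted when an asynchronous
 * request is dispatched to the engine.
 */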
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};
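
/* Unregister every algorithm backend listed in qce_ops. */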
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}
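
/* Register all algorithm backends with the crypto API, stopping at the first failure. */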
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}
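
/* Hand a dequeued request to the backend that matches its algorithm type. */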
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
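
/*
 * Enqueue an optional new request and, if the engine is idle, pull the
 * next request off the queue and start it. Only one request is in
 * flight at a time (qce->req marks the engine busy); a backlogged
 * request is signalled with -EINPROGRESS before the dequeued one is
 * handled.
 */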
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
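
/*
 * Completion tasklet: report the result of the finished request to its
 * originator and kick the queue to start the next one.
 */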
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}
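
/*
 * Entry points exposed to the algorithm backends through struct
 * qce_device: one feeds new requests into the queue, the other records
 * the result and defers completion to the tasklet.
 */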
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
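
/*
 * Read the hardware version and reject anything other than a v5 engine
 * with a non-zero minor (v5.0 has special alignment requirements), then
 * record the BAM burst size and pipe pair the driver will use.
 */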
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
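
/*
 * Probe: map the register space, acquire and enable the core/iface/bus
 * clocks, request the DMA channels, set up the request queue and
 * completion tasklet, and register the algorithm backends. The error
 * labels unwind the clocks and DMA channels.
 */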
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}
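
/*
 * Remove: stop the completion tasklet, unregister the algorithms and
 * release the DMA channels and clocks.
 */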
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");