crypto_engine.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by a crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        /*
         * If hardware cannot enqueue more requests
         * and the retry mechanism is not supported,
         * make sure we are completing the current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        if (finalize_req || engine->retry_support) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (enginectx->op.prepare_request &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
        }
        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }
start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_2;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err_2;
                }
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }
        ret = enginectx->op.do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If the hardware queue is full (-ENOSPC), requeue the request
                 * regardless of the backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                /*
                 * If the retry mechanism is supported,
                 * unprepare the current request and
                 * enqueue it back into the crypto-engine queue.
                 */
                if (enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine,
                                                              async_req);
                        if (ret)
                                dev_err(engine->dev,
                                        "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute the request, enqueue it
                 * back in front of the crypto-engine queue, to keep the order
                 * of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        if (enginectx->op.unprepare_request) {
                ret = enginectx->op.unprepare_request(engine, async_req);
                if (ret)
                        dev_err(engine->dev, "failed to unprepare request\n");
        }

req_err_2:
        async_req->complete(async_req, ret);

retry:
        /* If the retry mechanism is supported, send new requests to the engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batching requests is possible only if
         * the hardware can enqueue multiple requests.
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the pump work after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
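/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver's skcipher ->encrypt() callback typically just hands the
 * request to the engine and lets the pump thread drive it. The names
 * foo_ctx and foo_skcipher_encrypt are hypothetical.
 *
 *      static int foo_skcipher_encrypt(struct skcipher_request *req)
 *      {
 *              struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *              struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *              return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *                                                                req);
 *      }
 */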
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
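/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): when the hardware signals completion, typically from the driver's
 * interrupt handler or a completion work item, the driver hands the result
 * back so the engine can unprepare the request, call its completion and
 * pump the next one. foo_dev, foo_done and cur_req are hypothetical names.
 *
 *      static void foo_done(struct foo_dev *fdev, int err)
 *      {
 *              crypto_finalize_skcipher_request(fdev->engine,
 *                                               fdev->cur_req, err);
 *      }
 */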
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is in a busy state,
         * we need to wait for a while to pump the requests of the engine queue.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batching requests is possible only if
         * the hardware supports the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
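/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver's probe path typically allocates the engine and starts
 * the pump thread before registering its algorithms (not shown). foo_probe
 * and foo_dev are hypothetical names.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct foo_dev *fdev;
 *              int ret;
 *
 *              fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
 *              if (!fdev)
 *                      return -ENOMEM;
 *
 *              fdev->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *              if (!fdev->engine)
 *                      return -ENOMEM;
 *
 *              ret = crypto_engine_start(fdev->engine);
 *              if (ret) {
 *                      crypto_engine_exit(fdev->engine);
 *                      return ret;
 *              }
 *
 *              platform_set_drvdata(pdev, fdev);
 *              return 0;
 *      }
 */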
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
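/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the matching remove path stops the pump thread and frees the
 * kworker via crypto_engine_exit(). foo_remove and foo_dev are hypothetical
 * names.
 *
 *      static int foo_remove(struct platform_device *pdev)
 *      {
 *              struct foo_dev *fdev = platform_get_drvdata(pdev);
 *
 *              crypto_engine_exit(fdev->engine);
 *              return 0;
 *      }
 */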
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");