common.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
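
/* Thin MMIO accessors for the crypto engine's register window. */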
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
        return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
        writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
                                   const u32 *val, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), 0);
}
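
/*
 * Build a REG_CONFIG value: program the request size from the DMA burst
 * size, mask the DIN/DOUT/done/error interrupts, select the pipe pair
 * and optionally enable little-endian mode.
 */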
static u32 qce_config_reg(struct qce_device *qce, int little)
{
        u32 beats = (qce->burst_size >> 3) - 1;
        u32 pipe_pair = qce->pipe_pair_id;
        u32 config;

        config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
        config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
                  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
        config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
        config &= ~HIGH_SPD_EN_N_SHIFT;

        if (little)
                config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

        return config;
}
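
/* Repack a byte stream into an array of big-endian 32-bit words. */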
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
        __be32 *d = dst;
        const u8 *s = src;
        unsigned int n;

        n = len / sizeof(u32);
        for (; n > 0; n--) {
                *d = cpu_to_be32p((const __u32 *) s);
                s += sizeof(__u32);
                d++;
        }
}
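
/* Clear the status register and program the base (big-endian) config. */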
static void qce_setup_config(struct qce_device *qce)
{
        u32 config;

        /* get big endianness */
        config = qce_config_reg(qce, 0);

        /* clear status */
        qce_write(qce, REG_STATUS, 0);
        qce_write(qce, REG_CONFIG, config);
}
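
/* Kick off processing and request a result dump for the current request. */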
static inline void qce_crypto_go(struct qce_device *qce)
{
        qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
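/*
 * Encode the REG_AUTH_SEG_CFG value for a request: authentication
 * algorithm (AES for CCM/CMAC, SHA otherwise), key and digest sizes,
 * mode (hash, HMAC, CCM or CMAC), digest position, CCM nonce words and
 * the first/last segment bits.
 */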
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
        u32 cfg = 0;

        if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
                cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
        else
                cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

        if (IS_CCM(flags) || IS_CMAC(flags)) {
                if (key_size == AES_KEYSIZE_128)
                        cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
                else if (key_size == AES_KEYSIZE_256)
                        cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
        }

        if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
                cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
        else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
                cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
        else if (IS_CMAC(flags))
                cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

        if (IS_SHA1(flags) || IS_SHA256(flags))
                cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
        else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
                 IS_CBC(flags) || IS_CTR(flags))
                cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
        else if (IS_AES(flags) && IS_CCM(flags))
                cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
        else if (IS_AES(flags) && IS_CMAC(flags))
                cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

        if (IS_SHA(flags) || IS_SHA_HMAC(flags))
                cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

        if (IS_CCM(flags))
                cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

        if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
            IS_CMAC(flags))
                cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

        return cfg;
}
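
/*
 * Program the registers for an ahash request (SHA1/SHA256, their HMAC
 * variants, or AES-CMAC) and start the engine. Intermediate digests and
 * byte counts kept in the request context are restored for continuation
 * blocks; partial updates must be block aligned unless this is the last
 * block.
 */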
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
                                u32 totallen, u32 offset)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
        struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
        struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        unsigned int digestsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
        __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
        __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
        u32 auth_cfg = 0, config;
        unsigned int iv_words;

        /* if not the last, the size has to be on the block boundary */
        if (!rctx->last_blk && req->nbytes % blocksize)
                return -EINVAL;

        qce_setup_config(qce);

        if (IS_CMAC(rctx->flags)) {
                qce_write(qce, REG_AUTH_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_SIZE, 0);
                qce_clear_array(qce, REG_AUTH_IV0, 16);
                qce_clear_array(qce, REG_AUTH_KEY0, 16);
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

                auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
        }

        if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
                u32 authkey_words = rctx->authklen / sizeof(u32);

                qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
                qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
                                authkey_words);
        }

        if (IS_CMAC(rctx->flags))
                goto go_proc;

        if (rctx->first_blk)
                memcpy(auth, rctx->digest, digestsize);
        else
                qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

        iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
        qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

        if (rctx->first_blk)
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
        else
                qce_write_array(qce, REG_AUTH_BYTECNT0,
                                (u32 *)rctx->byte_count, 2);

        auth_cfg = qce_auth_cfg(rctx->flags, 0);

        if (rctx->last_blk)
                auth_cfg |= BIT(AUTH_LAST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

        if (rctx->first_blk)
                auth_cfg |= BIT(AUTH_FIRST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
        qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
        qce_write(qce, REG_AUTH_SEG_START, 0);
        qce_write(qce, REG_ENCR_SEG_CFG, 0);
        qce_write(qce, REG_SEG_SIZE, req->nbytes);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce);

        return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
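/*
 * Encode the REG_ENCR_SEG_CFG value for a request: cipher algorithm
 * (AES, DES or 3DES), key size and block mode. Returns ~0 for an
 * unsupported mode.
 */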
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
        u32 cfg = 0;

        if (IS_AES(flags)) {
                if (aes_key_size == AES_KEYSIZE_128)
                        cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
                else if (aes_key_size == AES_KEYSIZE_256)
                        cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
        }

        if (IS_AES(flags))
                cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
        else if (IS_DES(flags) || IS_3DES(flags))
                cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

        if (IS_DES(flags))
                cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

        if (IS_3DES(flags))
                cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

        switch (flags & QCE_MODE_MASK) {
        case QCE_MODE_ECB:
                cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CBC:
                cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CTR:
                cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_XTS:
                cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CCM:
                cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
                cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
                break;
        default:
                return ~0;
        }

        return cfg;
}
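
/*
 * Build the XTS tweak in the layout the engine consumes: the IV bytes
 * are reversed into the tail of a zero-padded 16-byte buffer and then
 * repacked as big-endian words.
 */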
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
        u8 swap[QCE_AES_IV_LENGTH];
        u32 i, j;

        if (ivsize > QCE_AES_IV_LENGTH)
                return;

        memset(swap, 0, QCE_AES_IV_LENGTH);

        for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
             i < QCE_AES_IV_LENGTH; i++, j--)
                swap[i] = src[j];

        qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
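
/*
 * Program the second half of the XTS key into the ENCR_XTS_KEY registers
 * and set the data-unit size (QCE_SECTOR_SIZE, capped at cryptlen).
 */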
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
                       unsigned int enckeylen, unsigned int cryptlen)
{
        u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
        unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
        unsigned int xtsdusize;

        qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
                               enckeylen / 2);
        qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

        /* xts du size 512B */
        xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
        qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}
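
/*
 * Program the registers for an skcipher request (AES, DES or 3DES in the
 * supported block modes), including key, IV/counter and segment sizes,
 * then start the engine.
 */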
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
                                   u32 totallen, u32 offset)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
        struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
        __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
        unsigned int enckey_words, enciv_words;
        unsigned int keylen;
        u32 encr_cfg = 0, auth_cfg = 0, config;
        unsigned int ivsize = rctx->ivsize;
        unsigned long flags = rctx->flags;

        qce_setup_config(qce);

        if (IS_XTS(flags))
                keylen = ctx->enc_keylen / 2;
        else
                keylen = ctx->enc_keylen;

        qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
        enckey_words = keylen / sizeof(u32);

        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

        encr_cfg = qce_encr_cfg(flags, keylen);

        if (IS_DES(flags)) {
                enciv_words = 2;
                enckey_words = 2;
        } else if (IS_3DES(flags)) {
                enciv_words = 2;
                enckey_words = 6;
        } else if (IS_AES(flags)) {
                if (IS_XTS(flags))
                        qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
                                   rctx->cryptlen);
                enciv_words = 4;
        } else {
                return -EINVAL;
        }

        qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

        if (!IS_ECB(flags)) {
                if (IS_XTS(flags))
                        qce_xts_swapiv(enciv, rctx->iv, ivsize);
                else
                        qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

                qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
        }

        if (IS_ENCRYPT(flags))
                encr_cfg |= BIT(ENCODE_SHIFT);

        qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
        qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
        qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

        if (IS_CTR(flags)) {
                qce_write(qce, REG_CNTR_MASK, ~0);
                qce_write(qce, REG_CNTR_MASK0, ~0);
                qce_write(qce, REG_CNTR_MASK1, ~0);
                qce_write(qce, REG_CNTR_MASK2, ~0);
        }

        qce_write(qce, REG_SEG_SIZE, totallen);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce);

        return 0;
}
#endif
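
/*
 * Common entry point for the algorithm front ends: dispatch register
 * setup based on the crypto algorithm type of the request.
 */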
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
              u32 offset)
{
        switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return qce_setup_regs_skcipher(async_req, totallen, offset);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
        case CRYPTO_ALG_TYPE_AHASH:
                return qce_setup_regs_ahash(async_req, totallen, offset);
#endif
        default:
                return -EINVAL;
        }
}
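
/* Fatal error bits in REG_STATUS. */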
#define STATUS_ERRORS	\
                (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

int qce_check_status(struct qce_device *qce, u32 *status)
{
        int ret = 0;

        *status = qce_read(qce, REG_STATUS);

        /*
         * Don't use the result dump status: the operation may not be complete.
         * Instead, use the status we just read from the device. If we ever
         * need result_status from the result dump, it has to be byte swapped,
         * since we set the device to little endian.
         */
        if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
                ret = -ENXIO;

        return ret;
}
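
/* Decode the core's major/minor/step revision from REG_VERSION. */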
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
        u32 val;

        val = qce_read(qce, REG_VERSION);
        *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
        *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
        *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}