geode-aes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static state: the LX has a single AES engine, shared by all tfms */
static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

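/*
 * Program one operation into the engine: physical source/destination
 * addresses and the length, then set AES_CTRL_START and poll the
 * interrupt register until the engine reports completion or the
 * timeout counter runs out. Returns nonzero on timeout.
 */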
static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);

        return counter ? 0 : 1;
}

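/*
 * Run one request under the engine lock: load the IV (CBC only) and
 * the 128-bit key, then hand the buffers to do_crypt(). For CBC the
 * updated IV is read back afterwards so chained calls continue
 * correctly.
 */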
static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
                void *dst, u32 len, u8 *iv, int mode, int dir)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        /*
         * Always enable the coherent source/destination flags; they are
         * needed when source and destination are the same buffer, and
         * harmless otherwise.
         */
        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */
        spin_lock_irqsave(&lock, iflags);

        if (mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, iv);
        }

        flags |= AES_CTRL_WRKEY;
        _writefield(AES_WRITEKEY0_REG, tctx->key);

        ret = do_crypt(src, dst, len, flags);
        BUG_ON(ret);

        if (mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, iv);

        spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */
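
/*
 * The LX engine implements AES-128 only: 192- and 256-bit keys are
 * handed to the software fallback allocated at tfm init time, and any
 * other length is rejected.
 */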
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        tctx->fallback.cip->base.crt_flags |=
                (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        crypto_skcipher_clear_flags(tctx->fallback.skcipher,
                                    CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(tctx->fallback.skcipher,
                                  crypto_skcipher_get_flags(tfm) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}

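/*
 * Single-block primitives for the plain "aes" cipher: anything other
 * than a 128-bit key is routed to the software fallback cipher.
 */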
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_DECRYPT);
}

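/*
 * The software fallback cipher lives for the lifetime of the tfm:
 * allocated at init, released at exit.
 */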
static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->fallback.cip = crypto_alloc_cipher(name, 0,
                                                 CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(tctx->fallback.cip);
}

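/*
 * Priority 300 makes this implementation preferred over the generic
 * software "aes" (priority 100) whenever the hardware is present.
 */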
static struct crypto_alg geode_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "geode-aes",
        .cra_priority           = 300,
        .cra_alignmask          = 15,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_cip,
        .cra_exit               = fallback_exit_cip,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_tfm_ctx),
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = geode_setkey_cip,
                        .cia_encrypt            = geode_encrypt,
                        .cia_decrypt            = geode_decrypt
                }
        }
};

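/*
 * The request context is sized to hold a complete fallback
 * skcipher_request, so requests with non-128-bit keys can be delegated
 * without any per-request allocation.
 */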
static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->fallback.skcipher =
                crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(tctx->fallback.skcipher)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.skcipher);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
                                    crypto_skcipher_reqsize(tctx->fallback.skcipher));
        return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(tctx->fallback.skcipher);
}

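/*
 * Common walk loop for the ECB/CBC skciphers. Non-128-bit keys are
 * delegated to the fallback tfm via the subrequest embedded in the
 * request context; otherwise the walk feeds full blocks to the engine
 * and skcipher_walk_done() is told how many tail bytes remain.
 */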
static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                *subreq = *req;
                skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
                if (dir == AES_DIR_DECRYPT)
                        return crypto_skcipher_decrypt(subreq);
                else
                        return crypto_skcipher_encrypt(subreq);
        }

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
                                round_down(nbytes, AES_BLOCK_SIZE),
                                walk.iv, mode, dir);
                err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
        }

        return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

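/*
 * Consumers reach these through the generic crypto API; a minimal
 * sketch (not part of this driver):
 *
 *      struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * With the hardware present, priority 400 makes "cbc-aes-geode" win
 * over the generic software implementation.
 */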
static struct skcipher_alg geode_skcipher_algs[] = {
        {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "cbc-aes-geode",
                .base.cra_priority      = 400,
                .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask     = 15,
                .base.cra_module        = THIS_MODULE,
                .init                   = geode_init_skcipher,
                .exit                   = geode_exit_skcipher,
                .setkey                 = geode_setkey_skcipher,
                .encrypt                = geode_cbc_encrypt,
                .decrypt                = geode_cbc_decrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        }, {
                .base.cra_name          = "ecb(aes)",
                .base.cra_driver_name   = "ecb-aes-geode",
                .base.cra_priority      = 400,
                .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask     = 15,
                .base.cra_module        = THIS_MODULE,
                .init                   = geode_init_skcipher,
                .exit                   = geode_exit_skcipher,
                .setkey                 = geode_setkey_skcipher,
                .encrypt                = geode_ecb_encrypt,
                .decrypt                = geode_ecb_decrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
        },
};

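/* Undo everything geode_aes_probe() set up */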
static void geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_skciphers(geode_skcipher_algs,
                                    ARRAY_SIZE(geode_skcipher_algs));

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}

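/*
 * Probe: map BAR 0, clear any stale engine events, then register the
 * cipher and skcipher algorithms. Each error label unwinds exactly the
 * steps that had already succeeded.
 */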
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);
        if (ret)
                return ret;

        ret = pci_request_regions(dev, "geode-aes");
        if (ret)
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);
        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        spin_lock_init(&lock);

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        ret = crypto_register_alg(&geode_alg);
        if (ret)
                goto eiomap;

        ret = crypto_register_skciphers(geode_skcipher_algs,
                                        ARRAY_SIZE(geode_skcipher_algs));
        if (ret)
                goto ealg;

        dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
        return 0;

 ealg:
        crypto_unregister_alg(&geode_alg);

 eiomap:
        pci_iounmap(dev, _iobase);

 erequest:
        pci_release_regions(dev);

 eenable:
        pci_disable_device(dev);

        dev_err(&dev->dev, "GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);