rsa-pkcs1pad.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RSA padding templates.
 *
 * Copyright (c) 2015 Intel Corporation
 */

#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

/*
 * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
 */
static const u8 rsa_digest_info_md5[] = {
	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
	0x05, 0x00, 0x04, 0x10
};

static const u8 rsa_digest_info_sha1[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x0e, 0x03, 0x02, 0x1a,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_rmd160[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x24, 0x03, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_sha224[] = {
	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
	0x05, 0x00, 0x04, 0x1c
};

static const u8 rsa_digest_info_sha256[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x20
};

static const u8 rsa_digest_info_sha384[] = {
	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
	0x05, 0x00, 0x04, 0x30
};

static const u8 rsa_digest_info_sha512[] = {
	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
	0x05, 0x00, 0x04, 0x40
};
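
/*
 * Added note: for reference, the sha256 template above decodes as the
 * standard DigestInfo prefix; the 32-byte hash itself follows the prefix
 * in the padded block:
 *
 *	0x30 0x31	SEQUENCE, 49 bytes		(DigestInfo)
 *	  0x30 0x0d	  SEQUENCE, 13 bytes		(AlgorithmIdentifier)
 *	    0x06 0x09 ..    OID 2.16.840.1.101.3.4.2.1	(id-sha256)
 *	    0x05 0x00	    NULL parameters
 *	  0x04 0x20	  OCTET STRING, 32 bytes	(digest value)
 *
 * The other templates follow the same pattern with their own OID and
 * digest length.
 */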
static const struct rsa_asn1_template {
	const char	*name;
	const u8	*data;
	size_t		size;
} rsa_asn1_templates[] = {
#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
	_(md5),
	_(sha1),
	_(rmd160),
	_(sha256),
	_(sha384),
	_(sha512),
	_(sha224),
	{ NULL }
#undef _
};

static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
{
	const struct rsa_asn1_template *p;

	for (p = rsa_asn1_templates; p->name; p++)
		if (strcmp(name, p->name) == 0)
			return p;
	return NULL;
}
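
/*
 * Added note on the three contexts below: pkcs1pad_ctx is the per-transform
 * state (the wrapped RSA tfm and its modulus size in bytes), pkcs1pad_inst_ctx
 * is the per-instance state set up by the template (the child akcipher spawn
 * and the optional DigestInfo chosen at instantiation time), and
 * pkcs1pad_request holds the per-request scratch buffers and the child
 * akcipher request.
 */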
struct pkcs1pad_ctx {
	struct crypto_akcipher *child;
	unsigned int key_size;
};

struct pkcs1pad_inst_ctx {
	struct crypto_akcipher_spawn spawn;
	const struct rsa_asn1_template *digest_info;
};

struct pkcs1pad_request {
	struct scatterlist in_sg[2], out_sg[1];
	uint8_t *in_buf, *out_buf;
	struct akcipher_request child_req;
};

static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err;

	ctx->key_size = 0;

	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
	if (err)
		return err;

	/* Find out new modulus size from rsa implementation */
	err = crypto_akcipher_maxsize(ctx->child);
	if (err > PAGE_SIZE)
		return -ENOTSUPP;

	ctx->key_size = err;
	return 0;
}

static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err;

	ctx->key_size = 0;

	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
	if (err)
		return err;

	/* Find out new modulus size from rsa implementation */
	err = crypto_akcipher_maxsize(ctx->child);
	if (err > PAGE_SIZE)
		return -ENOTSUPP;

	ctx->key_size = err;
	return 0;
}

static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	/*
	 * The maximum destination buffer size for the encrypt/sign operations
	 * will be the same as for RSA, even though it's smaller for
	 * decrypt/verify.
	 */
	return ctx->key_size;
}

static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
		struct scatterlist *next)
{
	int nsegs = next ? 2 : 1;

	sg_init_table(sg, nsegs);
	sg_set_buf(sg, buf, len);

	if (next)
		sg_chain(sg, nsegs, next);
}
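
/*
 * Added note: this completion path is shared by encrypt and sign. If the
 * child RSA operation returned fewer than key_size bytes (leading zero
 * octets are not represented in its result), the data is shifted right in
 * a temporary buffer and the front is zero-filled, so the caller always
 * sees exactly key_size output bytes.
 */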
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pad_len;
	unsigned int len;
	u8 *out_buf;

	if (err)
		goto out;

	len = req_ctx->child_req.dst_len;
	pad_len = ctx->key_size - len;

	/* Four billion to one */
	if (likely(!pad_len))
		goto out;

	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
	err = -ENOMEM;
	if (!out_buf)
		goto out;

	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
			  out_buf + pad_len, len);
	sg_copy_from_buffer(req->dst,
			    sg_nents_for_len(req->dst, ctx->key_size),
			    out_buf, ctx->key_size);
	kfree_sensitive(out_buf);

out:
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);

	return err;
}

static void pkcs1pad_encrypt_sign_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req,
			   pkcs1pad_encrypt_sign_complete(req, err));
}
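
/*
 * Added note: encryption below builds the RSAES-PKCS1-v1_5 block
 *
 *	EM = 0x00 || 0x02 || PS || 0x00 || M
 *
 * where PS is at least 8 nonzero random octets (guaranteed by rejecting
 * src_len > key_size - 11) and M is the caller's plaintext. The leading
 * 0x00 octet is implicit: only key_size - 1 bytes are handed to the child
 * RSA op, which treats its input as a big-endian integer.
 */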
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	if (out_buf[0] != 0x02)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] == 0x00)
			break;
	if (pos < 9 || pos == dst_len)
		goto done;
	pos++;

	err = 0;

	if (req->dst_len < dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				    sg_nents_for_len(req->dst, req->dst_len),
				    out_buf + pos, req->dst_len);

done:
	kfree_sensitive(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_decrypt_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
}

static int pkcs1pad_decrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			    ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_decrypt_complete_cb, req);

	/* Reuse input buffer, output to a new buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
				   req_ctx->out_sg, req->src_len,
				   ctx->key_size);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_decrypt_complete(req, err);

	return err;
}
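
/*
 * Added note: signing below builds the EMSA-PKCS1-v1_5 block (again with the
 * leading 0x00 octet implied by passing key_size - 1 bytes to the child op):
 *
 *	EM = 0x00 || 0x01 || PS || 0x00 || DigestInfo || H
 *
 * PS is at least 8 bytes of 0xff, DigestInfo is the ASN.1 prefix selected
 * when the instance was created (omitted when no hash was specified), and
 * H is the message digest supplied by the caller in req->src.
 */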
static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	int err;
	unsigned int ps_end, digest_size = 0;

	if (!ctx->key_size)
		return -EINVAL;

	if (digest_info)
		digest_size = digest_info->size;

	if (req->src_len + digest_size > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - digest_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	if (digest_info)
		memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
		       digest_info->size);

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	err = -EBADMSG;
	if (out_buf[0] != 0x01)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] != 0xff)
			break;

	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (digest_info) {
		if (digest_info->size > dst_len - pos)
			goto done;
		if (crypto_memneq(out_buf + pos, digest_info->data,
				  digest_info->size))
			goto done;

		pos += digest_info->size;
	}

	err = 0;

	if (req->dst_len != dst_len - pos) {
		err = -EKEYREJECTED;
		req->dst_len = dst_len - pos;
		goto done;
	}

	/* Extract appended digest. */
	sg_pcopy_to_buffer(req->src,
			   sg_nents_for_len(req->src,
					    req->src_len + req->dst_len),
			   req_ctx->out_buf + ctx->key_size,
			   req->dst_len, req->src_len);

	/* Do the actual verification step. */
	if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
		   req->dst_len) != 0)
		err = -EKEYREJECTED;

done:
	kfree_sensitive(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_verify_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
}

/*
 * The verify operation is here for completeness similar to the verification
 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
 * retrieve the DigestInfo from a signature, instead the user is expected
 * to call the sign operation to generate the expected signature and compare
 * signatures instead of the message-digests.
 */
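/*
 * Added note on the request convention used by the verify path below:
 * req->src carries the signature (req->src_len bytes) followed by the
 * expected message digest (req->dst_len bytes), and req->dst must be NULL.
 * The digest recovered from the signature is compared against the appended
 * one and -EKEYREJECTED is returned on mismatch.
 */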
static int pkcs1pad_verify(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (WARN_ON(req->dst) ||
	    WARN_ON(!req->dst_len) ||
	    !ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			    ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_verify_complete_cb, req);

	/* Reuse input buffer, output to a new buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
				   req_ctx->out_sg, req->src_len,
				   ctx->key_size);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_verify_complete(req, err);

	return err;
}

static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct crypto_akcipher *child_tfm;

	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
	if (IS_ERR(child_tfm))
		return PTR_ERR(child_tfm);

	ctx->child = child_tfm;
	return 0;
}

static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->child);
}

static void pkcs1pad_free(struct akcipher_instance *inst)
{
	struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
	struct crypto_akcipher_spawn *spawn = &ctx->spawn;

	crypto_drop_akcipher(spawn);
	kfree(inst);
}

static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct akcipher_instance *inst;
	struct pkcs1pad_inst_ctx *ctx;
	struct akcipher_alg *rsa_alg;
	const char *hash_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = akcipher_instance_ctx(inst);

	err = crypto_grab_akcipher(&ctx->spawn, akcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);

	if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
		err = -EINVAL;
		goto err_free_inst;
	}

	err = -ENAMETOOLONG;
	hash_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(hash_name)) {
		if (snprintf(inst->alg.base.cra_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_driver_name) >=
			     CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		ctx->digest_info = rsa_lookup_asn1(hash_name);
		if (!ctx->digest_info) {
			err = -EINVAL;
			goto err_free_inst;
		}

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
			     rsa_alg->base.cra_driver_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	}

	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;
	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		pkcs1pad_free(inst);
	}
	return err;
}

struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};
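
/*
 * Usage sketch (added note, not part of the original file): a kernel caller
 * would normally request an instance of this template by name and let the
 * crypto API construct it through pkcs1pad_create(), e.g.
 *
 *	struct crypto_akcipher *tfm;
 *
 *	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_akcipher(tfm);
 *
 * With no hash argument, "pkcs1pad(rsa)" yields an instance whose sign and
 * verify operations skip the DigestInfo prefix handling.
 */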