fsl_hash.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <asm/cache.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE	20
#define SHA256_DIGEST_SIZE	32
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};
static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;
	else
		return SHA256;
}
/* Create the context for progressive hashing using h/w acceleration.
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}
/*
 * Update sg table for progressive hashing using h/w acceleration
 *
 * The context is freed by this function if an error occurs.
 * We support at most 32 Scatter/Gather Entries.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final;
	caam_dma_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

#ifdef CONFIG_CAAM_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif

	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (caam_dma_addr_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}
/*
 * Perform progressive hashing on the given buffer and copy hash at
 * destination buffer
 *
 * The context is freed after completion of hash operation.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where hash is to be copied
 * @size: Size of the buffer being hashed
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;

	if (size < driver_hash[caam_algo].digestsize) {
		free(ctx);
		return -EINVAL;
	}

	for (i = 0; i < ctx->sg_num; i++)
		len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
			SG_ENTRY_LENGTH_MASK);

	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret)
		debug("Error %x\n", ret);
	else
		memcpy(dest_buf, ctx->hash, sizeof(ctx->hash));

	free(ctx);
	return ret;
}
/*
 * Hash pbuf in a single CAAM job and copy the digest to pout.
 *
 * Both pbuf and pout must be ARCH_DMA_MINALIGN aligned, since the
 * accelerator reads and writes them by DMA.
 *
 * @return 0 if ok, -ENOMEM or -EINVAL on error
 */
int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned int size;

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	if (!IS_ALIGNED((uintptr_t)pbuf, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)pout, ARCH_DMA_MINALIGN)) {
		puts("Error: Address arguments are not aligned\n");
		free(desc);
		return -EINVAL;
	}

	size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);

	ret = run_descriptor_jr(desc);

	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}
void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
	       unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not setup properly or it is faulty\n");
}

void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
	     unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA1))
		printf("CAAM was not setup properly or it is faulty\n");
}
int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
		  unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		  int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}
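/*
 * Illustrative sketch of the progressive flow (not compiled as part of this
 * driver): the caller provides a struct hash_algo whose name selects SHA-1 or
 * SHA-256, chains at most MAX_SG_32 updates, marks the last one with
 * is_last = 1, and then collects the digest. Obtaining algo via
 * hash_lookup_algo() and the buf1/len1/buf2/len2 names are assumptions for
 * this example only; the digest buffer is sized HASH_MAX_DIGEST_SIZE because
 * caam_hash_finish() copies the whole internal ctx->hash buffer.
 *
 *	struct hash_algo *algo;
 *	void *ctx;
 *	u8 digest[HASH_MAX_DIGEST_SIZE];
 *
 *	if (hash_lookup_algo("sha256", &algo) || hw_sha_init(algo, &ctx))
 *		return;
 *	if (hw_sha_update(algo, ctx, buf1, len1, 0))
 *		return;   (ctx is already freed on error)
 *	if (hw_sha_update(algo, ctx, buf2, len2, 1))
 *		return;
 *	hw_sha_finish(algo, ctx, digest, sizeof(digest));
 */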