/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
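/*
 * Illustrative usage sketch (not part of this driver): consumers never call
 * the functions below directly; they reach them through the generic crypto
 * ahash API once "sha256-caam" (or its hmac variant) has been registered.
 * A minimal one-shot digest might look like the following.  The guard macro
 * CAAMHASH_USAGE_EXAMPLE and the example_* names are hypothetical and exist
 * only to keep this sketch out of the build.
 */
#ifdef CAAMHASH_USAGE_EXAMPLE
static void example_done(struct crypto_async_request *areq, int err)
{
	/* called from the job ring interrupt path on completion */
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	/* picks the highest-priority sha256, i.e. this driver when loaded */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* data must be DMA-able (not vmalloc/stack memory) */
	sg_init_one(&sg, (void *)data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &done);
	ahash_request_set_crypt(req, &sg, out, len);

	/* dispatches to this driver's ahash_digest() below */
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
#endif /* CAAMHASH_USAGE_EXAMPLE */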
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
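/*
 * Worked example of the context sizing above (informational): for SHA-512
 * the hardware context is the 64-byte running digest plus the 8 bytes of
 * message-length state the MDHA keeps between jobs, so
 * MAX_CTX_LEN = 8 + 64 = 72 bytes.  Truncated variants (SHA-224/384) still
 * carry the full-width running state of their parent algorithm.
 */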
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash per-request state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put the buffer in the link table if it contains data; either way,
 * unmap any previous mapping first, since the buffer may already have been
 * used (and mapped) by an earlier job.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip key loading if it is already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash, read data from seqin following state->caam_ctx, and write the
 * resulting class2 context to seqout, which may be state->caam_ctx or
 * req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import the context, then read data
 * and write the result to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash first update (init) and digest: read data and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
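/*
 * Background for the split key above (informational): the MDHA "split key"
 * is the pair of precomputed inner/outer HMAC pad states, each the size of
 * the underlying algorithm's internal state.  For hmac(sha256), for
 * example, split_key_len is 2 * 32 = 64 bytes, and split_key_pad_len
 * rounds that up to a 16-byte boundary (already 64 here).
 */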
/* Digest the key if it is larger than one block */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	/* update the key length only after src_dma has been unmapped */
	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
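/*
 * Informational sketch of how an ahash_edesc sits in memory once allocated
 * (see the kmalloc() calls below, which reserve
 * sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes):
 *
 *   +--------------------+  <- edesc
 *   | struct ahash_edesc |
 *   +--------------------+  <- edesc->hw_desc
 *   | job descriptor     |     (DESC_JOB_IO_LEN bytes reserved)
 *   +--------------------+  <- edesc->sec4_sg
 *   | sec4 link table    |     (sec4_sg_bytes)
 *   +--------------------+
 */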
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
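/*
 * Worked example of the buffering arithmetic in ahash_update_ctx() above
 * (informational): with a 64-byte block size, 10 bytes already buffered and
 * a 100-byte update, in_len = 110, *next_buflen = 110 & 63 = 46 and
 * to_hash = 64.  One block is hashed now; the remaining 46 bytes are copied
 * into the other bounce buffer (buf_0/buf_1 flip via current_buf) to wait
 * for more data or for final/finup.
 */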
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link-table entry (index in entries, not bytes) */
	(edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) - 1)->len |=
		SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	/* no link table in this path */
	edesc->sec4_sg_bytes = 0;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* relies on buf_1/buflen_1 directly following buf_0/buflen_0 */
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
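/*
 * Note on the function pointers dispatched above: each request starts in
 * the "first" handlers installed by ahash_init().  A first update large
 * enough to hash at least one block moves the request to the *_ctx
 * handlers (the running digest now lives in state->caam_ctx); a first
 * update that only fills the bounce buffer moves it to the *_no_ctx
 * handlers instead, since no hardware context exists yet.
 */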
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));

	return 0;
}
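/*
 * Layout note for the export/import pair above: the exported blob is simply
 * the transform context followed by the request state,
 *
 *	out[0 .. sizeof(struct caam_hash_ctx) - 1] = *ctx
 *	out[sizeof(struct caam_hash_ctx) .. ]      = *state
 *
 * so the state size advertised to the crypto API must cover both
 * structures.  The blob embeds DMA addresses, so it is only meaningful when
 * re-imported on the same device instance.
 */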
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");