/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |    |-->| (operation) |
 *       .              |    |   | (load ctx2) |
 *       .              |    |   ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |-----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

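/*
 * Illustrative sketch (not a function in this driver): every request path
 * below instantiates the job descriptor layout pictured above with the
 * desc_constr.h helpers, roughly as:
 *
 *	desc = edesc->hw_desc;
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, nbytes, options);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	ret = caam_jr_enqueue(jrdev, desc, done_callback, req);
 */
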
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

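/*
 * Worked example (assuming CAAM_CMD_SZ is sizeof(u32) = 4 bytes): the
 * largest shared descriptor is the finalize variant plus an immediate
 * split key, i.e. DESC_AHASH_FINAL_LEN = (4 + 5) * 4 = 36 bytes plus
 * CAAM_MAX_HASH_KEY_SIZE = 2 * 64 = 128 bytes, giving
 * DESC_HASH_MAX_USED_BYTES = 164 and DESC_HASH_MAX_USED_LEN = 41 words.
 */
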
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

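/*
 * E.g. for SHA-512 the running digest is 64 bytes, so MAX_CTX_LEN is
 * 8 + 64 = 72 bytes; the HASH_MSG_LEN bytes hold the running message
 * length that MDHA keeps alongside the digest.
 */
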
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

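/*
 * buf_0/buf_1 act as a ping-pong pair: the request paths below hash only
 * whole blocks and park the sub-blocksize tail in the "current" buffer,
 * then toggle current_buf so the next update prepends that tail to the
 * new data. See e.g. ahash_update_ctx() for the selection logic:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 */
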
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data. The buffer
 * may also have been used previously and still be mapped, in which case
 * it needs to be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

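/*
 * Sketch of the shared-descriptor prologue built above, inferred from the
 * append_* calls (not a literal dump):
 *
 *	HEADER (serial sharing)
 *	JUMP if-shared -----------.  (only when a split key is set)
 *	KEY (immediate split key) |
 *	<----------- jump target -'
 *	LOAD (propagate errors from shared to job descriptor)
 */
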
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

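/*
 * The math command above computes the variable sequence input length at
 * run time (VARSEQINLEN = SEQINLEN + REG0, with REG0 assumed zero here),
 * which lets a single shared descriptor handle any input size: the
 * subsequent FIFO LOAD runs with the variable-length flag instead of a
 * hard-coded byte count.
 */
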
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

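/*
 * The two builders above differ only in whether a class 2 context is
 * imported first: ahash_ctx_data_to_out() (update/final/finup) resumes
 * from state->caam_ctx, while ahash_data_to_out() (first/digest) starts
 * the operation from scratch. Both end with ahash_append_load_str().
 */
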
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

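/*
 * Summary of the five shared descriptors built above and the algorithm
 * state each one programs:
 *
 *	sh_desc_update		OP_ALG_AS_UPDATE	ctx -> ctx
 *	sh_desc_update_first	OP_ALG_AS_INIT		src -> ctx
 *	sh_desc_fin		OP_ALG_AS_FINALIZE	ctx + buf -> digest
 *	sh_desc_finup		OP_ALG_AS_FINALIZE	ctx + src -> digest
 *	sh_desc_digest		OP_ALG_AS_INITFINAL	src -> digest
 */
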
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

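/*
 * hash_digest_key() realizes the standard HMAC rule (RFC 2104) that a key
 * longer than the block size is first hashed down to digestsize bytes;
 * ahash_setkey() below calls it only when keylen > blocksize.
 */
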
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto badkey;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;

badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

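/*
 * Worked example, assuming OP_ALG_ALGSEL_SHA256's submask selects entry 3
 * of mdpadlen[]: split_key_len = 32 * 2 = 64 bytes (the precomputed HMAC
 * ipad and opad states back to back) and split_key_pad_len =
 * ALIGN(64, 16) = 64 bytes.
 */
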
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

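/*
 * All request paths carve the edesc, the h/w job descriptor area and the
 * sec4 link table out of a single allocation:
 *
 *	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
 *			sec4_sg_bytes, GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 *			 DESC_JOB_IO_LEN;
 *
 * so hw_desc[] grows into the DESC_JOB_IO_LEN area and the link table
 * sits immediately after it.
 */
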
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
			state->current_buf = !state->current_buf;
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

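/*
 * Worked example of the buffering math above (hypothetical values): with
 * a 64-byte block size, *buflen = 10 leftover bytes and req->nbytes = 100,
 * in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64, so exactly
 * one block is hashed now and 46 bytes are parked in next_buf for the
 * following update.
 */
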
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last valid link-table entry (index, not byte offset) */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

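/*
 * Note the SEQ IN PTR optimization above: a single-segment source is
 * pointed to directly (options = 0), while a multi-segment source goes
 * through the sec4 link table with the scatter/gather flag (LDST_SGF).
 * The same pattern appears in ahash_update_first() below.
 */
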
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

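/*
 * The update/finup/final pointers in caam_hash_state implement a small
 * per-request state machine: ahash_init() installs the first/no-ctx
 * handlers, and once a job has produced a running context in
 * state->caam_ctx, the submit paths switch them to the *_ctx variants:
 *
 *	after init:		update_first / finup_first / final_no_ctx
 *	after first update:	update_ctx   / finup_ctx   / final_ctx
 */
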
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

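/*
 * Worked example: for SHA-256 the runninglen[] lookup above yields
 * ctx_len = HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes, the
 * amount of class 2 context imported/exported by the shared descriptors.
 */
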
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");