/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |-----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
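/*
 * A sketch of how such a job descriptor is assembled by the code below
 * (see ahash_digest() for the real thing); the helpers named here are
 * the ones actually used in this file:
 *
 *	desc = edesc->hw_desc;
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
 *	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 *						digestsize);
 *	caam_jr_enqueue(jrdev, desc, ahash_done, req);
 */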
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 bytes of message length */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
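/*
 * Layout sketch of the saved class 2 context (assuming digest-first
 * ordering; sizes shown for SHA-512, the largest case):
 *
 *	caam_ctx[0..63]   running digest
 *	caam_ctx[64..71]  64-bit message length counter kept by MDHA
 */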
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash per-request state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put the buffer in the link table if it contains data; the buffer
 * may have been mapped for a previous request, in which case it must be
 * unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table (@desc is unused here) */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
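/*
 * Taken together, the helpers above build an update job's link table as
 * (a sketch; the entry count varies with the buffered length and req->src):
 *
 *	[0]     running context (ctx_map_to_sec4_sg)
 *	[1]     buffered partial block, if any (try_buf_map_to_sec4_sg)
 *	[2..n]  req->src segments (src_map_to_sec4_sg)
 */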
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash, read data from seqin following state->caam_ctx, and write
 * the resulting class 2 context to seqout, which may be state->caam_ctx
 * or req->result.
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import context, read data from seqin
 * and write the result to seqout.
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
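/*
 * The two builders above differ only in the SEQ LOAD that imports a
 * previously saved running context; init and digest descriptors instead
 * start from the algorithm's initial state.
 */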
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
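/*
 * In short: five shared descriptors per tfm, one per entry point --
 * update (ctx in, ctx out), update_first (no ctx in, ctx out), final and
 * finup (ctx in, digest out), digest (no ctx in, digest out).
 */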
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key if it is larger than the block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}

	/* unmap with the original key length before *keylen is updated */
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
	*keylen = digestsize;

	kfree(desc);

	return ret;
}
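/*
 * This mirrors the HMAC rule of RFC 2104: a key longer than the block
 * size is first hashed down to digest size before being used as the key.
 */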
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret)
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
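/*
 * The "split key" generated above is CAAM's precomputed HMAC state: the
 * MDHA intermediate digests of (key XOR ipad) and (key XOR opad), so the
 * raw key is never needed again at request time.
 */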
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
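/*
 * The four completion callbacks above differ only in what they unmap:
 * whether a running context was involved (and in which DMA direction)
 * and whether a final digest was written to req->result.
 */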
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		/* initialize the job descriptor before handing desc around */
		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				SEC4_SG_LEN_FIN;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
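/*
 * Buffering rule used above: only whole blocks are sent to CAAM; the
 * remainder (in_len % blocksize) is parked in the idle half of the
 * buf_0/buf_1 pair and prepended to the next update, so the running
 * context always corresponds to an exact block boundary.
 */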
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link table *entry* (not byte) as final */
	(edesc->sec4_sg + (buflen ? 1 : 0))->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;
	/* no link table for this job; keep the unmap path consistent */
	edesc->sec4_sg_bytes = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	/* start with empty partial-block buffers */
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
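/*
 * Request state machine: ahash_init() installs the "first"/"no ctx"
 * handlers; once a job has produced a running context in
 * state->caam_ctx, ahash_update_first()/ahash_update_no_ctx() switch
 * the pointers over to the *_ctx variants.
 */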
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));

	return 0;
}
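/*
 * Note: export/import copy both the per-tfm context and the per-request
 * state, so a partially hashed request can be resumed later on the same
 * tfm. The blob contains DMA addresses and shared-descriptor copies, so
 * it is only meaningful within this driver instance.
 */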
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
1522 static struct caam_hash_template driver_hash[] = {
1525 .driver_name = "sha1-caam",
1526 .hmac_name = "hmac(sha1)",
1527 .hmac_driver_name = "hmac-sha1-caam",
1528 .blocksize = SHA1_BLOCK_SIZE,
1531 .update = ahash_update,
1532 .final = ahash_final,
1533 .finup = ahash_finup,
1534 .digest = ahash_digest,
1535 .export = ahash_export,
1536 .import = ahash_import,
1537 .setkey = ahash_setkey,
1539 .digestsize = SHA1_DIGEST_SIZE,
1542 .alg_type = OP_ALG_ALGSEL_SHA1,
1543 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1546 .driver_name = "sha224-caam",
1547 .hmac_name = "hmac(sha224)",
1548 .hmac_driver_name = "hmac-sha224-caam",
1549 .blocksize = SHA224_BLOCK_SIZE,
1552 .update = ahash_update,
1553 .final = ahash_final,
1554 .finup = ahash_finup,
1555 .digest = ahash_digest,
1556 .export = ahash_export,
1557 .import = ahash_import,
1558 .setkey = ahash_setkey,
1560 .digestsize = SHA224_DIGEST_SIZE,
1563 .alg_type = OP_ALG_ALGSEL_SHA224,
1564 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1567 .driver_name = "sha256-caam",
1568 .hmac_name = "hmac(sha256)",
1569 .hmac_driver_name = "hmac-sha256-caam",
1570 .blocksize = SHA256_BLOCK_SIZE,
1573 .update = ahash_update,
1574 .final = ahash_final,
1575 .finup = ahash_finup,
1576 .digest = ahash_digest,
1577 .export = ahash_export,
1578 .import = ahash_import,
1579 .setkey = ahash_setkey,
1581 .digestsize = SHA256_DIGEST_SIZE,
1584 .alg_type = OP_ALG_ALGSEL_SHA256,
1585 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1588 .driver_name = "sha384-caam",
1589 .hmac_name = "hmac(sha384)",
1590 .hmac_driver_name = "hmac-sha384-caam",
1591 .blocksize = SHA384_BLOCK_SIZE,
1594 .update = ahash_update,
1595 .final = ahash_final,
1596 .finup = ahash_finup,
1597 .digest = ahash_digest,
1598 .export = ahash_export,
1599 .import = ahash_import,
1600 .setkey = ahash_setkey,
1602 .digestsize = SHA384_DIGEST_SIZE,
1605 .alg_type = OP_ALG_ALGSEL_SHA384,
1606 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1609 .driver_name = "sha512-caam",
1610 .hmac_name = "hmac(sha512)",
1611 .hmac_driver_name = "hmac-sha512-caam",
1612 .blocksize = SHA512_BLOCK_SIZE,
1615 .update = ahash_update,
1616 .final = ahash_final,
1617 .finup = ahash_finup,
1618 .digest = ahash_digest,
1619 .export = ahash_export,
1620 .import = ahash_import,
1621 .setkey = ahash_setkey,
1623 .digestsize = SHA512_DIGEST_SIZE,
1626 .alg_type = OP_ALG_ALGSEL_SHA512,
1627 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1630 .driver_name = "md5-caam",
1631 .hmac_name = "hmac(md5)",
1632 .hmac_driver_name = "hmac-md5-caam",
1633 .blocksize = MD5_BLOCK_WORDS * 4,
1636 .update = ahash_update,
1637 .final = ahash_final,
1638 .finup = ahash_finup,
1639 .digest = ahash_digest,
1640 .export = ahash_export,
1641 .import = ahash_import,
1642 .setkey = ahash_setkey,
1644 .digestsize = MD5_DIGEST_SIZE,
1647 .alg_type = OP_ALG_ALGSEL_MD5,
1648 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
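/*
 * Usage sketch (illustrative, not part of the driver): a kernel client
 * reaches these algorithms through the generic ahash API; with
 * CAAM_CRA_PRIORITY at 3000, "sha256" resolves to sha256-caam once this
 * module is loaded. my_complete, my_ctx, sgl and nbytes below are
 * hypothetical caller-side names.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_complete, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	ret = crypto_ahash_digest(req);
 *
 * This lands in ahash_digest() above; -EINPROGRESS means my_complete
 * will run from the job ring completion path once CAAM finishes.
 */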
struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);
	int ret = 0;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");