firefly-linux-kernel-4.4.55.git: drivers/crypto/caam/caamhash.c
1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
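/*
 * Illustrative sketch only (not part of this driver): a job descriptor of the
 * shape above is typically put together with the desc_constr.h helpers used
 * throughout this file, roughly as follows (sh_desc_dma, src_dma, dst_dma,
 * the lengths, options and the completion callback are placeholders):
 *
 *      desc = edesc->hw_desc;
 *      init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                           HDR_SHARE_DEFER | HDR_REVERSE);
 *      append_seq_in_ptr(desc, src_dma, src_len, options);
 *      append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *      ret = caam_jr_enqueue(jrdev, desc, done_callback, req);
 */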
55
56 #include "compat.h"
57
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65
66 #define CAAM_CRA_PRIORITY               3000
67
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
70
71 #define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE
73
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE                 (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN           (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN     (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN           (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82 #define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
83                                          CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85
86 /* caam context sizes for hashes: running digest + 8 */
87 #define HASH_MSG_LEN                    8
88 #define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define debug(format, arg...) printk(format, arg)
93 #else
94 #define debug(format, arg...)
95 #endif
96
97
98 static struct list_head hash_list;
99
100 /* ahash per-session context */
101 struct caam_hash_ctx {
102         struct device *jrdev;
103         u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107         u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108         dma_addr_t sh_desc_update_dma;
109         dma_addr_t sh_desc_update_first_dma;
110         dma_addr_t sh_desc_fin_dma;
111         dma_addr_t sh_desc_digest_dma;
112         dma_addr_t sh_desc_finup_dma;
113         u32 alg_type;
114         u32 alg_op;
115         u8 key[CAAM_MAX_HASH_KEY_SIZE];
116         dma_addr_t key_dma;
117         int ctx_len;
118         unsigned int split_key_len;
119         unsigned int split_key_pad_len;
120 };
121
122 /* ahash state */
123 struct caam_hash_state {
124         dma_addr_t buf_dma;
125         dma_addr_t ctx_dma;
126         u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127         int buflen_0;
128         u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129         int buflen_1;
130         u8 caam_ctx[MAX_CTX_LEN];
131         int (*update)(struct ahash_request *req);
132         int (*final)(struct ahash_request *req);
133         int (*finup)(struct ahash_request *req);
134         int current_buf;
135 };
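/*
 * buf_0/buf_1 double-buffer partial blocks between requests: current_buf
 * selects the buffer holding bytes carried over from the previous request,
 * while the other buffer collects the tail of the current one. The selection
 * idiom used throughout this file is, for example:
 *
 *      u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 */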
136
137 /* Common job descriptor seq in/out ptr routines */
138
139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140 static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141                                        struct caam_hash_state *state,
142                                        int ctx_len)
143 {
144         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145                                         ctx_len, DMA_FROM_DEVICE);
146         append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
147 }
148
149 /* Map req->result, and append seq_out_ptr command that points to it */
150 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
151                                                 u8 *result, int digestsize)
152 {
153         dma_addr_t dst_dma;
154
155         dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
156         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
157
158         return dst_dma;
159 }
160
161 /* Map current buffer in state and put it in link table */
162 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
163                                             struct sec4_sg_entry *sec4_sg,
164                                             u8 *buf, int buflen)
165 {
166         dma_addr_t buf_dma;
167
168         buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
169         dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
170
171         return buf_dma;
172 }
173
174 /* Map req->src and put it in link table */
175 static inline void src_map_to_sec4_sg(struct device *jrdev,
176                                       struct scatterlist *src, int src_nents,
177                                       struct sec4_sg_entry *sec4_sg,
178                                       bool chained)
179 {
180         dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
181         sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
182 }
183
184 /*
185  * Only put the buffer in the link table if it contains data. The buffer may
186  * have been used before, in which case it first needs to be unmapped.
187  */
188 static inline dma_addr_t
189 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
190                        u8 *buf, dma_addr_t buf_dma, int buflen,
191                        int last_buflen)
192 {
193         if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
194                 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
195         if (buflen)
196                 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
197         else
198                 buf_dma = 0;
199
200         return buf_dma;
201 }
202
203 /* Map state->caam_ctx, and add it to link table */
204 static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
205                                       struct caam_hash_state *state,
206                                       int ctx_len,
207                                       struct sec4_sg_entry *sec4_sg,
208                                       u32 flag)
209 {
210         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
211         dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
212 }
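/*
 * For the context-carrying paths (update/final/finup) the helpers above are
 * combined into a single hardware link table of the general form
 *
 *      [0] caam_ctx   [1] buffered bytes (if any)   [2..] req->src segments
 *
 * with SEC4_SG_LEN_FIN set on the last entry the job is meant to consume.
 */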
213
214 /* Common shared descriptor commands */
215 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
216 {
217         append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
218                           ctx->split_key_len, CLASS_2 |
219                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
220 }
221
222 /* Append key if it has been set */
223 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
224 {
225         u32 *key_jump_cmd;
226
227         init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229         if (ctx->split_key_len) {
230                 /* Skip if already shared */
231                 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
232                                            JUMP_COND_SHRD);
233
234                 append_key_ahash(desc, ctx);
235
236                 set_jump_tgt_here(desc, key_jump_cmd);
237         }
238
239         /* Propagate errors from shared to job descriptor */
240         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
241 }
242
243 /*
244  * For ahash, read data from seqin following state->caam_ctx, and write the
245  * resulting class 2 context to seqout, which may be either state->caam_ctx
246  * or req->result.
247  */
248 static inline void ahash_append_load_str(u32 *desc, int digestsize)
249 {
250         /* Calculate remaining bytes to read */
251         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
252
253         /* Read remaining bytes */
254         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
255                              FIFOLD_TYPE_MSG | KEY_VLF);
256
257         /* Store class2 context bytes */
258         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
259                          LDST_SRCDST_BYTE_CONTEXT);
260 }
261
262 /*
263  * For ahash update, final and finup, import context, read and write to seqout
264  */
265 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
266                                          int digestsize,
267                                          struct caam_hash_ctx *ctx)
268 {
269         init_sh_desc_key_ahash(desc, ctx);
270
271         /* Import context from software */
272         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
273                    LDST_CLASS_2_CCB | ctx->ctx_len);
274
275         /* Class 2 operation */
276         append_operation(desc, op | state | OP_ALG_ENCRYPT);
277
278         /*
279          * Load from buf and/or src and write to req->result or state->context
280          */
281         ahash_append_load_str(desc, digestsize);
282 }
283
284 /* For ahash first update and digest, read and write to seqout */
285 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
286                                      int digestsize, struct caam_hash_ctx *ctx)
287 {
288         init_sh_desc_key_ahash(desc, ctx);
289
290         /* Class 2 operation */
291         append_operation(desc, op | state | OP_ALG_ENCRYPT);
292
293         /*
294          * Load from buf and/or src and write to req->result or state->context
295          */
296         ahash_append_load_str(desc, digestsize);
297 }
298
299 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
300 {
301         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
302         int digestsize = crypto_ahash_digestsize(ahash);
303         struct device *jrdev = ctx->jrdev;
304         u32 have_key = 0;
305         u32 *desc;
306
307         if (ctx->split_key_len)
308                 have_key = OP_ALG_AAI_HMAC_PRECOMP;
309
310         /* ahash_update shared descriptor */
311         desc = ctx->sh_desc_update;
312
313         init_sh_desc(desc, HDR_SHARE_SERIAL);
314
315         /* Import context from software */
316         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
317                    LDST_CLASS_2_CCB | ctx->ctx_len);
318
319         /* Class 2 operation */
320         append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
321                          OP_ALG_ENCRYPT);
322
323         /* Load data and write to result or context */
324         ahash_append_load_str(desc, ctx->ctx_len);
325
326         ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
327                                                  DMA_TO_DEVICE);
328         if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
329                 dev_err(jrdev, "unable to map shared descriptor\n");
330                 return -ENOMEM;
331         }
332 #ifdef DEBUG
333         print_hex_dump(KERN_ERR,
334                        "ahash update shdesc@"__stringify(__LINE__)": ",
335                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336 #endif
337
338         /* ahash_update_first shared descriptor */
339         desc = ctx->sh_desc_update_first;
340
341         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342                           ctx->ctx_len, ctx);
343
344         ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345                                                        desc_bytes(desc),
346                                                        DMA_TO_DEVICE);
347         if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348                 dev_err(jrdev, "unable to map shared descriptor\n");
349                 return -ENOMEM;
350         }
351 #ifdef DEBUG
352         print_hex_dump(KERN_ERR,
353                        "ahash update first shdesc@"__stringify(__LINE__)": ",
354                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
355 #endif
356
357         /* ahash_final shared descriptor */
358         desc = ctx->sh_desc_fin;
359
360         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
361                               OP_ALG_AS_FINALIZE, digestsize, ctx);
362
363         ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
364                                               DMA_TO_DEVICE);
365         if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
366                 dev_err(jrdev, "unable to map shared descriptor\n");
367                 return -ENOMEM;
368         }
369 #ifdef DEBUG
370         print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
371                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
372                        desc_bytes(desc), 1);
373 #endif
374
375         /* ahash_finup shared descriptor */
376         desc = ctx->sh_desc_finup;
377
378         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
379                               OP_ALG_AS_FINALIZE, digestsize, ctx);
380
381         ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
382                                                 DMA_TO_DEVICE);
383         if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
384                 dev_err(jrdev, "unable to map shared descriptor\n");
385                 return -ENOMEM;
386         }
387 #ifdef DEBUG
388         print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
389                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
390                        desc_bytes(desc), 1);
391 #endif
392
393         /* ahash_digest shared descriptor */
394         desc = ctx->sh_desc_digest;
395
396         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
397                           digestsize, ctx);
398
399         ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
400                                                  desc_bytes(desc),
401                                                  DMA_TO_DEVICE);
402         if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
403                 dev_err(jrdev, "unable to map shared descriptor\n");
404                 return -ENOMEM;
405         }
406 #ifdef DEBUG
407         print_hex_dump(KERN_ERR,
408                        "ahash digest shdesc@"__stringify(__LINE__)": ",
409                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
410                        desc_bytes(desc), 1);
411 #endif
412
413         return 0;
414 }
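/*
 * In total five shared descriptors are prepared per tfm: update (UPDATE on an
 * imported context), update_first (INIT), final and finup (FINALIZE on an
 * imported context) and digest (INITFINAL), matching the per-request
 * update/final/finup state machine below.
 */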
415
416 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
417                               u32 keylen)
418 {
419         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
420                                ctx->split_key_pad_len, key_in, keylen,
421                                ctx->alg_op);
422 }
423
424 /* Digest the key when it is longer than the algorithm's block size */
425 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
426                            u32 *keylen, u8 *key_out, u32 digestsize)
427 {
428         struct device *jrdev = ctx->jrdev;
429         u32 *desc;
430         struct split_key_result result;
431         dma_addr_t src_dma, dst_dma;
432         int ret = 0;
433
434         desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
435         if (!desc) {
436                 dev_err(jrdev, "unable to allocate key input memory\n");
437                 return -ENOMEM;
438         }
439
440         init_job_desc(desc, 0);
441
442         src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
443                                  DMA_TO_DEVICE);
444         if (dma_mapping_error(jrdev, src_dma)) {
445                 dev_err(jrdev, "unable to map key input memory\n");
446                 kfree(desc);
447                 return -ENOMEM;
448         }
449         dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
450                                  DMA_FROM_DEVICE);
451         if (dma_mapping_error(jrdev, dst_dma)) {
452                 dev_err(jrdev, "unable to map key output memory\n");
453                 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
454                 kfree(desc);
455                 return -ENOMEM;
456         }
457
458         /* Job descriptor to perform unkeyed hash on key_in */
459         append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
460                          OP_ALG_AS_INITFINAL);
461         append_seq_in_ptr(desc, src_dma, *keylen, 0);
462         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
463                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
464         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
465         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
466                          LDST_SRCDST_BYTE_CONTEXT);
467
468 #ifdef DEBUG
469         print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
470                        DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
471         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
472                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
473 #endif
474
475         result.err = 0;
476         init_completion(&result.completion);
477
478         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
479         if (!ret) {
480                 /* in progress */
481                 wait_for_completion_interruptible(&result.completion);
482                 ret = result.err;
483 #ifdef DEBUG
484                 print_hex_dump(KERN_ERR,
485                                "digested key@"__stringify(__LINE__)": ",
486                                DUMP_PREFIX_ADDRESS, 16, 4, key_out,
487                                digestsize, 1);
488 #endif
489         }
490         dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
491         dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
492
493         *keylen = digestsize;
494
495         kfree(desc);
496
497         return ret;
498 }
499
500 static int ahash_setkey(struct crypto_ahash *ahash,
501                         const u8 *key, unsigned int keylen)
502 {
503         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
504         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
505         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
506         struct device *jrdev = ctx->jrdev;
507         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
508         int digestsize = crypto_ahash_digestsize(ahash);
509         int ret = 0;
510         u8 *hashed_key = NULL;
511
512 #ifdef DEBUG
513         printk(KERN_ERR "keylen %d\n", keylen);
514 #endif
515
516         if (keylen > blocksize) {
517                 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
518                                      GFP_DMA);
519                 if (!hashed_key)
520                         return -ENOMEM;
521                 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
522                                       digestsize);
523                 if (ret)
524                         goto badkey;
525                 key = hashed_key;
526         }
527
528         /* Pick class 2 key length from algorithm submask */
529         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
530                                       OP_ALG_ALGSEL_SHIFT] * 2;
531         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
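        /*
         * Worked example (illustrative): for hmac(sha256) the submask picks
         * mdpadlen[3] = 32, so split_key_len = 64 (ipad and opad halves) and
         * split_key_pad_len = ALIGN(64, 16) = 64; for hmac(sha1) it picks
         * mdpadlen[1] = 20, giving split_key_len = 40 padded to 48.
         */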
532
533 #ifdef DEBUG
534         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
535                ctx->split_key_len, ctx->split_key_pad_len);
536         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
537                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
538 #endif
539
540         ret = gen_split_hash_key(ctx, key, keylen);
541         if (ret)
542                 goto badkey;
543
544         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
545                                       DMA_TO_DEVICE);
546         if (dma_mapping_error(jrdev, ctx->key_dma)) {
547                 dev_err(jrdev, "unable to map key i/o memory\n");
548                 ret = -ENOMEM;
549                 goto map_err;
550         }
551 #ifdef DEBUG
552         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
553                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
554                        ctx->split_key_pad_len, 1);
555 #endif
556
557         ret = ahash_set_sh_desc(ahash);
558         if (ret) {
559                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
560                                  DMA_TO_DEVICE);
561         }
562
563 map_err:
564         kfree(hashed_key);
565         return ret;
566 badkey:
567         kfree(hashed_key);
568         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
569         return -EINVAL;
570 }
571
572 /*
573  * ahash_edesc - s/w-extended ahash descriptor
574  * @dst_dma: physical mapped address of req->result
575  * @sec4_sg_dma: physical mapped address of h/w link table
576  * @chained: if source is chained
577  * @src_nents: number of segments in input scatterlist
578  * @sec4_sg_bytes: length of dma mapped sec4_sg space
579  * @sec4_sg: pointer to h/w link table
580  * @hw_desc: the h/w job descriptor followed by any referenced link tables
581  */
582 struct ahash_edesc {
583         dma_addr_t dst_dma;
584         dma_addr_t sec4_sg_dma;
585         bool chained;
586         int src_nents;
587         int sec4_sg_bytes;
588         struct sec4_sg_entry *sec4_sg;
589         u32 hw_desc[0];
590 };
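/*
 * The extended descriptor is allocated as one contiguous block,
 *
 *      struct ahash_edesc | hw_desc (DESC_JOB_IO_LEN bytes) | sec4_sg entries
 *
 * which is why sec4_sg is always derived below as
 * (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN.
 */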
591
592 static inline void ahash_unmap(struct device *dev,
593                         struct ahash_edesc *edesc,
594                         struct ahash_request *req, int dst_len)
595 {
596         if (edesc->src_nents)
597                 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
598                                      DMA_TO_DEVICE, edesc->chained);
599         if (edesc->dst_dma)
600                 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
601
602         if (edesc->sec4_sg_bytes)
603                 dma_unmap_single(dev, edesc->sec4_sg_dma,
604                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
605 }
606
607 static inline void ahash_unmap_ctx(struct device *dev,
608                         struct ahash_edesc *edesc,
609                         struct ahash_request *req, int dst_len, u32 flag)
610 {
611         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
612         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
613         struct caam_hash_state *state = ahash_request_ctx(req);
614
615         if (state->ctx_dma)
616                 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
617         ahash_unmap(dev, edesc, req, dst_len);
618 }
619
620 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
621                        void *context)
622 {
623         struct ahash_request *req = context;
624         struct ahash_edesc *edesc;
625         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
626         int digestsize = crypto_ahash_digestsize(ahash);
627 #ifdef DEBUG
628         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
629         struct caam_hash_state *state = ahash_request_ctx(req);
630
631         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
632 #endif
633
634         edesc = (struct ahash_edesc *)((char *)desc -
635                  offsetof(struct ahash_edesc, hw_desc));
636         if (err)
637                 caam_jr_strstatus(jrdev, err);
638
639         ahash_unmap(jrdev, edesc, req, digestsize);
640         kfree(edesc);
641
642 #ifdef DEBUG
643         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
644                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
645                        ctx->ctx_len, 1);
646         if (req->result)
647                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
648                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
649                                digestsize, 1);
650 #endif
651
652         req->base.complete(&req->base, err);
653 }
654
655 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
656                             void *context)
657 {
658         struct ahash_request *req = context;
659         struct ahash_edesc *edesc;
660         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
661         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
662 #ifdef DEBUG
663         struct caam_hash_state *state = ahash_request_ctx(req);
664         int digestsize = crypto_ahash_digestsize(ahash);
665
666         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
667 #endif
668
669         edesc = (struct ahash_edesc *)((char *)desc -
670                  offsetof(struct ahash_edesc, hw_desc));
671         if (err)
672                 caam_jr_strstatus(jrdev, err);
673
674         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
675         kfree(edesc);
676
677 #ifdef DEBUG
678         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
679                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
680                        ctx->ctx_len, 1);
681         if (req->result)
682                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
683                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
684                                digestsize, 1);
685 #endif
686
687         req->base.complete(&req->base, err);
688 }
689
690 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
691                                void *context)
692 {
693         struct ahash_request *req = context;
694         struct ahash_edesc *edesc;
695         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
696         int digestsize = crypto_ahash_digestsize(ahash);
697 #ifdef DEBUG
698         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
699         struct caam_hash_state *state = ahash_request_ctx(req);
700
701         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
702 #endif
703
704         edesc = (struct ahash_edesc *)((char *)desc -
705                  offsetof(struct ahash_edesc, hw_desc));
706         if (err)
707                 caam_jr_strstatus(jrdev, err);
708
709         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
710         kfree(edesc);
711
712 #ifdef DEBUG
713         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
714                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
715                        ctx->ctx_len, 1);
716         if (req->result)
717                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
718                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
719                                digestsize, 1);
720 #endif
721
722         req->base.complete(&req->base, err);
723 }
724
725 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
726                                void *context)
727 {
728         struct ahash_request *req = context;
729         struct ahash_edesc *edesc;
730         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
731         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
732 #ifdef DEBUG
733         struct caam_hash_state *state = ahash_request_ctx(req);
734         int digestsize = crypto_ahash_digestsize(ahash);
735
736         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
737 #endif
738
739         edesc = (struct ahash_edesc *)((char *)desc -
740                  offsetof(struct ahash_edesc, hw_desc));
741         if (err)
742                 caam_jr_strstatus(jrdev, err);
743
744         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
745         kfree(edesc);
746
747 #ifdef DEBUG
748         print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
749                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
750                        ctx->ctx_len, 1);
751         if (req->result)
752                 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
753                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
754                                digestsize, 1);
755 #endif
756
757         req->base.complete(&req->base, err);
758 }
759
760 /* submit update job descriptor */
761 static int ahash_update_ctx(struct ahash_request *req)
762 {
763         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
764         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
765         struct caam_hash_state *state = ahash_request_ctx(req);
766         struct device *jrdev = ctx->jrdev;
767         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
768                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
769         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
770         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
771         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
772         int *next_buflen = state->current_buf ? &state->buflen_0 :
773                            &state->buflen_1, last_buflen;
774         int in_len = *buflen + req->nbytes, to_hash;
775         u32 *sh_desc = ctx->sh_desc_update, *desc;
776         dma_addr_t ptr = ctx->sh_desc_update_dma;
777         int src_nents, sec4_sg_bytes, sec4_sg_src_index;
778         struct ahash_edesc *edesc;
779         bool chained = false;
780         int ret = 0;
781         int sh_len;
782
783         last_buflen = *next_buflen;
784         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
785         to_hash = in_len - *next_buflen;
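        /*
         * Example (illustrative): with a 64-byte block size, 10 buffered
         * bytes and req->nbytes = 150, in_len = 160, *next_buflen becomes
         * 160 & 63 = 32 and to_hash = 128, i.e. two full blocks are sent to
         * the engine and 32 bytes are carried over in next_buf.
         */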
786
787         if (to_hash) {
788                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
789                                        &chained);
790                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
791                 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
792                                  sizeof(struct sec4_sg_entry);
793
794                 /*
795                  * allocate space for base edesc and hw desc commands,
796                  * link tables
797                  */
798                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
799                                 sec4_sg_bytes, GFP_DMA | flags);
800                 if (!edesc) {
801                         dev_err(jrdev,
802                                 "could not allocate extended descriptor\n");
803                         return -ENOMEM;
804                 }
805
806                 edesc->src_nents = src_nents;
807                 edesc->chained = chained;
808                 edesc->sec4_sg_bytes = sec4_sg_bytes;
809                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
810                                  DESC_JOB_IO_LEN;
811                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
812                                                      sec4_sg_bytes,
813                                                      DMA_TO_DEVICE);
814
815                 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
816                                    edesc->sec4_sg, DMA_BIDIRECTIONAL);
817
818                 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
819                                                         edesc->sec4_sg + 1,
820                                                         buf, state->buf_dma,
821                                                         *buflen, last_buflen);
822
823                 if (src_nents) {
824                         src_map_to_sec4_sg(jrdev, req->src, src_nents,
825                                            edesc->sec4_sg + sec4_sg_src_index,
826                                            chained);
827                         if (*next_buflen) {
828                                 sg_copy_part(next_buf, req->src, to_hash -
829                                              *buflen, req->nbytes);
830                                 state->current_buf = !state->current_buf;
831                         }
832                 } else {
833                         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
834                                                         SEC4_SG_LEN_FIN;
835                 }
836
837                 sh_len = desc_len(sh_desc);
838                 desc = edesc->hw_desc;
839                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
840                                      HDR_REVERSE);
841
842                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
843                                        to_hash, LDST_SGF);
844
845                 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
846
847 #ifdef DEBUG
848                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
849                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
850                                desc_bytes(desc), 1);
851 #endif
852
853                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
854                 if (!ret) {
855                         ret = -EINPROGRESS;
856                 } else {
857                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
858                                            DMA_BIDIRECTIONAL);
859                         kfree(edesc);
860                 }
861         } else if (*next_buflen) {
862                 sg_copy(buf + *buflen, req->src, req->nbytes);
863                 *buflen = *next_buflen;
864                 *next_buflen = last_buflen;
865         }
866 #ifdef DEBUG
867         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
868                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
869         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
870                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
871                        *next_buflen, 1);
872 #endif
873
874         return ret;
875 }
876
877 static int ahash_final_ctx(struct ahash_request *req)
878 {
879         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
880         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
881         struct caam_hash_state *state = ahash_request_ctx(req);
882         struct device *jrdev = ctx->jrdev;
883         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
884                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
885         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
886         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
887         int last_buflen = state->current_buf ? state->buflen_0 :
888                           state->buflen_1;
889         u32 *sh_desc = ctx->sh_desc_fin, *desc;
890         dma_addr_t ptr = ctx->sh_desc_fin_dma;
891         int sec4_sg_bytes;
892         int digestsize = crypto_ahash_digestsize(ahash);
893         struct ahash_edesc *edesc;
894         int ret = 0;
895         int sh_len;
896
897         sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
898
899         /* allocate space for base edesc and hw desc commands, link tables */
900         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
901                         sec4_sg_bytes, GFP_DMA | flags);
902         if (!edesc) {
903                 dev_err(jrdev, "could not allocate extended descriptor\n");
904                 return -ENOMEM;
905         }
906
907         sh_len = desc_len(sh_desc);
908         desc = edesc->hw_desc;
909         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
910
911         edesc->sec4_sg_bytes = sec4_sg_bytes;
912         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
913                          DESC_JOB_IO_LEN;
914         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
915                                             sec4_sg_bytes, DMA_TO_DEVICE);
916         edesc->src_nents = 0;
917
918         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
919                            DMA_TO_DEVICE);
920
921         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
922                                                 buf, state->buf_dma, buflen,
923                                                 last_buflen);
924         (edesc->sec4_sg + (buflen ? 1 : 0))->len |= SEC4_SG_LEN_FIN;
925
926         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
927                           LDST_SGF);
928
929         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
930                                                 digestsize);
931
932 #ifdef DEBUG
933         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
934                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
935 #endif
936
937         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
938         if (!ret) {
939                 ret = -EINPROGRESS;
940         } else {
941                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
942                 kfree(edesc);
943         }
944
945         return ret;
946 }
947
948 static int ahash_finup_ctx(struct ahash_request *req)
949 {
950         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
951         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
952         struct caam_hash_state *state = ahash_request_ctx(req);
953         struct device *jrdev = ctx->jrdev;
954         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
955                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
956         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
957         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
958         int last_buflen = state->current_buf ? state->buflen_0 :
959                           state->buflen_1;
960         u32 *sh_desc = ctx->sh_desc_finup, *desc;
961         dma_addr_t ptr = ctx->sh_desc_finup_dma;
962         int sec4_sg_bytes, sec4_sg_src_index;
963         int src_nents;
964         int digestsize = crypto_ahash_digestsize(ahash);
965         struct ahash_edesc *edesc;
966         bool chained = false;
967         int ret = 0;
968         int sh_len;
969
970         src_nents = __sg_count(req->src, req->nbytes, &chained);
971         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
972         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
973                          sizeof(struct sec4_sg_entry);
974
975         /* allocate space for base edesc and hw desc commands, link tables */
976         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
977                         sec4_sg_bytes, GFP_DMA | flags);
978         if (!edesc) {
979                 dev_err(jrdev, "could not allocate extended descriptor\n");
980                 return -ENOMEM;
981         }
982
983         sh_len = desc_len(sh_desc);
984         desc = edesc->hw_desc;
985         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
986
987         edesc->src_nents = src_nents;
988         edesc->chained = chained;
989         edesc->sec4_sg_bytes = sec4_sg_bytes;
990         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
991                          DESC_JOB_IO_LEN;
992         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
993                                             sec4_sg_bytes, DMA_TO_DEVICE);
994
995         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
996                            DMA_TO_DEVICE);
997
998         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
999                                                 buf, state->buf_dma, buflen,
1000                                                 last_buflen);
1001
1002         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1003                            sec4_sg_src_index, chained);
1004
1005         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1006                                buflen + req->nbytes, LDST_SGF);
1007
1008         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1009                                                 digestsize);
1010
1011 #ifdef DEBUG
1012         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1013                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1014 #endif
1015
1016         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1017         if (!ret) {
1018                 ret = -EINPROGRESS;
1019         } else {
1020                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1021                 kfree(edesc);
1022         }
1023
1024         return ret;
1025 }
1026
1027 static int ahash_digest(struct ahash_request *req)
1028 {
1029         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1030         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1031         struct device *jrdev = ctx->jrdev;
1032         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1033                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1034         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1035         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1036         int digestsize = crypto_ahash_digestsize(ahash);
1037         int src_nents, sec4_sg_bytes;
1038         dma_addr_t src_dma;
1039         struct ahash_edesc *edesc;
1040         bool chained = false;
1041         int ret = 0;
1042         u32 options;
1043         int sh_len;
1044
1045         src_nents = sg_count(req->src, req->nbytes, &chained);
1046         dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1047                            chained);
1048         sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1049
1050         /* allocate space for base edesc and hw desc commands, link tables */
1051         edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1052                         DESC_JOB_IO_LEN, GFP_DMA | flags);
1053         if (!edesc) {
1054                 dev_err(jrdev, "could not allocate extended descriptor\n");
1055                 return -ENOMEM;
1056         }
1057         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1058                           DESC_JOB_IO_LEN;
1059         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1060                                             sec4_sg_bytes, DMA_TO_DEVICE);
1061         edesc->src_nents = src_nents;
1062         edesc->chained = chained;
1063
1064         sh_len = desc_len(sh_desc);
1065         desc = edesc->hw_desc;
1066         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1067
1068         if (src_nents) {
1069                 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1070                 src_dma = edesc->sec4_sg_dma;
1071                 options = LDST_SGF;
1072         } else {
1073                 src_dma = sg_dma_address(req->src);
1074                 options = 0;
1075         }
1076         append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1077
1078         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1079                                                 digestsize);
1080
1081 #ifdef DEBUG
1082         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1083                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1084 #endif
1085
1086         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1087         if (!ret) {
1088                 ret = -EINPROGRESS;
1089         } else {
1090                 ahash_unmap(jrdev, edesc, req, digestsize);
1091                 kfree(edesc);
1092         }
1093
1094         return ret;
1095 }
1096
1097 /* submit ahash final if it is the first job descriptor */
1098 static int ahash_final_no_ctx(struct ahash_request *req)
1099 {
1100         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1101         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1102         struct caam_hash_state *state = ahash_request_ctx(req);
1103         struct device *jrdev = ctx->jrdev;
1104         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1105                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1106         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1107         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1108         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1109         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1110         int digestsize = crypto_ahash_digestsize(ahash);
1111         struct ahash_edesc *edesc;
1112         int ret = 0;
1113         int sh_len;
1114
1115         /* allocate space for base edesc and hw desc commands, link tables */
1116         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1117                         GFP_DMA | flags);
1118         if (!edesc) {
1119                 dev_err(jrdev, "could not allocate extended descriptor\n");
1120                 return -ENOMEM;
1121         }
1122
1123         sh_len = desc_len(sh_desc);
1124         desc = edesc->hw_desc;
1125         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1126
1127         state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1128
1129         append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1130
1131         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1132                                                 digestsize);
1133         edesc->src_nents = 0;
1134
1135 #ifdef DEBUG
1136         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1137                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1138 #endif
1139
1140         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1141         if (!ret) {
1142                 ret = -EINPROGRESS;
1143         } else {
1144                 ahash_unmap(jrdev, edesc, req, digestsize);
1145                 kfree(edesc);
1146         }
1147
1148         return ret;
1149 }
1150
1151 /* submit ahash update if it is the first job descriptor after update */
1152 static int ahash_update_no_ctx(struct ahash_request *req)
1153 {
1154         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1155         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1156         struct caam_hash_state *state = ahash_request_ctx(req);
1157         struct device *jrdev = ctx->jrdev;
1158         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1159                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1160         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1161         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1162         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1163         int *next_buflen = state->current_buf ? &state->buflen_0 :
1164                            &state->buflen_1;
1165         int in_len = *buflen + req->nbytes, to_hash;
1166         int sec4_sg_bytes, src_nents;
1167         struct ahash_edesc *edesc;
1168         u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1169         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1170         bool chained = false;
1171         int ret = 0;
1172         int sh_len;
1173
1174         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1175         to_hash = in_len - *next_buflen;
1176
1177         if (to_hash) {
1178                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1179                                        &chained);
1180                 sec4_sg_bytes = (1 + src_nents) *
1181                                 sizeof(struct sec4_sg_entry);
1182
1183                 /*
1184                  * allocate space for base edesc and hw desc commands,
1185                  * link tables
1186                  */
1187                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1188                                 sec4_sg_bytes, GFP_DMA | flags);
1189                 if (!edesc) {
1190                         dev_err(jrdev,
1191                                 "could not allocate extended descriptor\n");
1192                         return -ENOMEM;
1193                 }
1194
1195                 edesc->src_nents = src_nents;
1196                 edesc->chained = chained;
1197                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1198                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1199                                  DESC_JOB_IO_LEN;
1200                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1201                                                     sec4_sg_bytes,
1202                                                     DMA_TO_DEVICE);
1203
1204                 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1205                                                     buf, *buflen);
1206                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1207                                    edesc->sec4_sg + 1, chained);
1208                 if (*next_buflen) {
1209                         sg_copy_part(next_buf, req->src, to_hash - *buflen,
1210                                     req->nbytes);
1211                         state->current_buf = !state->current_buf;
1212                 }
1213
1214                 sh_len = desc_len(sh_desc);
1215                 desc = edesc->hw_desc;
1216                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1217                                      HDR_REVERSE);
1218
1219                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1220
1221                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1222
1223 #ifdef DEBUG
1224                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1225                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1226                                desc_bytes(desc), 1);
1227 #endif
1228
1229                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1230                 if (!ret) {
1231                         ret = -EINPROGRESS;
1232                         state->update = ahash_update_ctx;
1233                         state->finup = ahash_finup_ctx;
1234                         state->final = ahash_final_ctx;
1235                 } else {
1236                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1237                                         DMA_TO_DEVICE);
1238                         kfree(edesc);
1239                 }
1240         } else if (*next_buflen) {
1241                 sg_copy(buf + *buflen, req->src, req->nbytes);
1242                 *buflen = *next_buflen;
1243                 *next_buflen = 0;
1244         }
1245 #ifdef DEBUG
1246         print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1247                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1248         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1249                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1250                        *next_buflen, 1);
1251 #endif
1252
1253         return ret;
1254 }
1255
1256 /* submit ahash finup if it is the first job descriptor after update */
1257 static int ahash_finup_no_ctx(struct ahash_request *req)
1258 {
1259         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1260         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1261         struct caam_hash_state *state = ahash_request_ctx(req);
1262         struct device *jrdev = ctx->jrdev;
1263         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1264                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1265         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1266         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1267         int last_buflen = state->current_buf ? state->buflen_0 :
1268                           state->buflen_1;
1269         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1270         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1271         int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1272         int digestsize = crypto_ahash_digestsize(ahash);
1273         struct ahash_edesc *edesc;
1274         bool chained = false;
1275         int sh_len;
1276         int ret = 0;
1277
1278         src_nents = __sg_count(req->src, req->nbytes, &chained);
1279         sec4_sg_src_index = 2;
1280         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1281                          sizeof(struct sec4_sg_entry);
1282
1283         /* allocate space for base edesc and hw desc commands, link tables */
1284         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1285                         sec4_sg_bytes, GFP_DMA | flags);
1286         if (!edesc) {
1287                 dev_err(jrdev, "could not allocate extended descriptor\n");
1288                 return -ENOMEM;
1289         }
1290
1291         sh_len = desc_len(sh_desc);
1292         desc = edesc->hw_desc;
1293         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1294
1295         edesc->src_nents = src_nents;
1296         edesc->chained = chained;
1297         edesc->sec4_sg_bytes = sec4_sg_bytes;
1298         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1299                          DESC_JOB_IO_LEN;
1300         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1301                                             sec4_sg_bytes, DMA_TO_DEVICE);
1302
1303         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1304                                                 state->buf_dma, buflen,
1305                                                 last_buflen);
1306
1307         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1308                            chained);
1309
1310         append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1311                                req->nbytes, LDST_SGF);
1312
1313         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1314                                                 digestsize);
1315
1316 #ifdef DEBUG
1317         print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1318                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1319 #endif
1320
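             /*
              * Hand the job descriptor to the job ring: a successful enqueue
              * returns 0 and ahash_done() completes the request from the done
              * path, so report -EINPROGRESS; on failure, undo the DMA
              * mappings and free the descriptor.
              */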
1321         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1322         if (!ret) {
1323                 ret = -EINPROGRESS;
1324         } else {
1325                 ahash_unmap(jrdev, edesc, req, digestsize);
1326                 kfree(edesc);
1327         }
1328
1329         return ret;
1330 }
1331
1332 /* submit first update job descriptor after init */
1333 static int ahash_update_first(struct ahash_request *req)
1334 {
1335         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1336         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1337         struct caam_hash_state *state = ahash_request_ctx(req);
1338         struct device *jrdev = ctx->jrdev;
1339         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1340                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1341         u8 *next_buf = state->buf_0 + state->current_buf *
1342                        CAAM_MAX_HASH_BLOCK_SIZE;
1343         int *next_buflen = &state->buflen_0 + state->current_buf;
1344         int to_hash;
1345         u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1346         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1347         int sec4_sg_bytes, src_nents;
1348         dma_addr_t src_dma;
1349         u32 options;
1350         struct ahash_edesc *edesc;
1351         bool chained = false;
1352         int ret = 0;
1353         int sh_len;
1354
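             /*
              * Hash only a whole number of blocks now and stash the trailing
              * partial block in next_buf for a later update/final.  For
              * example (illustrative only), with a 64-byte block size and
              * req->nbytes == 100: *next_buflen = 100 & 63 = 36 and
              * to_hash = 64.
              */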
1355         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1356                                       1);
1357         to_hash = req->nbytes - *next_buflen;
1358
1359         if (to_hash) {
1360                 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1361                                      &chained);
1362                 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1363                                    DMA_TO_DEVICE, chained);
1364                 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1365
1366                 /*
1367                  * allocate space for base edesc and hw desc commands,
1368                  * link tables
1369                  */
1370                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1371                                 sec4_sg_bytes, GFP_DMA | flags);
1372                 if (!edesc) {
1373                         dev_err(jrdev,
1374                                 "could not allocate extended descriptor\n");
1375                         return -ENOMEM;
1376                 }
1377
1378                 edesc->src_nents = src_nents;
1379                 edesc->chained = chained;
1380                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1381                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1382                                  DESC_JOB_IO_LEN;
1383                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1384                                                     sec4_sg_bytes,
1385                                                     DMA_TO_DEVICE);
1386
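                     /*
                      * Point SEQ IN at the data to hash: a source that needs
                      * scatter/gather goes through the S/G table (LDST_SGF),
                      * otherwise the single segment's DMA address is handed
                      * to the CAAM directly.
                      */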
1387                 if (src_nents) {
1388                         sg_to_sec4_sg_last(req->src, src_nents,
1389                                            edesc->sec4_sg, 0);
1390                         src_dma = edesc->sec4_sg_dma;
1391                         options = LDST_SGF;
1392                 } else {
1393                         src_dma = sg_dma_address(req->src);
1394                         options = 0;
1395                 }
1396
1397                 if (*next_buflen)
1398                         sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1399
1400                 sh_len = desc_len(sh_desc);
1401                 desc = edesc->hw_desc;
1402                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1403                                      HDR_REVERSE);
1404
1405                 append_seq_in_ptr(desc, src_dma, to_hash, options);
1406
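                     /*
                      * The job's output is the running MDHA context
                      * (ctx->ctx_len bytes), written back to the request
                      * state for the next update/finup/final.
                      */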
1407                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1408
1409 #ifdef DEBUG
1410                 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1411                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1412                                desc_bytes(desc), 1);
1413 #endif
1414
1415                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1416                                       req);
1417                 if (!ret) {
1418                         ret = -EINPROGRESS;
1419                         state->update = ahash_update_ctx;
1420                         state->finup = ahash_finup_ctx;
1421                         state->final = ahash_final_ctx;
1422                 } else {
1423                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1424                                         DMA_TO_DEVICE);
1425                         kfree(edesc);
1426                 }
1427         } else if (*next_buflen) {
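                     /*
                      * Less than one block of data: nothing is sent to the
                      * CAAM yet, just buffer it and switch to the no-context
                      * handlers.
                      */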
1428                 state->update = ahash_update_no_ctx;
1429                 state->finup = ahash_finup_no_ctx;
1430                 state->final = ahash_final_no_ctx;
1431                 sg_copy(next_buf, req->src, req->nbytes);
1432         }
1433 #ifdef DEBUG
1434         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1435                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1436                        *next_buflen, 1);
1437 #endif
1438
1439         return ret;
1440 }
1441
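     /*
      * finup as the very first operation after init has no running context,
      * so it is simply a one-shot digest of the request.
      */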
1442 static int ahash_finup_first(struct ahash_request *req)
1443 {
1444         return ahash_digest(req);
1445 }
1446
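     /*
      * crypto API entry points: ahash_init() primes the per-request state
      * machine, and ahash_update/finup/final() dispatch to whichever handler
      * the previous step installed (first-update, context-carrying *_ctx, or
      * buffer-only *_no_ctx variants).
      */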
1447 static int ahash_init(struct ahash_request *req)
1448 {
1449         struct caam_hash_state *state = ahash_request_ctx(req);
1450
1451         state->update = ahash_update_first;
1452         state->finup = ahash_finup_first;
1453         state->final = ahash_final_no_ctx;
1454
1455         state->current_buf = 0;
1456
1457         return 0;
1458 }
1459
1460 static int ahash_update(struct ahash_request *req)
1461 {
1462         struct caam_hash_state *state = ahash_request_ctx(req);
1463
1464         return state->update(req);
1465 }
1466
1467 static int ahash_finup(struct ahash_request *req)
1468 {
1469         struct caam_hash_state *state = ahash_request_ctx(req);
1470
1471         return state->finup(req);
1472 }
1473
1474 static int ahash_final(struct ahash_request *req)
1475 {
1476         struct caam_hash_state *state = ahash_request_ctx(req);
1477
1478         return state->final(req);
1479 }
1480
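     /*
      * Partial-hash save/restore.  The exported blob is simply the tfm
      * context followed by the request state, so the destination must hold
      * sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state) bytes.
      * Callers reach these through the generic crypto_ahash_export() /
      * crypto_ahash_import() helpers, roughly (illustrative sketch only):
      *
      *      crypto_ahash_update(req);
      *      crypto_ahash_export(req, blob);
      *      ...
      *      crypto_ahash_import(req, blob);
      *      crypto_ahash_final(req);
      */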
1481 static int ahash_export(struct ahash_request *req, void *out)
1482 {
1483         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1484         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1485         struct caam_hash_state *state = ahash_request_ctx(req);
1486
1487         memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1488         memcpy(out + sizeof(struct caam_hash_ctx), state,
1489                sizeof(struct caam_hash_state));
1490         return 0;
1491 }
1492
1493 static int ahash_import(struct ahash_request *req, const void *in)
1494 {
1495         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1496         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1497         struct caam_hash_state *state = ahash_request_ctx(req);
1498
1499         memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1500         memcpy(state, in + sizeof(struct caam_hash_ctx),
1501                sizeof(struct caam_hash_state));
1502         return 0;
1503 }
1504
1505 struct caam_hash_template {
1506         char name[CRYPTO_MAX_ALG_NAME];
1507         char driver_name[CRYPTO_MAX_ALG_NAME];
1508         char hmac_name[CRYPTO_MAX_ALG_NAME];
1509         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1510         unsigned int blocksize;
1511         struct ahash_alg template_ahash;
1512         u32 alg_type;
1513         u32 alg_op;
1514 };
1515
1516 /* ahash algorithm templates, registered both keyed (hmac) and unkeyed */
1517 static struct caam_hash_template driver_hash[] = {
1518         {
1519                 .name = "sha1",
1520                 .driver_name = "sha1-caam",
1521                 .hmac_name = "hmac(sha1)",
1522                 .hmac_driver_name = "hmac-sha1-caam",
1523                 .blocksize = SHA1_BLOCK_SIZE,
1524                 .template_ahash = {
1525                         .init = ahash_init,
1526                         .update = ahash_update,
1527                         .final = ahash_final,
1528                         .finup = ahash_finup,
1529                         .digest = ahash_digest,
1530                         .export = ahash_export,
1531                         .import = ahash_import,
1532                         .setkey = ahash_setkey,
1533                         .halg = {
1534                                 .digestsize = SHA1_DIGEST_SIZE,
1535                                 },
1536                         },
1537                 .alg_type = OP_ALG_ALGSEL_SHA1,
1538                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1539         }, {
1540                 .name = "sha224",
1541                 .driver_name = "sha224-caam",
1542                 .hmac_name = "hmac(sha224)",
1543                 .hmac_driver_name = "hmac-sha224-caam",
1544                 .blocksize = SHA224_BLOCK_SIZE,
1545                 .template_ahash = {
1546                         .init = ahash_init,
1547                         .update = ahash_update,
1548                         .final = ahash_final,
1549                         .finup = ahash_finup,
1550                         .digest = ahash_digest,
1551                         .export = ahash_export,
1552                         .import = ahash_import,
1553                         .setkey = ahash_setkey,
1554                         .halg = {
1555                                 .digestsize = SHA224_DIGEST_SIZE,
1556                                 },
1557                         },
1558                 .alg_type = OP_ALG_ALGSEL_SHA224,
1559                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1560         }, {
1561                 .name = "sha256",
1562                 .driver_name = "sha256-caam",
1563                 .hmac_name = "hmac(sha256)",
1564                 .hmac_driver_name = "hmac-sha256-caam",
1565                 .blocksize = SHA256_BLOCK_SIZE,
1566                 .template_ahash = {
1567                         .init = ahash_init,
1568                         .update = ahash_update,
1569                         .final = ahash_final,
1570                         .finup = ahash_finup,
1571                         .digest = ahash_digest,
1572                         .export = ahash_export,
1573                         .import = ahash_import,
1574                         .setkey = ahash_setkey,
1575                         .halg = {
1576                                 .digestsize = SHA256_DIGEST_SIZE,
1577                                 },
1578                         },
1579                 .alg_type = OP_ALG_ALGSEL_SHA256,
1580                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1581         }, {
1582                 .name = "sha384",
1583                 .driver_name = "sha384-caam",
1584                 .hmac_name = "hmac(sha384)",
1585                 .hmac_driver_name = "hmac-sha384-caam",
1586                 .blocksize = SHA384_BLOCK_SIZE,
1587                 .template_ahash = {
1588                         .init = ahash_init,
1589                         .update = ahash_update,
1590                         .final = ahash_final,
1591                         .finup = ahash_finup,
1592                         .digest = ahash_digest,
1593                         .export = ahash_export,
1594                         .import = ahash_import,
1595                         .setkey = ahash_setkey,
1596                         .halg = {
1597                                 .digestsize = SHA384_DIGEST_SIZE,
1598                                 },
1599                         },
1600                 .alg_type = OP_ALG_ALGSEL_SHA384,
1601                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1602         }, {
1603                 .name = "sha512",
1604                 .driver_name = "sha512-caam",
1605                 .hmac_name = "hmac(sha512)",
1606                 .hmac_driver_name = "hmac-sha512-caam",
1607                 .blocksize = SHA512_BLOCK_SIZE,
1608                 .template_ahash = {
1609                         .init = ahash_init,
1610                         .update = ahash_update,
1611                         .final = ahash_final,
1612                         .finup = ahash_finup,
1613                         .digest = ahash_digest,
1614                         .export = ahash_export,
1615                         .import = ahash_import,
1616                         .setkey = ahash_setkey,
1617                         .halg = {
1618                                 .digestsize = SHA512_DIGEST_SIZE,
1619                                 },
1620                         },
1621                 .alg_type = OP_ALG_ALGSEL_SHA512,
1622                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1623         }, {
1624                 .name = "md5",
1625                 .driver_name = "md5-caam",
1626                 .hmac_name = "hmac(md5)",
1627                 .hmac_driver_name = "hmac-md5-caam",
1628                 .blocksize = MD5_BLOCK_WORDS * 4,
1629                 .template_ahash = {
1630                         .init = ahash_init,
1631                         .update = ahash_update,
1632                         .final = ahash_final,
1633                         .finup = ahash_finup,
1634                         .digest = ahash_digest,
1635                         .export = ahash_export,
1636                         .import = ahash_import,
1637                         .setkey = ahash_setkey,
1638                         .halg = {
1639                                 .digestsize = MD5_DIGEST_SIZE,
1640                                 },
1641                         },
1642                 .alg_type = OP_ALG_ALGSEL_MD5,
1643                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1644         },
1645 };
1646
1647 struct caam_hash_alg {
1648         struct list_head entry;
1649         int alg_type;
1650         int alg_op;
1651         struct ahash_alg ahash_alg;
1652 };
1653
1654 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1655 {
1656         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1657         struct crypto_alg *base = tfm->__crt_alg;
1658         struct hash_alg_common *halg =
1659                  container_of(base, struct hash_alg_common, base);
1660         struct ahash_alg *alg =
1661                  container_of(halg, struct ahash_alg, halg);
1662         struct caam_hash_alg *caam_hash =
1663                  container_of(alg, struct caam_hash_alg, ahash_alg);
1664         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1665         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1666         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1667                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1668                                          HASH_MSG_LEN + 32,
1669                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1670                                          HASH_MSG_LEN + 64,
1671                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1672         int ret = 0;
1673
1674         /*
1675          * Get a job ring from the Job Ring driver to ensure in-order
1676          * crypto request processing per tfm
1677          */
1678         ctx->jrdev = caam_jr_alloc();
1679         if (IS_ERR(ctx->jrdev)) {
1680                 pr_err("Job Ring Device allocation for transform failed\n");
1681                 return PTR_ERR(ctx->jrdev);
1682         }
1683         /* copy descriptor header template value */
1684         ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1685         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1686
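             /*
              * The ALGSEL sub-field of alg_op indexes runninglen[] (MD5,
              * SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 in register order)
              * to pick this algorithm's running-digest context length.
              */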
1687         ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1688                                   OP_ALG_ALGSEL_SHIFT];
1689
1690         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1691                                  sizeof(struct caam_hash_state));
1692
1693         ret = ahash_set_sh_desc(ahash);
1694
1695         return ret;
1696 }
1697
1698 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1699 {
1700         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1701
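             /*
              * Unmap whichever shared descriptors ahash_set_sh_desc() managed
              * to map at cra_init time, then give the job ring back.
              */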
1702         if (ctx->sh_desc_update_dma &&
1703             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1704                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1705                                  desc_bytes(ctx->sh_desc_update),
1706                                  DMA_TO_DEVICE);
1707         if (ctx->sh_desc_update_first_dma &&
1708             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1709                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1710                                  desc_bytes(ctx->sh_desc_update_first),
1711                                  DMA_TO_DEVICE);
1712         if (ctx->sh_desc_fin_dma &&
1713             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1714                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1715                                  desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1716         if (ctx->sh_desc_digest_dma &&
1717             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1718                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1719                                  desc_bytes(ctx->sh_desc_digest),
1720                                  DMA_TO_DEVICE);
1721         if (ctx->sh_desc_finup_dma &&
1722             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1723                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1724                                  desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1725
1726         caam_jr_free(ctx->jrdev);
1727 }
1728
1729 static void __exit caam_algapi_hash_exit(void)
1730 {
1731         struct caam_hash_alg *t_alg, *n;
1732
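             /* if module init never ran, the list head was never initialized */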
1733         if (!hash_list.next)
1734                 return;
1735
1736         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1737                 crypto_unregister_ahash(&t_alg->ahash_alg);
1738                 list_del(&t_alg->entry);
1739                 kfree(t_alg);
1740         }
1741 }
1742
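     /*
      * Build a struct caam_hash_alg from a template entry; 'keyed' selects
      * the hmac(<alg>) names, otherwise the plain hash names are used.
      */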
1743 static struct caam_hash_alg *
1744 caam_hash_alloc(struct caam_hash_template *template,
1745                 bool keyed)
1746 {
1747         struct caam_hash_alg *t_alg;
1748         struct ahash_alg *halg;
1749         struct crypto_alg *alg;
1750
1751         t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1752         if (!t_alg) {
1753                 pr_err("failed to allocate t_alg\n");
1754                 return ERR_PTR(-ENOMEM);
1755         }
1756
1757         t_alg->ahash_alg = template->template_ahash;
1758         halg = &t_alg->ahash_alg;
1759         alg = &halg->halg.base;
1760
1761         if (keyed) {
1762                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1763                          template->hmac_name);
1764                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1765                          template->hmac_driver_name);
1766         } else {
1767                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1768                          template->name);
1769                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1770                          template->driver_name);
1771         }
1772         alg->cra_module = THIS_MODULE;
1773         alg->cra_init = caam_hash_cra_init;
1774         alg->cra_exit = caam_hash_cra_exit;
1775         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1776         alg->cra_priority = CAAM_CRA_PRIORITY;
1777         alg->cra_blocksize = template->blocksize;
1778         alg->cra_alignmask = 0;
1779         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1780         alg->cra_type = &crypto_ahash_type;
1781
1782         t_alg->alg_type = template->alg_type;
1783         t_alg->alg_op = template->alg_op;
1784
1785         return t_alg;
1786 }
1787
1788 static int __init caam_algapi_hash_init(void)
1789 {
1790         int i = 0, err = 0;
1791
1792         INIT_LIST_HEAD(&hash_list);
1793
1794         /* register crypto algorithms the device supports */
1795         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1796                 /* TODO: check if h/w supports alg */
1797                 struct caam_hash_alg *t_alg;
1798
1799                 /* register hmac version */
1800                 t_alg = caam_hash_alloc(&driver_hash[i], true);
1801                 if (IS_ERR(t_alg)) {
1802                         err = PTR_ERR(t_alg);
1803                         pr_warn("%s alg allocation failed\n",
1804                                 driver_hash[i].driver_name);
1805                         continue;
1806                 }
1807
1808                 err = crypto_register_ahash(&t_alg->ahash_alg);
1809                 if (err) {
1810                         pr_warn("%s alg registration failed\n",
1811                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1812                         kfree(t_alg);
1813                 } else
1814                         list_add_tail(&t_alg->entry, &hash_list);
1815
1816                 /* register unkeyed version */
1817                 t_alg = caam_hash_alloc(&driver_hash[i], false);
1818                 if (IS_ERR(t_alg)) {
1819                         err = PTR_ERR(t_alg);
1820                         pr_warn("%s alg allocation failed\n",
1821                                 driver_hash[i].driver_name);
1822                         continue;
1823                 }
1824
1825                 err = crypto_register_ahash(&t_alg->ahash_alg);
1826                 if (err) {
1827                         pr_warn("%s alg registration failed\n",
1828                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1829                         kfree(t_alg);
1830                 } else
1831                         list_add_tail(&t_alg->entry, &hash_list);
1832         }
1833
1834         return err;
1835 }
1836
1837 module_init(caam_algapi_hash_init);
1838 module_exit(caam_algapi_hash_exit);
1839
1840 MODULE_LICENSE("GPL");
1841 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1842 MODULE_AUTHOR("Freescale Semiconductor - NMG");