crypto: caam - Moved macro DESC_JOB_IO_LEN to desc_constr.h
[firefly-linux-kernel-4.4.55.git] drivers/crypto/caam/caamhash.c
1 /*
2  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3  *
4  * Copyright 2011 Freescale Semiconductor, Inc.
5  *
6  * Based on caamalg.c crypto API driver.
7  *
8  * relationship of digest job descriptor or first job descriptor after init to
9  * shared descriptors:
10  *
11  * ---------------                     ---------------
12  * | JobDesc #1  |-------------------->|  ShareDesc  |
13  * | *(packet 1) |                     |  (hashKey)  |
14  * ---------------                     | (operation) |
15  *                                     ---------------
16  *
17  * relationship of subsequent job descriptors to shared descriptors:
18  *
19  * ---------------                     ---------------
20  * | JobDesc #2  |-------------------->|  ShareDesc  |
21  * | *(packet 2) |      |------------->|  (hashKey)  |
22  * ---------------      |    |-------->| (operation) |
23  *       .              |    |         | (load ctx2) |
24  *       .              |    |         ---------------
25  * ---------------      |    |
26  * | JobDesc #3  |------|    |
27  * | *(packet 3) |           |
28  * ---------------           |
29  *       .                   |
30  *       .                   |
31  * ---------------           |
32  * | JobDesc #4  |------------
33  * | *(packet 4) |
34  * ---------------
35  *
36  * The SharedDesc never changes for a connection unless rekeyed, but
37  * each packet will likely be in a different place. So all we need
38  * to know to process the packet is where the input is, where the
39  * output goes, and what context we want to process with. Context is
40  * in the SharedDesc, packet references in the JobDesc.
41  *
42  * So, a job desc looks like:
43  *
44  * ---------------------
45  * | Header            |
46  * | ShareDesc Pointer |
47  * | SEQ_OUT_PTR       |
48  * | (output buffer)   |
49  * | (output length)   |
50  * | SEQ_IN_PTR        |
51  * | (input buffer)    |
52  * | (input length)    |
53  * ---------------------
54  */
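
/*
 * Rough sketch of how a job descriptor like the one above is put together
 * at request time with the desc_constr.h helpers used throughout this file;
 * the *_dma values, lengths and the completion callback are placeholders
 * for the per-request values set up in the functions below:
 *
 *	desc = edesc->hw_desc;
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, src_len, options);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	caam_jr_enqueue(jrdev, desc, ahash_done, req);
 */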
55
56 #include "compat.h"
57
58 #include "regs.h"
59 #include "intern.h"
60 #include "desc_constr.h"
61 #include "jr.h"
62 #include "error.h"
63 #include "sg_sw_sec4.h"
64 #include "key_gen.h"
65
66 #define CAAM_CRA_PRIORITY               3000
67
68 /* max hash key is max split key size */
69 #define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
70
71 #define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
72 #define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE
73
74 /* length of descriptors text */
75 #define DESC_AHASH_BASE                 (4 * CAAM_CMD_SZ)
76 #define DESC_AHASH_UPDATE_LEN           (6 * CAAM_CMD_SZ)
77 #define DESC_AHASH_UPDATE_FIRST_LEN     (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78 #define DESC_AHASH_FINAL_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79 #define DESC_AHASH_FINUP_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80 #define DESC_AHASH_DIGEST_LEN           (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82 #define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
83                                          CAAM_MAX_HASH_KEY_SIZE)
84 #define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
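
/*
 * Worked example, assuming CAAM_CMD_SZ is sizeof(u32) (4 bytes):
 * DESC_AHASH_FINAL_LEN = 9 * 4 = 36 bytes and CAAM_MAX_HASH_KEY_SIZE = 128,
 * so DESC_HASH_MAX_USED_BYTES = 164 and DESC_HASH_MAX_USED_LEN = 41 words,
 * which is the size used for the sh_desc_* arrays in struct caam_hash_ctx.
 */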
85
86 /* caam context sizes for hashes: running digest + 8 (message length) */
87 #define HASH_MSG_LEN                    8
88 #define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90 #ifdef DEBUG
91 /* for print_hex_dumps with line references */
92 #define xstr(s) str(s)
93 #define str(s) #s
94 #define debug(format, arg...) printk(format, arg)
95 #else
96 #define debug(format, arg...)
97 #endif
98
99 /* ahash per-session context */
100 struct caam_hash_ctx {
101         struct device *jrdev;
102         u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
103         u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
104         u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
105         u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
106         u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
107         dma_addr_t sh_desc_update_dma;
108         dma_addr_t sh_desc_update_first_dma;
109         dma_addr_t sh_desc_fin_dma;
110         dma_addr_t sh_desc_digest_dma;
111         dma_addr_t sh_desc_finup_dma;
112         u32 alg_type;
113         u32 alg_op;
114         u8 key[CAAM_MAX_HASH_KEY_SIZE];
115         dma_addr_t key_dma;
116         int ctx_len;
117         unsigned int split_key_len;
118         unsigned int split_key_pad_len;
119 };
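
/*
 * The sh_desc_* buffers above hold the five shared descriptors built by
 * ahash_set_sh_desc() and DMA-mapped at the sh_desc_*_dma addresses; each
 * per-request job descriptor then only needs to point at the appropriate
 * shared descriptor (see the init_job_desc_shared() calls below).
 */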
120
121 /* ahash state */
122 struct caam_hash_state {
123         dma_addr_t buf_dma;
124         dma_addr_t ctx_dma;
125         u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
126         int buflen_0;
127         u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
128         int buflen_1;
129         u8 caam_ctx[MAX_CTX_LEN];
130         int (*update)(struct ahash_request *req);
131         int (*final)(struct ahash_request *req);
132         int (*finup)(struct ahash_request *req);
133         int current_buf;
134 };
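
/*
 * buf_0/buf_1 above double-buffer any partial block left over between
 * requests: current_buf selects the live buffer, and the update paths copy
 * a trailing partial block into the other buffer before flipping
 * current_buf, so data still referenced by an in-flight job is not
 * overwritten.
 */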
135
136 /* Common job descriptor seq in/out ptr routines */
137
138 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
139 static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
140                                        struct caam_hash_state *state,
141                                        int ctx_len)
142 {
143         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
144                                         ctx_len, DMA_FROM_DEVICE);
145         append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
146 }
147
148 /* Map req->result, and append seq_out_ptr command that points to it */
149 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
150                                                 u8 *result, int digestsize)
151 {
152         dma_addr_t dst_dma;
153
154         dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
155         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
156
157         return dst_dma;
158 }
159
160 /* Map current buffer in state and put it in link table */
161 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
162                                             struct sec4_sg_entry *sec4_sg,
163                                             u8 *buf, int buflen)
164 {
165         dma_addr_t buf_dma;
166
167         buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
168         dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
169
170         return buf_dma;
171 }
172
173 /* Map req->src and put it in link table */
174 static inline void src_map_to_sec4_sg(struct device *jrdev,
175                                       struct scatterlist *src, int src_nents,
176                                       struct sec4_sg_entry *sec4_sg,
177                                       bool chained)
178 {
179         dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
180         sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
181 }
182
183 /*
184  * Unmap the buffer if it was mapped for a previous request, and only put
185  * it back in the link table if it currently contains data.
186  */
187 static inline dma_addr_t
188 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
189                        u8 *buf, dma_addr_t buf_dma, int buflen,
190                        int last_buflen)
191 {
192         if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
193                 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
194         if (buflen)
195                 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
196         else
197                 buf_dma = 0;
198
199         return buf_dma;
200 }
201
202 /* Map state->caam_ctx, and add it to link table */
203 static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
204                                       struct caam_hash_state *state,
205                                       int ctx_len,
206                                       struct sec4_sg_entry *sec4_sg,
207                                       u32 flag)
208 {
209         state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
210         dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
211 }
212
213 /* Common shared descriptor commands */
214 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
215 {
216         append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
217                           ctx->split_key_len, CLASS_2 |
218                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
219 }
220
221 /* Append key if it has been set */
222 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
223 {
224         u32 *key_jump_cmd;
225
226         init_sh_desc(desc, HDR_SHARE_SERIAL);
227
228         if (ctx->split_key_len) {
229                 /* Skip if already shared */
230                 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
231                                            JUMP_COND_SHRD);
232
233                 append_key_ahash(desc, ctx);
234
235                 set_jump_tgt_here(desc, key_jump_cmd);
236         }
237
238         /* Propagate errors from shared to job descriptor */
239         append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
240 }
241
242 /*
243  * For ahash, read data from seqin (following state->caam_ctx when it is
244  * present) and write the resulting class2 context to seqout, which may be
245  * state->caam_ctx or req->result
246  */
247 static inline void ahash_append_load_str(u32 *desc, int digestsize)
248 {
249         /* Calculate remaining bytes to read */
250         append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
251
252         /* Read remaining bytes */
253         append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
254                              FIFOLD_TYPE_MSG | KEY_VLF);
255
256         /* Store class2 context bytes */
257         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
258                          LDST_SRCDST_BYTE_CONTEXT);
259 }
260
261 /*
262  * For ahash update, final and finup, import context, read and write to seqout
263  */
264 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
265                                          int digestsize,
266                                          struct caam_hash_ctx *ctx)
267 {
268         init_sh_desc_key_ahash(desc, ctx);
269
270         /* Import context from software */
271         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
272                    LDST_CLASS_2_CCB | ctx->ctx_len);
273
274         /* Class 2 operation */
275         append_operation(desc, op | state | OP_ALG_ENCRYPT);
276
277         /*
278          * Load from buf and/or src and write to req->result or state->context
279          */
280         ahash_append_load_str(desc, digestsize);
281 }
282
283 /* For ahash first (init) and digest, read and write to seqout */
284 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
285                                      int digestsize, struct caam_hash_ctx *ctx)
286 {
287         init_sh_desc_key_ahash(desc, ctx);
288
289         /* Class 2 operation */
290         append_operation(desc, op | state | OP_ALG_ENCRYPT);
291
292         /*
293          * Load from buf and/or src and write to req->result or state->context
294          */
295         ahash_append_load_str(desc, digestsize);
296 }
297
298 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
299 {
300         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
301         int digestsize = crypto_ahash_digestsize(ahash);
302         struct device *jrdev = ctx->jrdev;
303         u32 have_key = 0;
304         u32 *desc;
305
306         if (ctx->split_key_len)
307                 have_key = OP_ALG_AAI_HMAC_PRECOMP;
308
309         /* ahash_update shared descriptor */
310         desc = ctx->sh_desc_update;
311
312         init_sh_desc(desc, HDR_SHARE_SERIAL);
313
314         /* Import context from software */
315         append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
316                    LDST_CLASS_2_CCB | ctx->ctx_len);
317
318         /* Class 2 operation */
319         append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
320                          OP_ALG_ENCRYPT);
321
322         /* Load data and write to result or context */
323         ahash_append_load_str(desc, ctx->ctx_len);
324
325         ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
326                                                  DMA_TO_DEVICE);
327         if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
328                 dev_err(jrdev, "unable to map shared descriptor\n");
329                 return -ENOMEM;
330         }
331 #ifdef DEBUG
332         print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
333                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
334 #endif
335
336         /* ahash_update_first shared descriptor */
337         desc = ctx->sh_desc_update_first;
338
339         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
340                           ctx->ctx_len, ctx);
341
342         ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
343                                                        desc_bytes(desc),
344                                                        DMA_TO_DEVICE);
345         if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
346                 dev_err(jrdev, "unable to map shared descriptor\n");
347                 return -ENOMEM;
348         }
349 #ifdef DEBUG
350         print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
351                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
352 #endif
353
354         /* ahash_final shared descriptor */
355         desc = ctx->sh_desc_fin;
356
357         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
358                               OP_ALG_AS_FINALIZE, digestsize, ctx);
359
360         ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
361                                               DMA_TO_DEVICE);
362         if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
363                 dev_err(jrdev, "unable to map shared descriptor\n");
364                 return -ENOMEM;
365         }
366 #ifdef DEBUG
367         print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
368                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
369                        desc_bytes(desc), 1);
370 #endif
371
372         /* ahash_finup shared descriptor */
373         desc = ctx->sh_desc_finup;
374
375         ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
376                               OP_ALG_AS_FINALIZE, digestsize, ctx);
377
378         ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
379                                                 DMA_TO_DEVICE);
380         if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
381                 dev_err(jrdev, "unable to map shared descriptor\n");
382                 return -ENOMEM;
383         }
384 #ifdef DEBUG
385         print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
386                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
387                        desc_bytes(desc), 1);
388 #endif
389
390         /* ahash_digest shared descriptor */
391         desc = ctx->sh_desc_digest;
392
393         ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
394                           digestsize, ctx);
395
396         ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
397                                                  desc_bytes(desc),
398                                                  DMA_TO_DEVICE);
399         if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
400                 dev_err(jrdev, "unable to map shared descriptor\n");
401                 return -ENOMEM;
402         }
403 #ifdef DEBUG
404         print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
405                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
406                        desc_bytes(desc), 1);
407 #endif
408
409         return 0;
410 }
411
412 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
413                               u32 keylen)
414 {
415         return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
416                                ctx->split_key_pad_len, key_in, keylen,
417                                ctx->alg_op);
418 }
419
420 /* Hash the key down to digestsize if it is too large to use directly */
421 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
422                            u32 *keylen, u8 *key_out, u32 digestsize)
423 {
424         struct device *jrdev = ctx->jrdev;
425         u32 *desc;
426         struct split_key_result result;
427         dma_addr_t src_dma, dst_dma;
428         int ret = 0;
429
430         desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
431         if (!desc) {
432                 dev_err(jrdev, "unable to allocate key input memory\n");
433                 return -ENOMEM;
434         }
435
436         init_job_desc(desc, 0);
437
438         src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
439                                  DMA_TO_DEVICE);
440         if (dma_mapping_error(jrdev, src_dma)) {
441                 dev_err(jrdev, "unable to map key input memory\n");
442                 kfree(desc);
443                 return -ENOMEM;
444         }
445         dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
446                                  DMA_FROM_DEVICE);
447         if (dma_mapping_error(jrdev, dst_dma)) {
448                 dev_err(jrdev, "unable to map key output memory\n");
449                 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
450                 kfree(desc);
451                 return -ENOMEM;
452         }
453
454         /* Job descriptor to perform unkeyed hash on key_in */
455         append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
456                          OP_ALG_AS_INITFINAL);
457         append_seq_in_ptr(desc, src_dma, *keylen, 0);
458         append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
459                              FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
460         append_seq_out_ptr(desc, dst_dma, digestsize, 0);
461         append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
462                          LDST_SRCDST_BYTE_CONTEXT);
463
464 #ifdef DEBUG
465         print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
466                        DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
467         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
468                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
469 #endif
470
471         result.err = 0;
472         init_completion(&result.completion);
473
474         ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
475         if (!ret) {
476                 /* in progress */
477                 wait_for_completion_interruptible(&result.completion);
478                 ret = result.err;
479 #ifdef DEBUG
480                 print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
481                                DUMP_PREFIX_ADDRESS, 16, 4, key_out,
482                                digestsize, 1);
483 #endif
484         }
485         dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
486         dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
487
488         *keylen = digestsize;
489
490         kfree(desc);
491
492         return ret;
493 }
494
495 static int ahash_setkey(struct crypto_ahash *ahash,
496                         const u8 *key, unsigned int keylen)
497 {
498         /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
499         static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
500         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
501         struct device *jrdev = ctx->jrdev;
502         int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
503         int digestsize = crypto_ahash_digestsize(ahash);
504         int ret = 0;
505         u8 *hashed_key = NULL;
506
507 #ifdef DEBUG
508         printk(KERN_ERR "keylen %d\n", keylen);
509 #endif
510
511         if (keylen > blocksize) {
512                 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
513                                      GFP_DMA);
514                 if (!hashed_key)
515                         return -ENOMEM;
516                 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
517                                       digestsize);
518                 if (ret)
519                         goto badkey;
520                 key = hashed_key;
521         }
522
523         /* Pick class 2 key length from algorithm submask */
524         ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
525                                       OP_ALG_ALGSEL_SHIFT] * 2;
526         ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
527
528 #ifdef DEBUG
529         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
530                ctx->split_key_len, ctx->split_key_pad_len);
531         print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
532                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
533 #endif
534
535         ret = gen_split_hash_key(ctx, key, keylen);
536         if (ret)
537                 goto badkey;
538
539         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
540                                       DMA_TO_DEVICE);
541         if (dma_mapping_error(jrdev, ctx->key_dma)) {
542                 dev_err(jrdev, "unable to map key i/o memory\n");
543                 kfree(hashed_key);
                    return -ENOMEM;
544         }
545 #ifdef DEBUG
546         print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
547                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
548                        ctx->split_key_pad_len, 1);
549 #endif
550
551         ret = ahash_set_sh_desc(ahash);
552         if (ret) {
553                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
554                                  DMA_TO_DEVICE);
555         }
556
557         kfree(hashed_key);
558         return ret;
559 badkey:
560         kfree(hashed_key);
561         crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
562         return -EINVAL;
563 }
564
565 /*
566  * ahash_edesc - s/w-extended ahash descriptor
567  * @dst_dma: physical mapped address of req->result
568  * @sec4_sg_dma: physical mapped address of h/w link table
569  * @chained: if source is chained
570  * @src_nents: number of segments in input scatterlist
571  * @sec4_sg_bytes: length of dma mapped sec4_sg space
572  * @sec4_sg: pointer to h/w link table
573  * @hw_desc: the h/w job descriptor followed by any referenced link tables
574  */
575 struct ahash_edesc {
576         dma_addr_t dst_dma;
577         dma_addr_t sec4_sg_dma;
578         bool chained;
579         int src_nents;
580         int sec4_sg_bytes;
581         struct sec4_sg_entry *sec4_sg;
582         u32 hw_desc[0];
583 };
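
/*
 * An ahash_edesc is allocated as a single kmalloc() of
 * sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, so the h/w
 * job descriptor (hw_desc[]) and the sec4 link table live directly behind
 * the struct; the callers below simply point sec4_sg past the job
 * descriptor area.
 */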
584
585 static inline void ahash_unmap(struct device *dev,
586                         struct ahash_edesc *edesc,
587                         struct ahash_request *req, int dst_len)
588 {
589         if (edesc->src_nents)
590                 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
591                                      DMA_TO_DEVICE, edesc->chained);
592         if (edesc->dst_dma)
593                 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
594
595         if (edesc->sec4_sg_bytes)
596                 dma_unmap_single(dev, edesc->sec4_sg_dma,
597                                  edesc->sec4_sg_bytes, DMA_TO_DEVICE);
598 }
599
600 static inline void ahash_unmap_ctx(struct device *dev,
601                         struct ahash_edesc *edesc,
602                         struct ahash_request *req, int dst_len, u32 flag)
603 {
604         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
605         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
606         struct caam_hash_state *state = ahash_request_ctx(req);
607
608         if (state->ctx_dma)
609                 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
610         ahash_unmap(dev, edesc, req, dst_len);
611 }
612
613 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
614                        void *context)
615 {
616         struct ahash_request *req = context;
617         struct ahash_edesc *edesc;
618         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
619         int digestsize = crypto_ahash_digestsize(ahash);
620 #ifdef DEBUG
621         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
622         struct caam_hash_state *state = ahash_request_ctx(req);
623
624         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
625 #endif
626
627         edesc = (struct ahash_edesc *)((char *)desc -
628                  offsetof(struct ahash_edesc, hw_desc));
629         if (err) {
630                 char tmp[CAAM_ERROR_STR_MAX];
631
632                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
633         }
634
635         ahash_unmap(jrdev, edesc, req, digestsize);
636         kfree(edesc);
637
638 #ifdef DEBUG
639         print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
640                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
641                        ctx->ctx_len, 1);
642         if (req->result)
643                 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
644                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
645                                digestsize, 1);
646 #endif
647
648         req->base.complete(&req->base, err);
649 }
650
651 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
652                             void *context)
653 {
654         struct ahash_request *req = context;
655         struct ahash_edesc *edesc;
656         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
657         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
658 #ifdef DEBUG
659         struct caam_hash_state *state = ahash_request_ctx(req);
660         int digestsize = crypto_ahash_digestsize(ahash);
661
662         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
663 #endif
664
665         edesc = (struct ahash_edesc *)((char *)desc -
666                  offsetof(struct ahash_edesc, hw_desc));
667         if (err) {
668                 char tmp[CAAM_ERROR_STR_MAX];
669
670                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
671         }
672
673         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
674         kfree(edesc);
675
676 #ifdef DEBUG
677         print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
678                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
679                        ctx->ctx_len, 1);
680         if (req->result)
681                 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
682                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
683                                digestsize, 1);
684 #endif
685
686         req->base.complete(&req->base, err);
687 }
688
689 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
690                                void *context)
691 {
692         struct ahash_request *req = context;
693         struct ahash_edesc *edesc;
694         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
695         int digestsize = crypto_ahash_digestsize(ahash);
696 #ifdef DEBUG
697         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
698         struct caam_hash_state *state = ahash_request_ctx(req);
699
700         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
701 #endif
702
703         edesc = (struct ahash_edesc *)((char *)desc -
704                  offsetof(struct ahash_edesc, hw_desc));
705         if (err) {
706                 char tmp[CAAM_ERROR_STR_MAX];
707
708                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
709         }
710
711         ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
712         kfree(edesc);
713
714 #ifdef DEBUG
715         print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
716                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
717                        ctx->ctx_len, 1);
718         if (req->result)
719                 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
720                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
721                                digestsize, 1);
722 #endif
723
724         req->base.complete(&req->base, err);
725 }
726
727 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
728                                void *context)
729 {
730         struct ahash_request *req = context;
731         struct ahash_edesc *edesc;
732         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
733         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
734 #ifdef DEBUG
735         struct caam_hash_state *state = ahash_request_ctx(req);
736         int digestsize = crypto_ahash_digestsize(ahash);
737
738         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
739 #endif
740
741         edesc = (struct ahash_edesc *)((char *)desc -
742                  offsetof(struct ahash_edesc, hw_desc));
743         if (err) {
744                 char tmp[CAAM_ERROR_STR_MAX];
745
746                 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
747         }
748
749         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
750         kfree(edesc);
751
752 #ifdef DEBUG
753         print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
754                        DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
755                        ctx->ctx_len, 1);
756         if (req->result)
757                 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
758                                DUMP_PREFIX_ADDRESS, 16, 4, req->result,
759                                digestsize, 1);
760 #endif
761
762         req->base.complete(&req->base, err);
763 }
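
/*
 * The request entry points below are installed in struct caam_hash_state
 * (update/final/finup): the *_no_ctx and *_first variants run while no
 * hardware context exists yet, and once a job that produces a context
 * completes they switch the state over to the *_ctx variants (see
 * ahash_update_no_ctx() for an example).
 */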
764
765 /* submit update job descriptor */
766 static int ahash_update_ctx(struct ahash_request *req)
767 {
768         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
769         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
770         struct caam_hash_state *state = ahash_request_ctx(req);
771         struct device *jrdev = ctx->jrdev;
772         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
773                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
774         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
775         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
776         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
777         int *next_buflen = state->current_buf ? &state->buflen_0 :
778                            &state->buflen_1, last_buflen;
779         int in_len = *buflen + req->nbytes, to_hash;
780         u32 *sh_desc = ctx->sh_desc_update, *desc;
781         dma_addr_t ptr = ctx->sh_desc_update_dma;
782         int src_nents, sec4_sg_bytes, sec4_sg_src_index;
783         struct ahash_edesc *edesc;
784         bool chained = false;
785         int ret = 0;
786         int sh_len;
787
788         last_buflen = *next_buflen;
789         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
790         to_hash = in_len - *next_buflen;
791
792         if (to_hash) {
793                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
794                                        &chained);
795                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
796                 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
797                                  sizeof(struct sec4_sg_entry);
798
799                 /*
800                  * allocate space for base edesc and hw desc commands,
801                  * link tables
802                  */
803                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
804                                 sec4_sg_bytes, GFP_DMA | flags);
805                 if (!edesc) {
806                         dev_err(jrdev,
807                                 "could not allocate extended descriptor\n");
808                         return -ENOMEM;
809                 }
810
811                 edesc->src_nents = src_nents;
812                 edesc->chained = chained;
813                 edesc->sec4_sg_bytes = sec4_sg_bytes;
814                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
815                                  DESC_JOB_IO_LEN;
816                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
817                                                      sec4_sg_bytes,
818                                                      DMA_TO_DEVICE);
819
820                 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
821                                    edesc->sec4_sg, DMA_BIDIRECTIONAL);
822
823                 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
824                                                         edesc->sec4_sg + 1,
825                                                         buf, state->buf_dma,
826                                                         *buflen, last_buflen);
827
828                 if (src_nents) {
829                         src_map_to_sec4_sg(jrdev, req->src, src_nents,
830                                            edesc->sec4_sg + sec4_sg_src_index,
831                                            chained);
832                         if (*next_buflen) {
833                                 sg_copy_part(next_buf, req->src, to_hash -
834                                              *buflen, req->nbytes);
835                                 state->current_buf = !state->current_buf;
836                         }
837                 } else {
838                         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
839                                                         SEC4_SG_LEN_FIN;
840                 }
841
842                 sh_len = desc_len(sh_desc);
843                 desc = edesc->hw_desc;
844                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
845                                      HDR_REVERSE);
846
847                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
848                                        to_hash, LDST_SGF);
849
850                 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
851
852 #ifdef DEBUG
853                 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
854                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
855                                desc_bytes(desc), 1);
856 #endif
857
858                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
859                 if (!ret) {
860                         ret = -EINPROGRESS;
861                 } else {
862                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
863                                            DMA_BIDIRECTIONAL);
864                         kfree(edesc);
865                 }
866         } else if (*next_buflen) {
867                 sg_copy(buf + *buflen, req->src, req->nbytes);
868                 *buflen = *next_buflen;
869                 *next_buflen = last_buflen;
870         }
871 #ifdef DEBUG
872         print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
873                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
874         print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
875                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
876                        *next_buflen, 1);
877 #endif
878
879         return ret;
880 }
881
882 static int ahash_final_ctx(struct ahash_request *req)
883 {
884         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
885         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
886         struct caam_hash_state *state = ahash_request_ctx(req);
887         struct device *jrdev = ctx->jrdev;
888         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
889                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
890         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
891         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
892         int last_buflen = state->current_buf ? state->buflen_0 :
893                           state->buflen_1;
894         u32 *sh_desc = ctx->sh_desc_fin, *desc;
895         dma_addr_t ptr = ctx->sh_desc_fin_dma;
896         int sec4_sg_bytes, sec4_sg_src_index;
897         int digestsize = crypto_ahash_digestsize(ahash);
898         struct ahash_edesc *edesc;
899         int ret = 0;
900         int sh_len;
901
902         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
            sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
903
904         /* allocate space for base edesc and hw desc commands, link tables */
905         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
906                         sec4_sg_bytes, GFP_DMA | flags);
907         if (!edesc) {
908                 dev_err(jrdev, "could not allocate extended descriptor\n");
909                 return -ENOMEM;
910         }
911
912         sh_len = desc_len(sh_desc);
913         desc = edesc->hw_desc;
914         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
915
916         edesc->sec4_sg_bytes = sec4_sg_bytes;
917         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
918                          DESC_JOB_IO_LEN;
919         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
920                                             sec4_sg_bytes, DMA_TO_DEVICE);
921         edesc->src_nents = 0;
922
923         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
924                            DMA_TO_DEVICE);
925
926         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
927                                                 buf, state->buf_dma, buflen,
928                                                 last_buflen);
929         (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
930
931         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
932                           LDST_SGF);
933
934         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
935                                                 digestsize);
936
937 #ifdef DEBUG
938         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
939                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
940 #endif
941
942         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
943         if (!ret) {
944                 ret = -EINPROGRESS;
945         } else {
946                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
947                 kfree(edesc);
948         }
949
950         return ret;
951 }
952
953 static int ahash_finup_ctx(struct ahash_request *req)
954 {
955         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
956         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
957         struct caam_hash_state *state = ahash_request_ctx(req);
958         struct device *jrdev = ctx->jrdev;
959         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
960                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
961         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
962         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
963         int last_buflen = state->current_buf ? state->buflen_0 :
964                           state->buflen_1;
965         u32 *sh_desc = ctx->sh_desc_finup, *desc;
966         dma_addr_t ptr = ctx->sh_desc_finup_dma;
967         int sec4_sg_bytes, sec4_sg_src_index;
968         int src_nents;
969         int digestsize = crypto_ahash_digestsize(ahash);
970         struct ahash_edesc *edesc;
971         bool chained = false;
972         int ret = 0;
973         int sh_len;
974
975         src_nents = __sg_count(req->src, req->nbytes, &chained);
976         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
977         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
978                          sizeof(struct sec4_sg_entry);
979
980         /* allocate space for base edesc and hw desc commands, link tables */
981         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
982                         sec4_sg_bytes, GFP_DMA | flags);
983         if (!edesc) {
984                 dev_err(jrdev, "could not allocate extended descriptor\n");
985                 return -ENOMEM;
986         }
987
988         sh_len = desc_len(sh_desc);
989         desc = edesc->hw_desc;
990         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
991
992         edesc->src_nents = src_nents;
993         edesc->chained = chained;
994         edesc->sec4_sg_bytes = sec4_sg_bytes;
995         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
996                          DESC_JOB_IO_LEN;
997         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
998                                             sec4_sg_bytes, DMA_TO_DEVICE);
999
1000         ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1001                            DMA_TO_DEVICE);
1002
1003         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1004                                                 buf, state->buf_dma, buflen,
1005                                                 last_buflen);
1006
1007         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1008                            sec4_sg_src_index, chained);
1009
1010         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1011                                buflen + req->nbytes, LDST_SGF);
1012
1013         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1014                                                 digestsize);
1015
1016 #ifdef DEBUG
1017         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1018                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1019 #endif
1020
1021         ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1022         if (!ret) {
1023                 ret = -EINPROGRESS;
1024         } else {
1025                 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1026                 kfree(edesc);
1027         }
1028
1029         return ret;
1030 }
1031
1032 static int ahash_digest(struct ahash_request *req)
1033 {
1034         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1035         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1036         struct device *jrdev = ctx->jrdev;
1037         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1038                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1039         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1040         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1041         int digestsize = crypto_ahash_digestsize(ahash);
1042         int src_nents, sec4_sg_bytes;
1043         dma_addr_t src_dma;
1044         struct ahash_edesc *edesc;
1045         bool chained = false;
1046         int ret = 0;
1047         u32 options;
1048         int sh_len;
1049
1050         src_nents = sg_count(req->src, req->nbytes, &chained);
1051         dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1052                            chained);
1053         sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1054
1055         /* allocate space for base edesc and hw desc commands, link tables */
1056         edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1057                         DESC_JOB_IO_LEN, GFP_DMA | flags);
1058         if (!edesc) {
1059                 dev_err(jrdev, "could not allocate extended descriptor\n");
1060                 return -ENOMEM;
1061         }
1062         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1063                           DESC_JOB_IO_LEN;
1064         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1065                                             sec4_sg_bytes, DMA_TO_DEVICE);
1066         edesc->src_nents = src_nents;
1067         edesc->chained = chained;
1068
1069         sh_len = desc_len(sh_desc);
1070         desc = edesc->hw_desc;
1071         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1072
1073         if (src_nents) {
1074                 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1075                 src_dma = edesc->sec4_sg_dma;
1076                 options = LDST_SGF;
1077         } else {
1078                 src_dma = sg_dma_address(req->src);
1079                 options = 0;
1080         }
1081         append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1082
1083         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1084                                                 digestsize);
1085
1086 #ifdef DEBUG
1087         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1088                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1089 #endif
1090
1091         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1092         if (!ret) {
1093                 ret = -EINPROGRESS;
1094         } else {
1095                 ahash_unmap(jrdev, edesc, req, digestsize);
1096                 kfree(edesc);
1097         }
1098
1099         return ret;
1100 }
1101
1102 /* submit ahash final if it is the first job descriptor */
1103 static int ahash_final_no_ctx(struct ahash_request *req)
1104 {
1105         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1106         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1107         struct caam_hash_state *state = ahash_request_ctx(req);
1108         struct device *jrdev = ctx->jrdev;
1109         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1110                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1111         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1112         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1113         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1114         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1115         int digestsize = crypto_ahash_digestsize(ahash);
1116         struct ahash_edesc *edesc;
1117         int ret = 0;
1118         int sh_len;
1119
1120         /* allocate space for base edesc and hw desc commands, link tables */
1121         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1122                         GFP_DMA | flags);
1123         if (!edesc) {
1124                 dev_err(jrdev, "could not allocate extended descriptor\n");
1125                 return -ENOMEM;
1126         }
1127
1128         sh_len = desc_len(sh_desc);
1129         desc = edesc->hw_desc;
1130         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1131
1132         state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1133
1134         append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1135
1136         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1137                                                 digestsize);
1138         edesc->src_nents = 0;
1139
1140 #ifdef DEBUG
1141         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1142                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1143 #endif
1144
1145         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1146         if (!ret) {
1147                 ret = -EINPROGRESS;
1148         } else {
1149                 ahash_unmap(jrdev, edesc, req, digestsize);
1150                 kfree(edesc);
1151         }
1152
1153         return ret;
1154 }
1155
1156 /* submit ahash update if it is the first job descriptor after update */
1157 static int ahash_update_no_ctx(struct ahash_request *req)
1158 {
1159         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1160         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1161         struct caam_hash_state *state = ahash_request_ctx(req);
1162         struct device *jrdev = ctx->jrdev;
1163         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1164                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1165         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1166         int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1167         u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1168         int *next_buflen = state->current_buf ? &state->buflen_0 :
1169                            &state->buflen_1;
1170         int in_len = *buflen + req->nbytes, to_hash;
1171         int sec4_sg_bytes, src_nents;
1172         struct ahash_edesc *edesc;
1173         u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1174         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1175         bool chained = false;
1176         int ret = 0;
1177         int sh_len;
1178
1179         *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1180         to_hash = in_len - *next_buflen;
1181
1182         if (to_hash) {
1183                 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1184                                        &chained);
1185                 sec4_sg_bytes = (1 + src_nents) *
1186                                 sizeof(struct sec4_sg_entry);
1187
1188                 /*
1189                  * allocate space for base edesc and hw desc commands,
1190                  * link tables
1191                  */
1192                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1193                                 sec4_sg_bytes, GFP_DMA | flags);
1194                 if (!edesc) {
1195                         dev_err(jrdev,
1196                                 "could not allocate extended descriptor\n");
1197                         return -ENOMEM;
1198                 }
1199
1200                 edesc->src_nents = src_nents;
1201                 edesc->chained = chained;
1202                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1203                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1204                                  DESC_JOB_IO_LEN;
1205                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1206                                                     sec4_sg_bytes,
1207                                                     DMA_TO_DEVICE);
1208
1209                 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1210                                                     buf, *buflen);
1211                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1212                                    edesc->sec4_sg + 1, chained);
1213                 if (*next_buflen) {
1214                         sg_copy_part(next_buf, req->src, to_hash - *buflen,
1215                                     req->nbytes);
1216                         state->current_buf = !state->current_buf;
1217                 }
1218
1219                 sh_len = desc_len(sh_desc);
1220                 desc = edesc->hw_desc;
1221                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1222                                      HDR_REVERSE);
1223
1224                 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1225
1226                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1227
1228 #ifdef DEBUG
1229                 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1230                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1231                                desc_bytes(desc), 1);
1232 #endif
1233
1234                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1235                 if (!ret) {
1236                         ret = -EINPROGRESS;
1237                         state->update = ahash_update_ctx;
1238                         state->finup = ahash_finup_ctx;
1239                         state->final = ahash_final_ctx;
1240                 } else {
1241                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1242                                         DMA_TO_DEVICE);
1243                         kfree(edesc);
1244                 }
1245         } else if (*next_buflen) {
1246                 sg_copy(buf + *buflen, req->src, req->nbytes);
1247                 *buflen = *next_buflen;
1248                 *next_buflen = 0;
1249         }
1250 #ifdef DEBUG
1251         print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
1252                        DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1253         print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1254                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1255                        *next_buflen, 1);
1256 #endif
1257
1258         return ret;
1259 }
1260
1261 /* submit ahash finup if it is the first job descriptor after update */
1262 static int ahash_finup_no_ctx(struct ahash_request *req)
1263 {
1264         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1265         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1266         struct caam_hash_state *state = ahash_request_ctx(req);
1267         struct device *jrdev = ctx->jrdev;
1268         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1269                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1270         u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1271         int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1272         int last_buflen = state->current_buf ? state->buflen_0 :
1273                           state->buflen_1;
1274         u32 *sh_desc = ctx->sh_desc_digest, *desc;
1275         dma_addr_t ptr = ctx->sh_desc_digest_dma;
1276         int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1277         int digestsize = crypto_ahash_digestsize(ahash);
1278         struct ahash_edesc *edesc;
1279         bool chained = false;
1280         int sh_len;
1281         int ret = 0;
1282
1283         src_nents = __sg_count(req->src, req->nbytes, &chained);
1284         sec4_sg_src_index = 2;
1285         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1286                          sizeof(struct sec4_sg_entry);
1287
1288         /* allocate space for base edesc and hw desc commands, link tables */
1289         edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1290                         sec4_sg_bytes, GFP_DMA | flags);
1291         if (!edesc) {
1292                 dev_err(jrdev, "could not allocate extended descriptor\n");
1293                 return -ENOMEM;
1294         }
1295
1296         sh_len = desc_len(sh_desc);
1297         desc = edesc->hw_desc;
1298         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1299
1300         edesc->src_nents = src_nents;
1301         edesc->chained = chained;
1302         edesc->sec4_sg_bytes = sec4_sg_bytes;
1303         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1304                          DESC_JOB_IO_LEN;
1305         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1306                                             sec4_sg_bytes, DMA_TO_DEVICE);
1307
1308         state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1309                                                 state->buf_dma, buflen,
1310                                                 last_buflen);
1311
1312         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1313                            chained);
1314
1315         append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1316                                req->nbytes, LDST_SGF);
1317
1318         edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1319                                                 digestsize);
1320
1321 #ifdef DEBUG
1322         print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1323                        DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1324 #endif
1325
1326         ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1327         if (!ret) {
1328                 ret = -EINPROGRESS;
1329         } else {
1330                 ahash_unmap(jrdev, edesc, req, digestsize);
1331                 kfree(edesc);
1332         }
1333
1334         return ret;
1335 }
1336
1337 /* submit first update job descriptor after init */
1338 static int ahash_update_first(struct ahash_request *req)
1339 {
1340         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1341         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1342         struct caam_hash_state *state = ahash_request_ctx(req);
1343         struct device *jrdev = ctx->jrdev;
1344         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1345                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1346         u8 *next_buf = state->buf_0 + state->current_buf *
1347                        CAAM_MAX_HASH_BLOCK_SIZE;
1348         int *next_buflen = &state->buflen_0 + state->current_buf;
1349         int to_hash;
1350         u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1351         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1352         int sec4_sg_bytes, src_nents;
1353         dma_addr_t src_dma;
1354         u32 options;
1355         struct ahash_edesc *edesc;
1356         bool chained = false;
1357         int ret = 0;
1358         int sh_len;
1359
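        /* only whole blocks are hashed now; the remainder is buffered for the next request */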
1360         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1361                                       1);
1362         to_hash = req->nbytes - *next_buflen;
1363
1364         if (to_hash) {
1365                 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1366                                      &chained);
1367                 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1368                                    DMA_TO_DEVICE, chained);
1369                 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1370
1371                 /*
1372                  * allocate space for base edesc and hw desc commands,
1373                  * link tables
1374                  */
1375                 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1376                                 sec4_sg_bytes, GFP_DMA | flags);
1377                 if (!edesc) {
1378                         dev_err(jrdev,
1379                                 "could not allocate extended descriptor\n");
1380                         return -ENOMEM;
1381                 }
1382
1383                 edesc->src_nents = src_nents;
1384                 edesc->chained = chained;
1385                 edesc->sec4_sg_bytes = sec4_sg_bytes;
1386                 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1387                                  DESC_JOB_IO_LEN;
1388                 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1389                                                     sec4_sg_bytes,
1390                                                     DMA_TO_DEVICE);
1391
1392                 if (src_nents) {
1393                         sg_to_sec4_sg_last(req->src, src_nents,
1394                                            edesc->sec4_sg, 0);
1395                         src_dma = edesc->sec4_sg_dma;
1396                         options = LDST_SGF;
1397                 } else {
1398                         src_dma = sg_dma_address(req->src);
1399                         options = 0;
1400                 }
1401
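                /* stash the trailing, unhashed bytes for the next update/final */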
1402                 if (*next_buflen)
1403                         sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1404
1405                 sh_len = desc_len(sh_desc);
1406                 desc = edesc->hw_desc;
1407                 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1408                                      HDR_REVERSE);
1409
1410                 append_seq_in_ptr(desc, src_dma, to_hash, options);
1411
1412                 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1413
1414 #ifdef DEBUG
1415                 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
1416                                DUMP_PREFIX_ADDRESS, 16, 4, desc,
1417                                desc_bytes(desc), 1);
1418 #endif
1419
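                /* enqueue the job; on success, later requests go through the context-carrying handlers */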
1420                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1421                                       req);
1422                 if (!ret) {
1423                         ret = -EINPROGRESS;
1424                         state->update = ahash_update_ctx;
1425                         state->finup = ahash_finup_ctx;
1426                         state->final = ahash_final_ctx;
1427                 } else {
1428                         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1429                                         DMA_TO_DEVICE);
1430                         kfree(edesc);
1431                 }
1432         } else if (*next_buflen) {
1433                 state->update = ahash_update_no_ctx;
1434                 state->finup = ahash_finup_no_ctx;
1435                 state->final = ahash_final_no_ctx;
1436                 sg_copy(next_buf, req->src, req->nbytes);
1437         }
1438 #ifdef DEBUG
1439         print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
1440                        DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1441                        *next_buflen, 1);
1442 #endif
1443
1444         return ret;
1445 }
1446
1447 static int ahash_finup_first(struct ahash_request *req)
1448 {
1449         return ahash_digest(req);
1450 }
1451
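/* crypto_ahash_init(): reset the per-request state machine; the first update or finup selects the descriptor path that follows */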
1452 static int ahash_init(struct ahash_request *req)
1453 {
1454         struct caam_hash_state *state = ahash_request_ctx(req);
1455
1456         state->update = ahash_update_first;
1457         state->finup = ahash_finup_first;
1458         state->final = ahash_final_no_ctx;
1459
1460         state->current_buf = 0;
1461
1462         return 0;
1463 }
1464
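/* thin dispatchers that hand each request to whichever handler the state machine currently selects */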
1465 static int ahash_update(struct ahash_request *req)
1466 {
1467         struct caam_hash_state *state = ahash_request_ctx(req);
1468
1469         return state->update(req);
1470 }
1471
1472 static int ahash_finup(struct ahash_request *req)
1473 {
1474         struct caam_hash_state *state = ahash_request_ctx(req);
1475
1476         return state->finup(req);
1477 }
1478
1479 static int ahash_final(struct ahash_request *req)
1480 {
1481         struct caam_hash_state *state = ahash_request_ctx(req);
1482
1483         return state->final(req);
1484 }
1485
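/* export/import copy the tfm context and the request state back to back so a partially hashed request can be saved and resumed */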
1486 static int ahash_export(struct ahash_request *req, void *out)
1487 {
1488         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1489         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1490         struct caam_hash_state *state = ahash_request_ctx(req);
1491
1492         memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1493         memcpy(out + sizeof(struct caam_hash_ctx), state,
1494                sizeof(struct caam_hash_state));
1495         return 0;
1496 }
1497
1498 static int ahash_import(struct ahash_request *req, const void *in)
1499 {
1500         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1501         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1502         struct caam_hash_state *state = ahash_request_ctx(req);
1503
1504         memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1505         memcpy(state, in + sizeof(struct caam_hash_ctx),
1506                sizeof(struct caam_hash_state));
1507         return 0;
1508 }
1509
1510 struct caam_hash_template {
1511         char name[CRYPTO_MAX_ALG_NAME];
1512         char driver_name[CRYPTO_MAX_ALG_NAME];
1513         char hmac_name[CRYPTO_MAX_ALG_NAME];
1514         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1515         unsigned int blocksize;
1516         struct ahash_alg template_ahash;
1517         u32 alg_type;
1518         u32 alg_op;
1519 };
1520
1521 /* ahash algorithm templates */
1522 static struct caam_hash_template driver_hash[] = {
1523         {
1524                 .name = "sha1",
1525                 .driver_name = "sha1-caam",
1526                 .hmac_name = "hmac(sha1)",
1527                 .hmac_driver_name = "hmac-sha1-caam",
1528                 .blocksize = SHA1_BLOCK_SIZE,
1529                 .template_ahash = {
1530                         .init = ahash_init,
1531                         .update = ahash_update,
1532                         .final = ahash_final,
1533                         .finup = ahash_finup,
1534                         .digest = ahash_digest,
1535                         .export = ahash_export,
1536                         .import = ahash_import,
1537                         .setkey = ahash_setkey,
1538                         .halg = {
1539                                 .digestsize = SHA1_DIGEST_SIZE,
1540                                 },
1541                         },
1542                 .alg_type = OP_ALG_ALGSEL_SHA1,
1543                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1544         }, {
1545                 .name = "sha224",
1546                 .driver_name = "sha224-caam",
1547                 .hmac_name = "hmac(sha224)",
1548                 .hmac_driver_name = "hmac-sha224-caam",
1549                 .blocksize = SHA224_BLOCK_SIZE,
1550                 .template_ahash = {
1551                         .init = ahash_init,
1552                         .update = ahash_update,
1553                         .final = ahash_final,
1554                         .finup = ahash_finup,
1555                         .digest = ahash_digest,
1556                         .export = ahash_export,
1557                         .import = ahash_import,
1558                         .setkey = ahash_setkey,
1559                         .halg = {
1560                                 .digestsize = SHA224_DIGEST_SIZE,
1561                                 },
1562                         },
1563                 .alg_type = OP_ALG_ALGSEL_SHA224,
1564                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1565         }, {
1566                 .name = "sha256",
1567                 .driver_name = "sha256-caam",
1568                 .hmac_name = "hmac(sha256)",
1569                 .hmac_driver_name = "hmac-sha256-caam",
1570                 .blocksize = SHA256_BLOCK_SIZE,
1571                 .template_ahash = {
1572                         .init = ahash_init,
1573                         .update = ahash_update,
1574                         .final = ahash_final,
1575                         .finup = ahash_finup,
1576                         .digest = ahash_digest,
1577                         .export = ahash_export,
1578                         .import = ahash_import,
1579                         .setkey = ahash_setkey,
1580                         .halg = {
1581                                 .digestsize = SHA256_DIGEST_SIZE,
1582                                 },
1583                         },
1584                 .alg_type = OP_ALG_ALGSEL_SHA256,
1585                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1586         }, {
1587                 .name = "sha384",
1588                 .driver_name = "sha384-caam",
1589                 .hmac_name = "hmac(sha384)",
1590                 .hmac_driver_name = "hmac-sha384-caam",
1591                 .blocksize = SHA384_BLOCK_SIZE,
1592                 .template_ahash = {
1593                         .init = ahash_init,
1594                         .update = ahash_update,
1595                         .final = ahash_final,
1596                         .finup = ahash_finup,
1597                         .digest = ahash_digest,
1598                         .export = ahash_export,
1599                         .import = ahash_import,
1600                         .setkey = ahash_setkey,
1601                         .halg = {
1602                                 .digestsize = SHA384_DIGEST_SIZE,
1603                                 },
1604                         },
1605                 .alg_type = OP_ALG_ALGSEL_SHA384,
1606                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1607         }, {
1608                 .name = "sha512",
1609                 .driver_name = "sha512-caam",
1610                 .hmac_name = "hmac(sha512)",
1611                 .hmac_driver_name = "hmac-sha512-caam",
1612                 .blocksize = SHA512_BLOCK_SIZE,
1613                 .template_ahash = {
1614                         .init = ahash_init,
1615                         .update = ahash_update,
1616                         .final = ahash_final,
1617                         .finup = ahash_finup,
1618                         .digest = ahash_digest,
1619                         .export = ahash_export,
1620                         .import = ahash_import,
1621                         .setkey = ahash_setkey,
1622                         .halg = {
1623                                 .digestsize = SHA512_DIGEST_SIZE,
1624                                 },
1625                         },
1626                 .alg_type = OP_ALG_ALGSEL_SHA512,
1627                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1628         }, {
1629                 .name = "md5",
1630                 .driver_name = "md5-caam",
1631                 .hmac_name = "hmac(md5)",
1632                 .hmac_driver_name = "hmac-md5-caam",
1633                 .blocksize = MD5_BLOCK_WORDS * 4,
1634                 .template_ahash = {
1635                         .init = ahash_init,
1636                         .update = ahash_update,
1637                         .final = ahash_final,
1638                         .finup = ahash_finup,
1639                         .digest = ahash_digest,
1640                         .export = ahash_export,
1641                         .import = ahash_import,
1642                         .setkey = ahash_setkey,
1643                         .halg = {
1644                                 .digestsize = MD5_DIGEST_SIZE,
1645                                 },
1646                         },
1647                 .alg_type = OP_ALG_ALGSEL_MD5,
1648                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1649         },
1650 };
1651
1652 struct caam_hash_alg {
1653         struct list_head entry;
1654         struct device *ctrldev;
1655         int alg_type;
1656         int alg_op;
1657         struct ahash_alg ahash_alg;
1658 };
1659
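/* per-tfm init: pick a job ring, record the algorithm type and operation, size the running context and build the shared descriptors */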
1660 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1661 {
1662         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1663         struct crypto_alg *base = tfm->__crt_alg;
1664         struct hash_alg_common *halg =
1665                  container_of(base, struct hash_alg_common, base);
1666         struct ahash_alg *alg =
1667                  container_of(halg, struct ahash_alg, halg);
1668         struct caam_hash_alg *caam_hash =
1669                  container_of(alg, struct caam_hash_alg, ahash_alg);
1670         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1671         struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
1672         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1673         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1674                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1675                                          HASH_MSG_LEN + 32,
1676                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1677                                          HASH_MSG_LEN + 64,
1678                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1679         int tgt_jr = atomic_inc_return(&priv->tfm_count);
1680         int ret = 0;
1681
1682         /*
1683          * distribute tfms across job rings to ensure in-order
1684          * crypto request processing per tfm
1685          */
1686         ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
1687
1688         /* copy descriptor header template value */
1689         ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1690         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1691
1692         ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1693                                   OP_ALG_ALGSEL_SHIFT];
1694
1695         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1696                                  sizeof(struct caam_hash_state));
1697
1698         ret = ahash_set_sh_desc(ahash);
1699
1700         return ret;
1701 }
1702
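/* per-tfm teardown: unmap whichever shared descriptors were DMA-mapped */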
1703 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1704 {
1705         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1706
1707         if (ctx->sh_desc_update_dma &&
1708             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1709                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1710                                  desc_bytes(ctx->sh_desc_update),
1711                                  DMA_TO_DEVICE);
1712         if (ctx->sh_desc_update_first_dma &&
1713             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1714                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1715                                  desc_bytes(ctx->sh_desc_update_first),
1716                                  DMA_TO_DEVICE);
1717         if (ctx->sh_desc_fin_dma &&
1718             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1719                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1720                                  desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1721         if (ctx->sh_desc_digest_dma &&
1722             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1723                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1724                                  desc_bytes(ctx->sh_desc_digest),
1725                                  DMA_TO_DEVICE);
1726         if (ctx->sh_desc_finup_dma &&
1727             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1728                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1729                                  desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1730 }
1731
1732 static void __exit caam_algapi_hash_exit(void)
1733 {
1734         struct device_node *dev_node;
1735         struct platform_device *pdev;
1736         struct device *ctrldev;
1737         struct caam_drv_private *priv;
1738         struct caam_hash_alg *t_alg, *n;
1739
1740         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1741         if (!dev_node) {
1742                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1743                 if (!dev_node)
1744                         return;
1745         }
1746
1747         pdev = of_find_device_by_node(dev_node);
1748         if (!pdev)
1749                 return;
1750
1751         ctrldev = &pdev->dev;
1752         of_node_put(dev_node);
1753         priv = dev_get_drvdata(ctrldev);
1754
1755         if (!priv->hash_list.next)
1756                 return;
1757
1758         list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
1759                 crypto_unregister_ahash(&t_alg->ahash_alg);
1760                 list_del(&t_alg->entry);
1761                 kfree(t_alg);
1762         }
1763 }
1764
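/* allocate a caam_hash_alg and fill it from a template; 'keyed' selects the hmac(...) names */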
1765 static struct caam_hash_alg *
1766 caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
1767                 bool keyed)
1768 {
1769         struct caam_hash_alg *t_alg;
1770         struct ahash_alg *halg;
1771         struct crypto_alg *alg;
1772
1773         t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1774         if (!t_alg) {
1775                 dev_err(ctrldev, "failed to allocate t_alg\n");
1776                 return ERR_PTR(-ENOMEM);
1777         }
1778
1779         t_alg->ahash_alg = template->template_ahash;
1780         halg = &t_alg->ahash_alg;
1781         alg = &halg->halg.base;
1782
1783         if (keyed) {
1784                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1785                          template->hmac_name);
1786                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1787                          template->hmac_driver_name);
1788         } else {
1789                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1790                          template->name);
1791                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1792                          template->driver_name);
1793         }
1794         alg->cra_module = THIS_MODULE;
1795         alg->cra_init = caam_hash_cra_init;
1796         alg->cra_exit = caam_hash_cra_exit;
1797         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1798         alg->cra_priority = CAAM_CRA_PRIORITY;
1799         alg->cra_blocksize = template->blocksize;
1800         alg->cra_alignmask = 0;
1801         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1802         alg->cra_type = &crypto_ahash_type;
1803
1804         t_alg->alg_type = template->alg_type;
1805         t_alg->alg_op = template->alg_op;
1806         t_alg->ctrldev = ctrldev;
1807
1808         return t_alg;
1809 }
1810
1811 static int __init caam_algapi_hash_init(void)
1812 {
1813         struct device_node *dev_node;
1814         struct platform_device *pdev;
1815         struct device *ctrldev;
1816         struct caam_drv_private *priv;
1817         int i = 0, err = 0;
1818
1819         dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1820         if (!dev_node) {
1821                 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1822                 if (!dev_node)
1823                         return -ENODEV;
1824         }
1825
1826         pdev = of_find_device_by_node(dev_node);
1827         if (!pdev)
1828                 return -ENODEV;
1829
1830         ctrldev = &pdev->dev;
1831         priv = dev_get_drvdata(ctrldev);
1832         of_node_put(dev_node);
1833
1834         INIT_LIST_HEAD(&priv->hash_list);
1835
1836         atomic_set(&priv->tfm_count, -1);
1837
1838         /* register crypto algorithms the device supports */
1839         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1840                 /* TODO: check if h/w supports alg */
1841                 struct caam_hash_alg *t_alg;
1842
1843                 /* register hmac version */
1844                 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
1845                 if (IS_ERR(t_alg)) {
1846                         err = PTR_ERR(t_alg);
1847                         dev_warn(ctrldev, "%s alg allocation failed\n",
1848                                  driver_hash[i].driver_name);
1849                         continue;
1850                 }
1851
1852                 err = crypto_register_ahash(&t_alg->ahash_alg);
1853                 if (err) {
1854                         dev_warn(ctrldev, "%s alg registration failed\n",
1855                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1856                         kfree(t_alg);
1857                 } else
1858                         list_add_tail(&t_alg->entry, &priv->hash_list);
1859
1860                 /* register unkeyed version */
1861                 t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
1862                 if (IS_ERR(t_alg)) {
1863                         err = PTR_ERR(t_alg);
1864                         dev_warn(ctrldev, "%s alg allocation failed\n",
1865                                  driver_hash[i].driver_name);
1866                         continue;
1867                 }
1868
1869                 err = crypto_register_ahash(&t_alg->ahash_alg);
1870                 if (err) {
1871                         dev_warn(ctrldev, "%s alg registration failed\n",
1872                                 t_alg->ahash_alg.halg.base.cra_driver_name);
1873                         kfree(t_alg);
1874                 } else
1875                         list_add_tail(&t_alg->entry, &priv->hash_list);
1876         }
1877
1878         return err;
1879 }
1880
1881 module_init(caam_algapi_hash_init);
1882 module_exit(caam_algapi_hash_exit);
1883
1884 MODULE_LICENSE("GPL");
1885 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1886 MODULE_AUTHOR("Freescale Semiconductor - NMG");
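
/*
 * Illustrative only, not part of the driver: a minimal sketch of how other
 * kernel code could exercise one of the hashes registered above through the
 * generic ahash API.  Because CAAM_CRA_PRIORITY is 3000, asking for plain
 * "sha256" will normally resolve to "sha256-caam" once this module is loaded.
 * The example_* names are assumptions made for this sketch; only standard
 * crypto API calls are used.  Guarded out so it does not affect the build.
 */
#if 0
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_result {
        struct completion completion;
        int err;
};

/* completion callback: record the status and wake up the waiter */
static void example_hash_done(struct crypto_async_request *areq, int err)
{
        struct example_result *res = areq->data;

        if (err == -EINPROGRESS)
                return;         /* backlogged request has started; keep waiting */
        res->err = err;
        complete(&res->completion);
}

/*
 * data must be DMA-able (e.g. kmalloc'ed) since the driver maps it;
 * out must hold at least crypto_ahash_digestsize(tfm) bytes.
 */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct example_result result;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   example_hash_done, &result);

        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, out, len);

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&result.completion);
                ret = result.err;
        }

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}
#endif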