firefly-linux-kernel-4.4.55.git: drivers/crypto/caam/caamalg.c
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

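/*
 * Illustrative sketch (not part of the original driver): how the job
 * descriptor layout pictured in the header comment above maps onto the
 * desc_constr.h helpers used throughout this file.  The ex_-prefixed
 * names are hypothetical placeholders for a caller's buffers.
 */
static inline void ex_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				     int sh_desc_len, dma_addr_t ex_src_dma,
				     u32 ex_src_len, dma_addr_t ex_dst_dma,
				     u32 ex_dst_len)
{
	/* Header plus pointer to the shared descriptor for this session */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: this packet's input buffer and length */
	append_seq_in_ptr(desc, ex_src_dma, ex_src_len, 0);
	/* SEQ_OUT_PTR: where the processed output should land */
	append_seq_out_ptr(desc, ex_dst_dma, ex_dst_len, 0);
}
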
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
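
/*
 * Usage sketch (assumption, mirroring how the descriptor-building
 * paths below pass ctx->class1_alg_type): for AES-CBC the caller
 * hands in the class 1 algorithm selector, and the jump pair above
 * picks either the plain decrypt operation or its OP_ALG_AAI_DK
 * (Decrypt Key) variant when the key was installed by an earlier run
 * of the shared descriptor:
 *
 *	append_dec_op1(desc, OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC);
 */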

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
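
/*
 * Sketch of the inline-vs-referenced key decision the callers below
 * make before reaching append_key_aead() (taken from the checks in
 * aead_set_sh_desc()): keys are inlined only while job descriptor,
 * shared descriptor and key material together still fit the 64-word
 * descriptor buffer.
 *
 *	keys_fit_inline = (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
 *			   ctx->split_key_pad_len + ctx->enckeylen <=
 *			   CAAM_DESC_BYTES_MAX);
 */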

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output fifo */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 2 operation (resume the normal encrypt flow) */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
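
/*
 * Usage sketch (hypothetical caller, not from this file): the crypto
 * API routes crypto_aead_setauthsize() to the callback above, e.g. an
 * IPsec user of authenc(hmac(sha1),cbc(aes)) requesting a 12-byte
 * truncated ICV:
 *
 *	err = crypto_aead_setauthsize(aead_tfm, 12);
 */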

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
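
/*
 * Caller-side sketch (assumption, not part of this driver) of the
 * authenc() key blob that crypto_authenc_extractkeys() above parses:
 * an rtattr header carrying the cipher key length, then the raw auth
 * key, then the raw cipher key.  Assumes <crypto/authenc.h> and
 * <linux/rtnetlink.h> are available; ex_pack_authenc_key() is a
 * hypothetical helper.
 */
static inline int ex_pack_authenc_key(u8 *buf,
				      const u8 *authkey, u32 authkeylen,
				      const u8 *enckey, u32 enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key and cipher key follow the aligned rtattr */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}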

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
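
/*
 * Layout sketch (derived from aead_edesc_alloc() below): the edesc is
 * a single allocation, with the h/w job descriptor in the hw_desc[]
 * flexible array and the sec4_sg link table placed immediately
 * behind it:
 *
 *	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
 *			sec4_sg_bytes, GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 *			 desc_bytes;
 */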

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
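
/*
 * Link-table layout sketch for the non-contiguous case handled above
 * (the table itself is built by aead_edesc_alloc() below):
 *
 *	sec4_sg: [ assoc entries | iv entry | src entries | dst entries ]
 *
 * The seq-in pointer covers assoc + iv + src; for in-place requests
 * the seq-out pointer starts past the assoc and iv entries, while a
 * separate destination begins at sec4_sg_index.
 */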

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
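
/*
 * Corresponding link-table sketch for the ablkcipher path (derived
 * from the indexing above; the table is built by the corresponding
 * allocator):
 *
 *	sec4_sg: [ iv entry | src entries | dst entries ]
 *
 * The seq-in pointer covers iv + src; an in-place seq-out pointer
 * starts one entry in (past the iv), a separate destination at
 * sec4_sg_index.
 */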

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
1384
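/*
 * Minimal sketch, not used by the driver, of the link-table sizing rule
 * applied in aead_edesc_alloc() above: a non-contiguous layout needs one
 * sec4 entry per assoc segment, one for the IV and one per source
 * segment, and a distinct destination adds one entry per segment.
 */
static inline int caam_example_aead_sec4_bytes(int assoc_nents, int src_nents,
                                               int dst_nents, bool all_contig)
{
        int sec4_sg_len = 0;

        if (!all_contig)
                sec4_sg_len = (assoc_nents ? : 1) + 1 + (src_nents ? : 1);
        sec4_sg_len += dst_nents;

        return sec4_sg_len * sizeof(struct sec4_sg_entry);
}
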
1385 static int aead_encrypt(struct aead_request *req)
1386 {
1387         struct aead_edesc *edesc;
1388         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1389         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1390         struct device *jrdev = ctx->jrdev;
1391         bool all_contig;
1392         u32 *desc;
1393         int ret = 0;
1394
1395         /* allocate extended descriptor */
1396         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1397                                  CAAM_CMD_SZ, &all_contig, true);
1398         if (IS_ERR(edesc))
1399                 return PTR_ERR(edesc);
1400
1401         /* Create and submit job descriptor */
1402         init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1403                       all_contig, true);
1404 #ifdef DEBUG
1405         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1406                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1407                        desc_bytes(edesc->hw_desc), 1);
1408 #endif
1409
1410         desc = edesc->hw_desc;
1411         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1412         if (!ret) {
1413                 ret = -EINPROGRESS;
1414         } else {
1415                 aead_unmap(jrdev, edesc, req);
1416                 kfree(edesc);
1417         }
1418
1419         return ret;
1420 }
1421
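/*
 * aead_decrypt() below mirrors aead_encrypt() above: when
 * caam_jr_enqueue() accepts the job, the request completes
 * asynchronously and -EINPROGRESS is reported; on an enqueue error the
 * extended descriptor is unmapped and freed here instead.
 */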
1422 static int aead_decrypt(struct aead_request *req)
1423 {
1424         struct aead_edesc *edesc;
1425         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1426         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1427         struct device *jrdev = ctx->jrdev;
1428         bool all_contig;
1429         u32 *desc;
1430         int ret = 0;
1431
1432         /* allocate extended descriptor */
1433         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1434                                  CAAM_CMD_SZ, &all_contig, false);
1435         if (IS_ERR(edesc))
1436                 return PTR_ERR(edesc);
1437
1438 #ifdef DEBUG
1439         print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1440                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1441                        req->cryptlen, 1);
1442 #endif
1443
1444         /* Create and submit job descriptor */
1445         init_aead_job(ctx->sh_desc_dec,
1446                       ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1447 #ifdef DEBUG
1448         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1449                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1450                        desc_bytes(edesc->hw_desc), 1);
1451 #endif
1452
1453         desc = edesc->hw_desc;
1454         ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1455         if (!ret) {
1456                 ret = -EINPROGRESS;
1457         } else {
1458                 aead_unmap(jrdev, edesc, req);
1459                 kfree(edesc);
1460         }
1461
1462         return ret;
1463 }
1464
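/*
 * Givencrypt tracks two contiguity properties separately: GIV_SRC_CONTIG
 * (assoc data, IV and source form one DMA-contiguous input) and
 * GIV_DST_CONTIG (IV and destination form one contiguous output). A
 * sec4 scatter/gather slice is built below for whichever side fails.
 */
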
1465 /*
1466  * allocate and map the aead extended descriptor for aead givencrypt
1467  */
1468 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1469                                                *greq, int desc_bytes,
1470                                                u32 *contig_ptr)
1471 {
1472         struct aead_request *req = &greq->areq;
1473         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1474         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1475         struct device *jrdev = ctx->jrdev;
1476         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1478         int assoc_nents, src_nents, dst_nents = 0;
1479         struct aead_edesc *edesc;
1480         dma_addr_t iv_dma = 0;
1481         int sgc;
1482         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1483         int ivsize = crypto_aead_ivsize(aead);
1484         bool assoc_chained = false, src_chained = false, dst_chained = false;
1485         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1486
1487         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1488         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1489
1490         if (unlikely(req->dst != req->src))
1491                 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1492                                      &dst_chained);
1493
1494         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1495                                  DMA_TO_DEVICE, assoc_chained);
1496         if (likely(req->src == req->dst)) {
1497                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498                                          DMA_BIDIRECTIONAL, src_chained);
1499         } else {
1500                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1501                                          DMA_TO_DEVICE, src_chained);
1502                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1503                                          DMA_FROM_DEVICE, dst_chained);
1504         }
1505
1506         /* Check if data are contiguous */
1507         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1508         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1509             iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1510                 contig &= ~GIV_SRC_CONTIG;
1511         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1512                 contig &= ~GIV_DST_CONTIG;
1513         if (unlikely(req->src != req->dst)) {
1514                 dst_nents = dst_nents ? : 1;
1515                 sec4_sg_len += 1;
1516         }
1517         if (!(contig & GIV_SRC_CONTIG)) {
1518                 assoc_nents = assoc_nents ? : 1;
1519                 src_nents = src_nents ? : 1;
1520                 sec4_sg_len += assoc_nents + 1 + src_nents;
1521                 if (likely(req->src == req->dst))
1522                         contig &= ~GIV_DST_CONTIG;
1523         }
1524         sec4_sg_len += dst_nents;
1525
1526         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1527
1528         /* allocate space for base edesc and hw desc commands, link tables */
1529         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1530                         sec4_sg_bytes, GFP_DMA | flags);
1531         if (!edesc) {
1532                 dev_err(jrdev, "could not allocate extended descriptor\n");
1533                 return ERR_PTR(-ENOMEM);
1534         }
1535
1536         edesc->assoc_nents = assoc_nents;
1537         edesc->assoc_chained = assoc_chained;
1538         edesc->src_nents = src_nents;
1539         edesc->src_chained = src_chained;
1540         edesc->dst_nents = dst_nents;
1541         edesc->dst_chained = dst_chained;
1542         edesc->iv_dma = iv_dma;
1543         edesc->sec4_sg_bytes = sec4_sg_bytes;
1544         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1545                          desc_bytes;
1546         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1547                                             sec4_sg_bytes, DMA_TO_DEVICE);
1548         *contig_ptr = contig;
1549
1550         sec4_sg_index = 0;
1551         if (!(contig & GIV_SRC_CONTIG)) {
1552                 sg_to_sec4_sg(req->assoc, assoc_nents,
1553                               edesc->sec4_sg +
1554                               sec4_sg_index, 0);
1555                 sec4_sg_index += assoc_nents;
1556                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1557                                    iv_dma, ivsize, 0);
1558                 sec4_sg_index += 1;
1559                 sg_to_sec4_sg_last(req->src, src_nents,
1560                                    edesc->sec4_sg +
1561                                    sec4_sg_index, 0);
1562                 sec4_sg_index += src_nents;
1563         }
1564         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1565                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1566                                    iv_dma, ivsize, 0);
1567                 sec4_sg_index += 1;
1568                 sg_to_sec4_sg_last(req->dst, dst_nents,
1569                                    edesc->sec4_sg + sec4_sg_index, 0);
1570         }
1571
1572         return edesc;
1573 }
1574
1575 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1576 {
1577         struct aead_request *req = &areq->areq;
1578         struct aead_edesc *edesc;
1579         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1580         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1581         struct device *jrdev = ctx->jrdev;
1582         u32 contig;
1583         u32 *desc;
1584         int ret = 0;
1585
1586         /* allocate extended descriptor */
1587         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1588                                      CAAM_CMD_SZ, &contig);
1590         if (IS_ERR(edesc))
1591                 return PTR_ERR(edesc);
1592
1593 #ifdef DEBUG
1594         print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1595                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1596                        req->cryptlen, 1);
1597 #endif
1598
1599         /* Create and submit job descriptor */
1600         init_aead_giv_job(ctx->sh_desc_givenc,
1601                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1602 #ifdef DEBUG
1603         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1604                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1605                        desc_bytes(edesc->hw_desc), 1);
1606 #endif
1607
1608         desc = edesc->hw_desc;
1609         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1610         if (!ret) {
1611                 ret = -EINPROGRESS;
1612         } else {
1613                 aead_unmap(jrdev, edesc, req);
1614                 kfree(edesc);
1615         }
1616
1617         return ret;
1618 }
1619
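/*
 * The cipher_null templates advertise NULL_IV_SIZE, i.e. no IV, so
 * there is nothing to generate and givencrypt degenerates to a plain
 * encrypt.
 */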
1620 static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1621 {
1622         return aead_encrypt(&areq->areq);
1623 }
1624
1625 /*
1626  * allocate and map the ablkcipher extended descriptor
1627  */
1628 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1629                                                        *req, int desc_bytes,
1630                                                        bool *iv_contig_out)
1631 {
1632         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1633         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1634         struct device *jrdev = ctx->jrdev;
1635         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1636                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1637                        GFP_KERNEL : GFP_ATOMIC;
1638         int src_nents, dst_nents = 0, sec4_sg_bytes;
1639         struct ablkcipher_edesc *edesc;
1640         dma_addr_t iv_dma = 0;
1641         bool iv_contig = false;
1642         int sgc;
1643         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1644         bool src_chained = false, dst_chained = false;
1645         int sec4_sg_index;
1646
1647         src_nents = sg_count(req->src, req->nbytes, &src_chained);
1648
1649         if (req->dst != req->src)
1650                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1651
1652         if (likely(req->src == req->dst)) {
1653                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1654                                          DMA_BIDIRECTIONAL, src_chained);
1655         } else {
1656                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1657                                          DMA_TO_DEVICE, src_chained);
1658                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1659                                          DMA_FROM_DEVICE, dst_chained);
1660         }
1661
1662         /*
1663          * Check if the IV can be contiguous with the source. If so,
1664          * use it directly; if not, prepend it to the scatter/gather table.
1665          */
1666         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1667         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1668                 iv_contig = true;
1669         else
1670                 src_nents = src_nents ? : 1;
1671         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1672                         sizeof(struct sec4_sg_entry);
1673
1674         /* allocate space for base edesc and hw desc commands, link tables */
1675         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1676                         sec4_sg_bytes, GFP_DMA | flags);
1677         if (!edesc) {
1678                 dev_err(jrdev, "could not allocate extended descriptor\n");
1679                 return ERR_PTR(-ENOMEM);
1680         }
1681
1682         edesc->src_nents = src_nents;
1683         edesc->src_chained = src_chained;
1684         edesc->dst_nents = dst_nents;
1685         edesc->dst_chained = dst_chained;
1686         edesc->sec4_sg_bytes = sec4_sg_bytes;
1687         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1688                          desc_bytes;
1689
1690         sec4_sg_index = 0;
1691         if (!iv_contig) {
1692                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1693                 sg_to_sec4_sg_last(req->src, src_nents,
1694                                    edesc->sec4_sg + 1, 0);
1695                 sec4_sg_index += 1 + src_nents;
1696         }
1697
1698         if (dst_nents) {
1699                 sg_to_sec4_sg_last(req->dst, dst_nents,
1700                         edesc->sec4_sg + sec4_sg_index, 0);
1701         }
1702
1703         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1704                                             sec4_sg_bytes, DMA_TO_DEVICE);
1705         edesc->iv_dma = iv_dma;
1706
1707 #ifdef DEBUG
1708         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1709                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1710                        sec4_sg_bytes, 1);
1711 #endif
1712
1713         *iv_contig_out = iv_contig;
1714         return edesc;
1715 }
1716
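/*
 * Note on the layout built above: when the IV is not already contiguous
 * with the source it is prepended as the first sec4 entry, so the
 * hardware reads IV and payload through one table; a distinct
 * destination gets its own table slice starting at sec4_sg_index.
 */
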
1717 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1718 {
1719         struct ablkcipher_edesc *edesc;
1720         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1721         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1722         struct device *jrdev = ctx->jrdev;
1723         bool iv_contig;
1724         u32 *desc;
1725         int ret = 0;
1726
1727         /* allocate extended descriptor */
1728         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1729                                        CAAM_CMD_SZ, &iv_contig);
1730         if (IS_ERR(edesc))
1731                 return PTR_ERR(edesc);
1732
1733         /* Create and submit job descriptor */
1734         init_ablkcipher_job(ctx->sh_desc_enc,
1735                 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1736 #ifdef DEBUG
1737         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1738                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1739                        desc_bytes(edesc->hw_desc), 1);
1740 #endif
1741         desc = edesc->hw_desc;
1742         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1743
1744         if (!ret) {
1745                 ret = -EINPROGRESS;
1746         } else {
1747                 ablkcipher_unmap(jrdev, edesc, req);
1748                 kfree(edesc);
1749         }
1750
1751         return ret;
1752 }
1753
1754 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1755 {
1756         struct ablkcipher_edesc *edesc;
1757         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1758         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1759         struct device *jrdev = ctx->jrdev;
1760         bool iv_contig;
1761         u32 *desc;
1762         int ret = 0;
1763
1764         /* allocate extended descriptor */
1765         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1766                                        CAAM_CMD_SZ, &iv_contig);
1767         if (IS_ERR(edesc))
1768                 return PTR_ERR(edesc);
1769
1770         /* Create and submit job descriptor */
1771         init_ablkcipher_job(ctx->sh_desc_dec,
1772                 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1773         desc = edesc->hw_desc;
1774 #ifdef DEBUG
1775         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1776                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1777                        desc_bytes(edesc->hw_desc), 1);
1778 #endif
1779
1780         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1781         if (!ret) {
1782                 ret = -EINPROGRESS;
1783         } else {
1784                 ablkcipher_unmap(jrdev, edesc, req);
1785                 kfree(edesc);
1786         }
1787
1788         return ret;
1789 }
1790
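/*
 * Algorithm templates: one caam_alg_template per transform the driver
 * offers. The template_u union carries the crypto API callbacks for
 * whichever .type an entry declares; the accessors below select the
 * matching union member.
 */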
1791 #define template_aead           template_u.aead
1792 #define template_ablkcipher     template_u.ablkcipher
1793 struct caam_alg_template {
1794         char name[CRYPTO_MAX_ALG_NAME];
1795         char driver_name[CRYPTO_MAX_ALG_NAME];
1796         unsigned int blocksize;
1797         u32 type;
1798         union {
1799                 struct ablkcipher_alg ablkcipher;
1800                 struct aead_alg aead;
1801                 struct blkcipher_alg blkcipher;
1802                 struct cipher_alg cipher;
1803                 struct compress_alg compress;
1804                 struct rng_alg rng;
1805         } template_u;
1806         u32 class1_alg_type;
1807         u32 class2_alg_type;
1808         u32 alg_op;
1809 };
1810
1811 static struct caam_alg_template driver_algs[] = {
1812         /* single-pass ipsec_esp descriptor */
1813         {
1814                 .name = "authenc(hmac(md5),ecb(cipher_null))",
1815                 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
1816                 .blocksize = NULL_BLOCK_SIZE,
1817                 .type = CRYPTO_ALG_TYPE_AEAD,
1818                 .template_aead = {
1819                         .setkey = aead_setkey,
1820                         .setauthsize = aead_setauthsize,
1821                         .encrypt = aead_encrypt,
1822                         .decrypt = aead_decrypt,
1823                         .givencrypt = aead_null_givencrypt,
1824                         .geniv = "<built-in>",
1825                         .ivsize = NULL_IV_SIZE,
1826                         .maxauthsize = MD5_DIGEST_SIZE,
1827                         },
1828                 .class1_alg_type = 0,
1829                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1830                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1831         },
1832         {
1833                 .name = "authenc(hmac(sha1),ecb(cipher_null))",
1834                 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
1835                 .blocksize = NULL_BLOCK_SIZE,
1836                 .type = CRYPTO_ALG_TYPE_AEAD,
1837                 .template_aead = {
1838                         .setkey = aead_setkey,
1839                         .setauthsize = aead_setauthsize,
1840                         .encrypt = aead_encrypt,
1841                         .decrypt = aead_decrypt,
1842                         .givencrypt = aead_null_givencrypt,
1843                         .geniv = "<built-in>",
1844                         .ivsize = NULL_IV_SIZE,
1845                         .maxauthsize = SHA1_DIGEST_SIZE,
1846                         },
1847                 .class1_alg_type = 0,
1848                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1849                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1850         },
1851         {
1852                 .name = "authenc(hmac(sha224),ecb(cipher_null))",
1853                 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
1854                 .blocksize = NULL_BLOCK_SIZE,
1855                 .type = CRYPTO_ALG_TYPE_AEAD,
1856                 .template_aead = {
1857                         .setkey = aead_setkey,
1858                         .setauthsize = aead_setauthsize,
1859                         .encrypt = aead_encrypt,
1860                         .decrypt = aead_decrypt,
1861                         .givencrypt = aead_null_givencrypt,
1862                         .geniv = "<built-in>",
1863                         .ivsize = NULL_IV_SIZE,
1864                         .maxauthsize = SHA224_DIGEST_SIZE,
1865                         },
1866                 .class1_alg_type = 0,
1867                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1868                                    OP_ALG_AAI_HMAC_PRECOMP,
1869                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1870         },
1871         {
1872                 .name = "authenc(hmac(sha256),ecb(cipher_null))",
1873                 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
1874                 .blocksize = NULL_BLOCK_SIZE,
1875                 .type = CRYPTO_ALG_TYPE_AEAD,
1876                 .template_aead = {
1877                         .setkey = aead_setkey,
1878                         .setauthsize = aead_setauthsize,
1879                         .encrypt = aead_encrypt,
1880                         .decrypt = aead_decrypt,
1881                         .givencrypt = aead_null_givencrypt,
1882                         .geniv = "<built-in>",
1883                         .ivsize = NULL_IV_SIZE,
1884                         .maxauthsize = SHA256_DIGEST_SIZE,
1885                         },
1886                 .class1_alg_type = 0,
1887                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1888                                    OP_ALG_AAI_HMAC_PRECOMP,
1889                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1890         },
1891         {
1892                 .name = "authenc(hmac(sha384),ecb(cipher_null))",
1893                 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
1894                 .blocksize = NULL_BLOCK_SIZE,
1895                 .type = CRYPTO_ALG_TYPE_AEAD,
1896                 .template_aead = {
1897                         .setkey = aead_setkey,
1898                         .setauthsize = aead_setauthsize,
1899                         .encrypt = aead_encrypt,
1900                         .decrypt = aead_decrypt,
1901                         .givencrypt = aead_null_givencrypt,
1902                         .geniv = "<built-in>",
1903                         .ivsize = NULL_IV_SIZE,
1904                         .maxauthsize = SHA384_DIGEST_SIZE,
1905                         },
1906                 .class1_alg_type = 0,
1907                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1908                                    OP_ALG_AAI_HMAC_PRECOMP,
1909                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1910         },
1911         {
1912                 .name = "authenc(hmac(sha512),ecb(cipher_null))",
1913                 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
1914                 .blocksize = NULL_BLOCK_SIZE,
1915                 .type = CRYPTO_ALG_TYPE_AEAD,
1916                 .template_aead = {
1917                         .setkey = aead_setkey,
1918                         .setauthsize = aead_setauthsize,
1919                         .encrypt = aead_encrypt,
1920                         .decrypt = aead_decrypt,
1921                         .givencrypt = aead_null_givencrypt,
1922                         .geniv = "<built-in>",
1923                         .ivsize = NULL_IV_SIZE,
1924                         .maxauthsize = SHA512_DIGEST_SIZE,
1925                         },
1926                 .class1_alg_type = 0,
1927                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1928                                    OP_ALG_AAI_HMAC_PRECOMP,
1929                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1930         },
1931         {
1932                 .name = "authenc(hmac(md5),cbc(aes))",
1933                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1934                 .blocksize = AES_BLOCK_SIZE,
1935                 .type = CRYPTO_ALG_TYPE_AEAD,
1936                 .template_aead = {
1937                         .setkey = aead_setkey,
1938                         .setauthsize = aead_setauthsize,
1939                         .encrypt = aead_encrypt,
1940                         .decrypt = aead_decrypt,
1941                         .givencrypt = aead_givencrypt,
1942                         .geniv = "<built-in>",
1943                         .ivsize = AES_BLOCK_SIZE,
1944                         .maxauthsize = MD5_DIGEST_SIZE,
1945                         },
1946                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1947                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1948                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1949         },
1950         {
1951                 .name = "authenc(hmac(sha1),cbc(aes))",
1952                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1953                 .blocksize = AES_BLOCK_SIZE,
1954                 .type = CRYPTO_ALG_TYPE_AEAD,
1955                 .template_aead = {
1956                         .setkey = aead_setkey,
1957                         .setauthsize = aead_setauthsize,
1958                         .encrypt = aead_encrypt,
1959                         .decrypt = aead_decrypt,
1960                         .givencrypt = aead_givencrypt,
1961                         .geniv = "<built-in>",
1962                         .ivsize = AES_BLOCK_SIZE,
1963                         .maxauthsize = SHA1_DIGEST_SIZE,
1964                         },
1965                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1966                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1967                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1968         },
1969         {
1970                 .name = "authenc(hmac(sha224),cbc(aes))",
1971                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1972                 .blocksize = AES_BLOCK_SIZE,
1973                 .type = CRYPTO_ALG_TYPE_AEAD,
1974                 .template_aead = {
1975                         .setkey = aead_setkey,
1976                         .setauthsize = aead_setauthsize,
1977                         .encrypt = aead_encrypt,
1978                         .decrypt = aead_decrypt,
1979                         .givencrypt = aead_givencrypt,
1980                         .geniv = "<built-in>",
1981                         .ivsize = AES_BLOCK_SIZE,
1982                         .maxauthsize = SHA224_DIGEST_SIZE,
1983                         },
1984                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1985                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1986                                    OP_ALG_AAI_HMAC_PRECOMP,
1987                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1988         },
1989         {
1990                 .name = "authenc(hmac(sha256),cbc(aes))",
1991                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1992                 .blocksize = AES_BLOCK_SIZE,
1993                 .type = CRYPTO_ALG_TYPE_AEAD,
1994                 .template_aead = {
1995                         .setkey = aead_setkey,
1996                         .setauthsize = aead_setauthsize,
1997                         .encrypt = aead_encrypt,
1998                         .decrypt = aead_decrypt,
1999                         .givencrypt = aead_givencrypt,
2000                         .geniv = "<built-in>",
2001                         .ivsize = AES_BLOCK_SIZE,
2002                         .maxauthsize = SHA256_DIGEST_SIZE,
2003                         },
2004                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2005                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2006                                    OP_ALG_AAI_HMAC_PRECOMP,
2007                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2008         },
2009         {
2010                 .name = "authenc(hmac(sha384),cbc(aes))",
2011                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2012                 .blocksize = AES_BLOCK_SIZE,
2013                 .type = CRYPTO_ALG_TYPE_AEAD,
2014                 .template_aead = {
2015                         .setkey = aead_setkey,
2016                         .setauthsize = aead_setauthsize,
2017                         .encrypt = aead_encrypt,
2018                         .decrypt = aead_decrypt,
2019                         .givencrypt = aead_givencrypt,
2020                         .geniv = "<built-in>",
2021                         .ivsize = AES_BLOCK_SIZE,
2022                         .maxauthsize = SHA384_DIGEST_SIZE,
2023                         },
2024                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2025                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2026                                    OP_ALG_AAI_HMAC_PRECOMP,
2027                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2028         },
2030         {
2031                 .name = "authenc(hmac(sha512),cbc(aes))",
2032                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2033                 .blocksize = AES_BLOCK_SIZE,
2034                 .type = CRYPTO_ALG_TYPE_AEAD,
2035                 .template_aead = {
2036                         .setkey = aead_setkey,
2037                         .setauthsize = aead_setauthsize,
2038                         .encrypt = aead_encrypt,
2039                         .decrypt = aead_decrypt,
2040                         .givencrypt = aead_givencrypt,
2041                         .geniv = "<built-in>",
2042                         .ivsize = AES_BLOCK_SIZE,
2043                         .maxauthsize = SHA512_DIGEST_SIZE,
2044                         },
2045                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2046                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2047                                    OP_ALG_AAI_HMAC_PRECOMP,
2048                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2049         },
2050         {
2051                 .name = "authenc(hmac(md5),cbc(des3_ede))",
2052                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2053                 .blocksize = DES3_EDE_BLOCK_SIZE,
2054                 .type = CRYPTO_ALG_TYPE_AEAD,
2055                 .template_aead = {
2056                         .setkey = aead_setkey,
2057                         .setauthsize = aead_setauthsize,
2058                         .encrypt = aead_encrypt,
2059                         .decrypt = aead_decrypt,
2060                         .givencrypt = aead_givencrypt,
2061                         .geniv = "<built-in>",
2062                         .ivsize = DES3_EDE_BLOCK_SIZE,
2063                         .maxauthsize = MD5_DIGEST_SIZE,
2064                         },
2065                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2066                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2067                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2068         },
2069         {
2070                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2071                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2072                 .blocksize = DES3_EDE_BLOCK_SIZE,
2073                 .type = CRYPTO_ALG_TYPE_AEAD,
2074                 .template_aead = {
2075                         .setkey = aead_setkey,
2076                         .setauthsize = aead_setauthsize,
2077                         .encrypt = aead_encrypt,
2078                         .decrypt = aead_decrypt,
2079                         .givencrypt = aead_givencrypt,
2080                         .geniv = "<built-in>",
2081                         .ivsize = DES3_EDE_BLOCK_SIZE,
2082                         .maxauthsize = SHA1_DIGEST_SIZE,
2083                         },
2084                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2085                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2086                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2087         },
2088         {
2089                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2090                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2091                 .blocksize = DES3_EDE_BLOCK_SIZE,
2092                 .type = CRYPTO_ALG_TYPE_AEAD,
2093                 .template_aead = {
2094                         .setkey = aead_setkey,
2095                         .setauthsize = aead_setauthsize,
2096                         .encrypt = aead_encrypt,
2097                         .decrypt = aead_decrypt,
2098                         .givencrypt = aead_givencrypt,
2099                         .geniv = "<built-in>",
2100                         .ivsize = DES3_EDE_BLOCK_SIZE,
2101                         .maxauthsize = SHA224_DIGEST_SIZE,
2102                         },
2103                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2104                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2105                                    OP_ALG_AAI_HMAC_PRECOMP,
2106                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2107         },
2108         {
2109                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2110                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2111                 .blocksize = DES3_EDE_BLOCK_SIZE,
2112                 .type = CRYPTO_ALG_TYPE_AEAD,
2113                 .template_aead = {
2114                         .setkey = aead_setkey,
2115                         .setauthsize = aead_setauthsize,
2116                         .encrypt = aead_encrypt,
2117                         .decrypt = aead_decrypt,
2118                         .givencrypt = aead_givencrypt,
2119                         .geniv = "<built-in>",
2120                         .ivsize = DES3_EDE_BLOCK_SIZE,
2121                         .maxauthsize = SHA256_DIGEST_SIZE,
2122                         },
2123                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2124                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2125                                    OP_ALG_AAI_HMAC_PRECOMP,
2126                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2127         },
2128         {
2129                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2130                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2131                 .blocksize = DES3_EDE_BLOCK_SIZE,
2132                 .type = CRYPTO_ALG_TYPE_AEAD,
2133                 .template_aead = {
2134                         .setkey = aead_setkey,
2135                         .setauthsize = aead_setauthsize,
2136                         .encrypt = aead_encrypt,
2137                         .decrypt = aead_decrypt,
2138                         .givencrypt = aead_givencrypt,
2139                         .geniv = "<built-in>",
2140                         .ivsize = DES3_EDE_BLOCK_SIZE,
2141                         .maxauthsize = SHA384_DIGEST_SIZE,
2142                         },
2143                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2144                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2145                                    OP_ALG_AAI_HMAC_PRECOMP,
2146                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2147         },
2148         {
2149                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2150                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2151                 .blocksize = DES3_EDE_BLOCK_SIZE,
2152                 .type = CRYPTO_ALG_TYPE_AEAD,
2153                 .template_aead = {
2154                         .setkey = aead_setkey,
2155                         .setauthsize = aead_setauthsize,
2156                         .encrypt = aead_encrypt,
2157                         .decrypt = aead_decrypt,
2158                         .givencrypt = aead_givencrypt,
2159                         .geniv = "<built-in>",
2160                         .ivsize = DES3_EDE_BLOCK_SIZE,
2161                         .maxauthsize = SHA512_DIGEST_SIZE,
2162                         },
2163                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2164                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2165                                    OP_ALG_AAI_HMAC_PRECOMP,
2166                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2167         },
2168         {
2169                 .name = "authenc(hmac(md5),cbc(des))",
2170                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2171                 .blocksize = DES_BLOCK_SIZE,
2172                 .type = CRYPTO_ALG_TYPE_AEAD,
2173                 .template_aead = {
2174                         .setkey = aead_setkey,
2175                         .setauthsize = aead_setauthsize,
2176                         .encrypt = aead_encrypt,
2177                         .decrypt = aead_decrypt,
2178                         .givencrypt = aead_givencrypt,
2179                         .geniv = "<built-in>",
2180                         .ivsize = DES_BLOCK_SIZE,
2181                         .maxauthsize = MD5_DIGEST_SIZE,
2182                         },
2183                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2184                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2185                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2186         },
2187         {
2188                 .name = "authenc(hmac(sha1),cbc(des))",
2189                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2190                 .blocksize = DES_BLOCK_SIZE,
2191                 .type = CRYPTO_ALG_TYPE_AEAD,
2192                 .template_aead = {
2193                         .setkey = aead_setkey,
2194                         .setauthsize = aead_setauthsize,
2195                         .encrypt = aead_encrypt,
2196                         .decrypt = aead_decrypt,
2197                         .givencrypt = aead_givencrypt,
2198                         .geniv = "<built-in>",
2199                         .ivsize = DES_BLOCK_SIZE,
2200                         .maxauthsize = SHA1_DIGEST_SIZE,
2201                         },
2202                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2204                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2205         },
2206         {
2207                 .name = "authenc(hmac(sha224),cbc(des))",
2208                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2209                 .blocksize = DES_BLOCK_SIZE,
2210                 .type = CRYPTO_ALG_TYPE_AEAD,
2211                 .template_aead = {
2212                         .setkey = aead_setkey,
2213                         .setauthsize = aead_setauthsize,
2214                         .encrypt = aead_encrypt,
2215                         .decrypt = aead_decrypt,
2216                         .givencrypt = aead_givencrypt,
2217                         .geniv = "<built-in>",
2218                         .ivsize = DES_BLOCK_SIZE,
2219                         .maxauthsize = SHA224_DIGEST_SIZE,
2220                         },
2221                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2222                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2223                                    OP_ALG_AAI_HMAC_PRECOMP,
2224                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2225         },
2226         {
2227                 .name = "authenc(hmac(sha256),cbc(des))",
2228                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2229                 .blocksize = DES_BLOCK_SIZE,
2230                 .type = CRYPTO_ALG_TYPE_AEAD,
2231                 .template_aead = {
2232                         .setkey = aead_setkey,
2233                         .setauthsize = aead_setauthsize,
2234                         .encrypt = aead_encrypt,
2235                         .decrypt = aead_decrypt,
2236                         .givencrypt = aead_givencrypt,
2237                         .geniv = "<built-in>",
2238                         .ivsize = DES_BLOCK_SIZE,
2239                         .maxauthsize = SHA256_DIGEST_SIZE,
2240                         },
2241                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2242                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2243                                    OP_ALG_AAI_HMAC_PRECOMP,
2244                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2245         },
2246         {
2247                 .name = "authenc(hmac(sha384),cbc(des))",
2248                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2249                 .blocksize = DES_BLOCK_SIZE,
2250                 .type = CRYPTO_ALG_TYPE_AEAD,
2251                 .template_aead = {
2252                         .setkey = aead_setkey,
2253                         .setauthsize = aead_setauthsize,
2254                         .encrypt = aead_encrypt,
2255                         .decrypt = aead_decrypt,
2256                         .givencrypt = aead_givencrypt,
2257                         .geniv = "<built-in>",
2258                         .ivsize = DES_BLOCK_SIZE,
2259                         .maxauthsize = SHA384_DIGEST_SIZE,
2260                         },
2261                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2262                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2263                                    OP_ALG_AAI_HMAC_PRECOMP,
2264                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2265         },
2266         {
2267                 .name = "authenc(hmac(sha512),cbc(des))",
2268                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2269                 .blocksize = DES_BLOCK_SIZE,
2270                 .type = CRYPTO_ALG_TYPE_AEAD,
2271                 .template_aead = {
2272                         .setkey = aead_setkey,
2273                         .setauthsize = aead_setauthsize,
2274                         .encrypt = aead_encrypt,
2275                         .decrypt = aead_decrypt,
2276                         .givencrypt = aead_givencrypt,
2277                         .geniv = "<built-in>",
2278                         .ivsize = DES_BLOCK_SIZE,
2279                         .maxauthsize = SHA512_DIGEST_SIZE,
2280                         },
2281                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2282                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2283                                    OP_ALG_AAI_HMAC_PRECOMP,
2284                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2285         },
2286         /* ablkcipher descriptor */
2287         {
2288                 .name = "cbc(aes)",
2289                 .driver_name = "cbc-aes-caam",
2290                 .blocksize = AES_BLOCK_SIZE,
2291                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2292                 .template_ablkcipher = {
2293                         .setkey = ablkcipher_setkey,
2294                         .encrypt = ablkcipher_encrypt,
2295                         .decrypt = ablkcipher_decrypt,
2296                         .geniv = "eseqiv",
2297                         .min_keysize = AES_MIN_KEY_SIZE,
2298                         .max_keysize = AES_MAX_KEY_SIZE,
2299                         .ivsize = AES_BLOCK_SIZE,
2300                         },
2301                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2302         },
2303         {
2304                 .name = "cbc(des3_ede)",
2305                 .driver_name = "cbc-3des-caam",
2306                 .blocksize = DES3_EDE_BLOCK_SIZE,
2307                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2308                 .template_ablkcipher = {
2309                         .setkey = ablkcipher_setkey,
2310                         .encrypt = ablkcipher_encrypt,
2311                         .decrypt = ablkcipher_decrypt,
2312                         .geniv = "eseqiv",
2313                         .min_keysize = DES3_EDE_KEY_SIZE,
2314                         .max_keysize = DES3_EDE_KEY_SIZE,
2315                         .ivsize = DES3_EDE_BLOCK_SIZE,
2316                         },
2317                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2318         },
2319         {
2320                 .name = "cbc(des)",
2321                 .driver_name = "cbc-des-caam",
2322                 .blocksize = DES_BLOCK_SIZE,
2323                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2324                 .template_ablkcipher = {
2325                         .setkey = ablkcipher_setkey,
2326                         .encrypt = ablkcipher_encrypt,
2327                         .decrypt = ablkcipher_decrypt,
2328                         .geniv = "eseqiv",
2329                         .min_keysize = DES_KEY_SIZE,
2330                         .max_keysize = DES_KEY_SIZE,
2331                         .ivsize = DES_BLOCK_SIZE,
2332                         },
2333                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2334         }
2335 };
2336
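/*
 * Runtime wrapper around a registered template: caam_alg_alloc() fills
 * in the embedded crypto_alg from a caam_alg_template, and the entry
 * list_head keeps it on alg_list for unregistration at module exit.
 */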
2337 struct caam_crypto_alg {
2338         struct list_head entry;
2339         int class1_alg_type;
2340         int class2_alg_type;
2341         int alg_op;
2342         struct crypto_alg crypto_alg;
2343 };
2344
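/*
 * Each transform instance acquires a dedicated job ring at cra_init
 * time and, at cra_exit time, unmaps any cached shared descriptors and
 * split key before releasing that ring.
 */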
2345 static int caam_cra_init(struct crypto_tfm *tfm)
2346 {
2347         struct crypto_alg *alg = tfm->__crt_alg;
2348         struct caam_crypto_alg *caam_alg =
2349                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2350         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2351
2352         ctx->jrdev = caam_jr_alloc();
2353         if (IS_ERR(ctx->jrdev)) {
2354                 pr_err("Job Ring Device allocation for transform failed\n");
2355                 return PTR_ERR(ctx->jrdev);
2356         }
2357
2358         /* copy descriptor header template value */
2359         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2360         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2361         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2362
2363         return 0;
2364 }
2365
2366 static void caam_cra_exit(struct crypto_tfm *tfm)
2367 {
2368         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2369
2370         if (ctx->sh_desc_enc_dma &&
2371             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2372                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2373                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2374         if (ctx->sh_desc_dec_dma &&
2375             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2376                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2377                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2378         if (ctx->sh_desc_givenc_dma &&
2379             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2380                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2381                                  desc_bytes(ctx->sh_desc_givenc),
2382                                  DMA_TO_DEVICE);
2383         if (ctx->key_dma &&
2384             !dma_mapping_error(ctx->jrdev, ctx->key_dma))
2385                 dma_unmap_single(ctx->jrdev, ctx->key_dma,
2386                                  ctx->enckeylen + ctx->split_key_pad_len,
2387                                  DMA_TO_DEVICE);
2388
2389         caam_jr_free(ctx->jrdev);
2390 }
2391
2392 static void __exit caam_algapi_exit(void)
2393 {
2395         struct caam_crypto_alg *t_alg, *n;
2396
2397         if (!alg_list.next)
2398                 return;
2399
2400         list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2401                 crypto_unregister_alg(&t_alg->crypto_alg);
2402                 list_del(&t_alg->entry);
2403                 kfree(t_alg);
2404         }
2405 }
2406
2407 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2408                                               *template)
2409 {
2410         struct caam_crypto_alg *t_alg;
2411         struct crypto_alg *alg;
2412
2413         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2414         if (!t_alg) {
2415                 pr_err("failed to allocate t_alg\n");
2416                 return ERR_PTR(-ENOMEM);
2417         }
2418
2419         alg = &t_alg->crypto_alg;
2420
2421         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2422         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2423                  template->driver_name);
2424         alg->cra_module = THIS_MODULE;
2425         alg->cra_init = caam_cra_init;
2426         alg->cra_exit = caam_cra_exit;
2427         alg->cra_priority = CAAM_CRA_PRIORITY;
2428         alg->cra_blocksize = template->blocksize;
2429         alg->cra_alignmask = 0;
2430         alg->cra_ctxsize = sizeof(struct caam_ctx);
2431         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2432                          template->type;
2433         switch (template->type) {
2434         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2435                 alg->cra_type = &crypto_ablkcipher_type;
2436                 alg->cra_ablkcipher = template->template_ablkcipher;
2437                 break;
2438         case CRYPTO_ALG_TYPE_AEAD:
2439                 alg->cra_type = &crypto_aead_type;
2440                 alg->cra_aead = template->template_aead;
2441                 break;
2442         }
2443
2444         t_alg->class1_alg_type = template->class1_alg_type;
2445         t_alg->class2_alg_type = template->class2_alg_type;
2446         t_alg->alg_op = template->alg_op;
2447
2448         return t_alg;
2449 }
2450
2451 static int __init caam_algapi_init(void)
2452 {
2453         int i = 0, err = 0;
2454
2455         INIT_LIST_HEAD(&alg_list);
2456
2457         /* register crypto algorithms the device supports */
2458         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2459                 /* TODO: check if h/w supports alg */
2460                 struct caam_crypto_alg *t_alg;
2461
2462                 t_alg = caam_alg_alloc(&driver_algs[i]);
2463                 if (IS_ERR(t_alg)) {
2464                         err = PTR_ERR(t_alg);
2465                         pr_warn("%s alg allocation failed\n",
2466                                 driver_algs[i].driver_name);
2467                         continue;
2468                 }
2469
2470                 err = crypto_register_alg(&t_alg->crypto_alg);
2471                 if (err) {
2472                         pr_warn("%s alg registration failed\n",
2473                                 t_alg->crypto_alg.cra_driver_name);
2474                         kfree(t_alg);
2475                 } else
2476                         list_add_tail(&t_alg->entry, &alg_list);
2477         }
2478         if (!list_empty(&alg_list))
2479                 pr_info("caam algorithms registered in /proc/crypto\n");
2480
2481         return err;
2482 }
2483
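/*
 * Hedged usage sketch, not part of the driver: once caam_algapi_init()
 * has run, any template above is reachable through the generic crypto
 * API by its cra_name. This helper only demonstrates the lookup; key
 * setup and request submission are omitted.
 */
static int __maybe_unused caam_example_tfm_lookup(void)
{
        struct crypto_aead *tfm;

        tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        crypto_free_aead(tfm);
        return 0;
}
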
2484 module_init(caam_algapi_init);
2485 module_exit(caam_algapi_exit);
2486
2487 MODULE_LICENSE("GPL");
2488 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2489 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");