/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

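/*
 * Illustrative sketch (not part of the original driver): building the job
 * descriptor layout pictured in the header comment with the desc_constr.h
 * helpers this file already uses. The function name and parameters are
 * hypothetical. HDR_REVERSE runs the job descriptor before the (deferred)
 * shared descriptor it points to, as the init_*_job() functions below do.
 */
static inline void sketch_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_desc_len,
				   dma_addr_t out_dma, u32 outlen,
				   dma_addr_t in_dma, u32 inlen)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, out_dma, outlen, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, in_dma, inlen, 0);
}
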
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

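/*
 * Illustrative sketch (not part of the original driver): the inline-key
 * budget check performed by the *_set_sh_desc() functions below. The job
 * descriptor I/O commands, the shared descriptor text, and any key material
 * placed inline must together fit the 64-word descriptor buffer, i.e.
 * CAAM_DESC_BYTES_MAX from desc_constr.h. The function name is hypothetical;
 * the shown variant uses the AEAD encrypt budget defined above.
 */
static inline bool sketch_aead_keys_fit_inline(unsigned int split_key_pad_len,
					       unsigned int enckeylen)
{
	return DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	       split_key_pad_len + enckeylen <= CAAM_DESC_BYTES_MAX;
}
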
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * Set if all data, including src (with assoc and iv) or dst (with iv only),
 * is contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy IV from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload IV */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

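/*
 * Illustrative note (not part of the original source): after a successful
 * aead_setkey(), ctx->key / ctx->key_dma hold both keys back to back:
 *
 *   |<---- split_key_pad_len ---->|<-- enckeylen -->|
 *   [ MDHA split auth key + pad   ][ cipher key     ]
 *
 * append_key_aead() above addresses the two halves as key_dma and
 * key_dma + split_key_pad_len.
 */
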
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

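/*
 * Illustrative sketch (not part of the original driver): the single-buffer
 * layout used by the *_edesc_alloc() functions below. The base struct, the
 * h/w job descriptor, and the sec4 link table come from one GFP_DMA
 * allocation, with sec4_sg pointing just past the variable-length hw_desc.
 * The function name is hypothetical.
 */
static inline struct ablkcipher_edesc *
sketch_edesc_alloc(int desc_bytes, int sec4_sg_bytes, gfp_t flags)
{
	struct ablkcipher_edesc *edesc;

	/* one allocation for base edesc + hw desc commands + link tables */
	edesc = kmalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return NULL;

	/* link table starts right after the hw_desc commands */
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desc_bytes;

	return edesc;
}
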
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

1267 /*
1268  * allocate and map the aead extended descriptor
1269  */
1270 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1271                                            int desc_bytes, bool *all_contig_ptr,
1272                                            bool encrypt)
1273 {
1274         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1275         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1276         struct device *jrdev = ctx->jrdev;
1277         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1278                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1279         int assoc_nents, src_nents, dst_nents = 0;
1280         struct aead_edesc *edesc;
1281         dma_addr_t iv_dma = 0;
1282         int sgc;
1283         bool all_contig = true;
1284         bool assoc_chained = false, src_chained = false, dst_chained = false;
1285         int ivsize = crypto_aead_ivsize(aead);
1286         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1287         unsigned int authsize = ctx->authsize;
1288
1289         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1290
1291         if (unlikely(req->dst != req->src)) {
1292                 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1293                 dst_nents = sg_count(req->dst,
1294                                      req->cryptlen +
1295                                         (encrypt ? authsize : (-authsize)),
1296                                      &dst_chained);
1297         } else {
1298                 src_nents = sg_count(req->src,
1299                                      req->cryptlen +
1300                                         (encrypt ? authsize : 0),
1301                                      &src_chained);
1302         }
1303
1304         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1305                                  DMA_TO_DEVICE, assoc_chained);
1306         if (likely(req->src == req->dst)) {
1307                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1308                                          DMA_BIDIRECTIONAL, src_chained);
1309         } else {
1310                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1311                                          DMA_TO_DEVICE, src_chained);
1312                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1313                                          DMA_FROM_DEVICE, dst_chained);
1314         }
1315
1316         /* Check if data are contiguous */
1317         iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1318         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1319             iv_dma || src_nents || iv_dma + ivsize !=
1320             sg_dma_address(req->src)) {
1321                 all_contig = false;
1322                 assoc_nents = assoc_nents ? : 1;
1323                 src_nents = src_nents ? : 1;
1324                 sec4_sg_len = assoc_nents + 1 + src_nents;
1325         }
1326         sec4_sg_len += dst_nents;
1327
1328         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1329
1330         /* allocate space for base edesc, hw desc commands and link tables */
1331         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1332                         sec4_sg_bytes, GFP_DMA | flags);
1333         if (!edesc) {
1334                 dev_err(jrdev, "could not allocate extended descriptor\n");
1335                 return ERR_PTR(-ENOMEM);
1336         }
1337
1338         edesc->assoc_nents = assoc_nents;
1339         edesc->assoc_chained = assoc_chained;
1340         edesc->src_nents = src_nents;
1341         edesc->src_chained = src_chained;
1342         edesc->dst_nents = dst_nents;
1343         edesc->dst_chained = dst_chained;
1344         edesc->iv_dma = iv_dma;
1345         edesc->sec4_sg_bytes = sec4_sg_bytes;
1346         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1347                          desc_bytes;
1348         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1349                                             sec4_sg_bytes, DMA_TO_DEVICE);
1350         *all_contig_ptr = all_contig;
1351
1352         sec4_sg_index = 0;
1353         if (!all_contig) {
1354                 sg_to_sec4_sg(req->assoc,
1355                               (assoc_nents ? : 1),
1356                               edesc->sec4_sg +
1357                               sec4_sg_index, 0);
1358                 sec4_sg_index += assoc_nents ? : 1;
1359                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1360                                    iv_dma, ivsize, 0);
1361                 sec4_sg_index += 1;
1362                 sg_to_sec4_sg_last(req->src,
1363                                    (src_nents ? : 1),
1364                                    edesc->sec4_sg +
1365                                    sec4_sg_index, 0);
1366                 sec4_sg_index += src_nents ? : 1;
1367         }
1368         if (dst_nents) {
1369                 sg_to_sec4_sg_last(req->dst, dst_nents,
1370                                    edesc->sec4_sg + sec4_sg_index, 0);
1371         }
1372
1373         return edesc;
1374 }
1375
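/*
 * Shape of the sec4 link table built by aead_edesc_alloc() when the
 * data are not contiguous (editor's sketch; entry counts depend on the
 * request):
 *
 *	sec4_sg[0 .. assoc_nents-1]	req->assoc segments
 *	sec4_sg[assoc_nents]		the IV, as a single entry
 *	sec4_sg[assoc_nents+1 ..]	req->src segments, the last one
 *					carrying the final flag
 *	sec4_sg[sec4_sg_index ..]	req->dst segments when dst != src,
 *					again ending with a final flag
 */
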
1376 static int aead_encrypt(struct aead_request *req)
1377 {
1378         struct aead_edesc *edesc;
1379         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1380         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1381         struct device *jrdev = ctx->jrdev;
1382         bool all_contig;
1383         u32 *desc;
1384         int ret = 0;
1385
1386         /* allocate extended descriptor */
1387         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1388                                  CAAM_CMD_SZ, &all_contig, true);
1389         if (IS_ERR(edesc))
1390                 return PTR_ERR(edesc);
1391
1392         /* Create and submit job descriptor */
1393         init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1394                       all_contig, true);
1395 #ifdef DEBUG
1396         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1397                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1398                        desc_bytes(edesc->hw_desc), 1);
1399 #endif
1400
1401         desc = edesc->hw_desc;
1402         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1403         if (!ret) {
1404                 ret = -EINPROGRESS;
1405         } else {
1406                 aead_unmap(jrdev, edesc, req);
1407                 kfree(edesc);
1408         }
1409
1410         return ret;
1411 }
1412
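/*
 * caam_jr_enqueue() returning 0 means the job is in flight and
 * aead_encrypt_done() will run later, so -EINPROGRESS is handed back
 * per the asynchronous crypto API contract. A hypothetical synchronous
 * caller (editor's sketch, not part of this driver; setup and error
 * handling elided) would block on its own completion:
 *
 *	struct sync_result { struct completion done; int err; };
 *
 *	static void sync_cb(struct crypto_async_request *req, int err)
 *	{
 *		struct sync_result *res = req->data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		res->err = err;
 *		complete(&res->done);
 *	}
 *
 *	err = crypto_aead_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&res.done);
 *		err = res.err;
 *	}
 */
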
1413 static int aead_decrypt(struct aead_request *req)
1414 {
1415         struct aead_edesc *edesc;
1416         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1417         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1418         struct device *jrdev = ctx->jrdev;
1419         bool all_contig;
1420         u32 *desc;
1421         int ret = 0;
1422
1423         /* allocate extended descriptor */
1424         edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1425                                  CAAM_CMD_SZ, &all_contig, false);
1426         if (IS_ERR(edesc))
1427                 return PTR_ERR(edesc);
1428
1429 #ifdef DEBUG
1430         print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1431                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1432                        req->cryptlen, 1);
1433 #endif
1434
1435         /* Create and submit job descriptor */
1436         init_aead_job(ctx->sh_desc_dec,
1437                       ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1438 #ifdef DEBUG
1439         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1440                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1441                        desc_bytes(edesc->hw_desc), 1);
1442 #endif
1443
1444         desc = edesc->hw_desc;
1445         ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1446         if (!ret) {
1447                 ret = -EINPROGRESS;
1448         } else {
1449                 aead_unmap(jrdev, edesc, req);
1450                 kfree(edesc);
1451         }
1452
1453         return ret;
1454 }
1455
1456 /*
1457  * allocate and map the aead extended descriptor for givencrypt
1458  */
1459 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1460                                                *greq, int desc_bytes,
1461                                                u32 *contig_ptr)
1462 {
1463         struct aead_request *req = &greq->areq;
1464         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1465         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1466         struct device *jrdev = ctx->jrdev;
1467         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1468                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1469         int assoc_nents, src_nents, dst_nents = 0;
1470         struct aead_edesc *edesc;
1471         dma_addr_t iv_dma = 0;
1472         int sgc;
1473         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1474         int ivsize = crypto_aead_ivsize(aead);
1475         bool assoc_chained = false, src_chained = false, dst_chained = false;
1476         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1477
1478         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1479         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1480
1481         if (unlikely(req->dst != req->src))
1482                 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1483                                      &dst_chained);
1484
1485         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1486                                  DMA_TO_DEVICE, assoc_chained);
1487         if (likely(req->src == req->dst)) {
1488                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1489                                          DMA_BIDIRECTIONAL, src_chained);
1490         } else {
1491                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1492                                          DMA_TO_DEVICE, src_chained);
1493                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1494                                          DMA_FROM_DEVICE, dst_chained);
1495         }
1496
1497         /* Check if data are contiguous */
1498         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1499         if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1500             iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1501                 contig &= ~GIV_SRC_CONTIG;
1502         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1503                 contig &= ~GIV_DST_CONTIG;
1504         if (unlikely(req->src != req->dst)) {
1505                 dst_nents = dst_nents ? : 1;
1506                 sec4_sg_len += 1;
1507         }
1508         if (!(contig & GIV_SRC_CONTIG)) {
1509                 assoc_nents = assoc_nents ? : 1;
1510                 src_nents = src_nents ? : 1;
1511                 sec4_sg_len += assoc_nents + 1 + src_nents;
1512                 if (likely(req->src == req->dst))
1513                         contig &= ~GIV_DST_CONTIG;
1514         }
1515         sec4_sg_len += dst_nents;
1516
1517         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1518
1519         /* allocate space for base edesc, hw desc commands and link tables */
1520         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1521                         sec4_sg_bytes, GFP_DMA | flags);
1522         if (!edesc) {
1523                 dev_err(jrdev, "could not allocate extended descriptor\n");
1524                 return ERR_PTR(-ENOMEM);
1525         }
1526
1527         edesc->assoc_nents = assoc_nents;
1528         edesc->assoc_chained = assoc_chained;
1529         edesc->src_nents = src_nents;
1530         edesc->src_chained = src_chained;
1531         edesc->dst_nents = dst_nents;
1532         edesc->dst_chained = dst_chained;
1533         edesc->iv_dma = iv_dma;
1534         edesc->sec4_sg_bytes = sec4_sg_bytes;
1535         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1536                          desc_bytes;
1537         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1538                                             sec4_sg_bytes, DMA_TO_DEVICE);
1539         *contig_ptr = contig;
1540
1541         sec4_sg_index = 0;
1542         if (!(contig & GIV_SRC_CONTIG)) {
1543                 sg_to_sec4_sg(req->assoc, assoc_nents,
1544                               edesc->sec4_sg +
1545                               sec4_sg_index, 0);
1546                 sec4_sg_index += assoc_nents;
1547                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1548                                    iv_dma, ivsize, 0);
1549                 sec4_sg_index += 1;
1550                 sg_to_sec4_sg_last(req->src, src_nents,
1551                                    edesc->sec4_sg +
1552                                    sec4_sg_index, 0);
1553                 sec4_sg_index += src_nents;
1554         }
1555         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1556                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1557                                    iv_dma, ivsize, 0);
1558                 sec4_sg_index += 1;
1559                 sg_to_sec4_sg_last(req->dst, dst_nents,
1560                                    edesc->sec4_sg + sec4_sg_index, 0);
1561         }
1562
1563         return edesc;
1564 }
1565
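/*
 * The returned contig mask steers init_aead_giv_job(): GIV_SRC_CONTIG
 * means assoc, generated IV and src already sit back to back in DMA
 * space and a single pointer suffices; GIV_DST_CONTIG means the same
 * for IV and dst. Editor's worked example: scattered assoc data clears
 * GIV_SRC_CONTIG, so assoc + IV + src go through the link table, and
 * for in-place requests (src == dst) GIV_DST_CONTIG is cleared along
 * with it, since the output then flows through the same table.
 */
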
1566 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1567 {
1568         struct aead_request *req = &areq->areq;
1569         struct aead_edesc *edesc;
1570         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1571         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1572         struct device *jrdev = ctx->jrdev;
1573         u32 contig;
1574         u32 *desc;
1575         int ret = 0;
1576
1577         /* allocate extended descriptor */
1578         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1579                                      CAAM_CMD_SZ, &contig);
1581         if (IS_ERR(edesc))
1582                 return PTR_ERR(edesc);
1583
1584 #ifdef DEBUG
1585         print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1586                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1587                        req->cryptlen, 1);
1588 #endif
1589
1590         /* Create and submit job descriptor */
1591         init_aead_giv_job(ctx->sh_desc_givenc,
1592                           ctx->sh_desc_givenc_dma, edesc, req, contig);
1593 #ifdef DEBUG
1594         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1595                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1596                        desc_bytes(edesc->hw_desc), 1);
1597 #endif
1598
1599         desc = edesc->hw_desc;
1600         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1601         if (!ret) {
1602                 ret = -EINPROGRESS;
1603         } else {
1604                 aead_unmap(jrdev, edesc, req);
1605                 kfree(edesc);
1606         }
1607
1608         return ret;
1609 }
1610
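/*
 * cipher_null carries no IV (NULL_IV_SIZE is 0), so there is nothing
 * to generate; givencrypt simply falls through to the encrypt path.
 */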
1611 static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1612 {
1613         return aead_encrypt(&areq->areq);
1614 }
1615
1616 /*
1617  * allocate and map the extended descriptor for ablkcipher
1618  */
1619 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1620                                                        *req, int desc_bytes,
1621                                                        bool *iv_contig_out)
1622 {
1623         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1624         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1625         struct device *jrdev = ctx->jrdev;
1626         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1627                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1628                        GFP_KERNEL : GFP_ATOMIC;
1629         int src_nents, dst_nents = 0, sec4_sg_bytes;
1630         struct ablkcipher_edesc *edesc;
1631         dma_addr_t iv_dma = 0;
1632         bool iv_contig = false;
1633         int sgc;
1634         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1635         bool src_chained = false, dst_chained = false;
1636         int sec4_sg_index;
1637
1638         src_nents = sg_count(req->src, req->nbytes, &src_chained);
1639
1640         if (req->dst != req->src)
1641                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1642
1643         if (likely(req->src == req->dst)) {
1644                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1645                                          DMA_BIDIRECTIONAL, src_chained);
1646         } else {
1647                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1648                                          DMA_TO_DEVICE, src_chained);
1649                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1650                                          DMA_FROM_DEVICE, dst_chained);
1651         }
1652
1653         /*
1654          * Check if the IV is contiguous with the source; if so a single
1655          * pointer covers both, otherwise the IV gets its own link-table entry.
1656          */
1657         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1658         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1659                 iv_contig = true;
1660         else
1661                 src_nents = src_nents ? : 1;
1662         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1663                         sizeof(struct sec4_sg_entry);
1664
1665         /* allocate space for base edesc, hw desc commands and link tables */
1666         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1667                         sec4_sg_bytes, GFP_DMA | flags);
1668         if (!edesc) {
1669                 dev_err(jrdev, "could not allocate extended descriptor\n");
1670                 return ERR_PTR(-ENOMEM);
1671         }
1672
1673         edesc->src_nents = src_nents;
1674         edesc->src_chained = src_chained;
1675         edesc->dst_nents = dst_nents;
1676         edesc->dst_chained = dst_chained;
1677         edesc->sec4_sg_bytes = sec4_sg_bytes;
1678         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1679                          desc_bytes;
1680
1681         sec4_sg_index = 0;
1682         if (!iv_contig) {
1683                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1684                 sg_to_sec4_sg_last(req->src, src_nents,
1685                                    edesc->sec4_sg + 1, 0);
1686                 sec4_sg_index += 1 + src_nents;
1687         }
1688
1689         if (dst_nents) {
1690                 sg_to_sec4_sg_last(req->dst, dst_nents,
1691                         edesc->sec4_sg + sec4_sg_index, 0);
1692         }
1693
1694         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1695                                             sec4_sg_bytes, DMA_TO_DEVICE);
1696         edesc->iv_dma = iv_dma;
1697
1698 #ifdef DEBUG
1699         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1700                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1701                        sec4_sg_bytes, 1);
1702 #endif
1703
1704         *iv_contig_out = iv_contig;
1705         return edesc;
1706 }
1707
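/*
 * Editor's worked example for the link-table sizing above: a
 * three-segment source processed in place (dst == src) with the IV not
 * adjacent to the first segment gives iv_contig == false, src_nents = 3
 * and dst_nents = 0, hence
 *
 *	sec4_sg_bytes = (1 + 3 + 0) * sizeof(struct sec4_sg_entry)
 *
 * i.e. four entries: the IV followed by the three source segments, the
 * last flagged as final.
 */
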
1708 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1709 {
1710         struct ablkcipher_edesc *edesc;
1711         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1712         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1713         struct device *jrdev = ctx->jrdev;
1714         bool iv_contig;
1715         u32 *desc;
1716         int ret = 0;
1717
1718         /* allocate extended descriptor */
1719         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1720                                        CAAM_CMD_SZ, &iv_contig);
1721         if (IS_ERR(edesc))
1722                 return PTR_ERR(edesc);
1723
1724         /* Create and submit job descriptor */
1725         init_ablkcipher_job(ctx->sh_desc_enc,
1726                 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1727 #ifdef DEBUG
1728         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1729                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1730                        desc_bytes(edesc->hw_desc), 1);
1731 #endif
1732         desc = edesc->hw_desc;
1733         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1734
1735         if (!ret) {
1736                 ret = -EINPROGRESS;
1737         } else {
1738                 ablkcipher_unmap(jrdev, edesc, req);
1739                 kfree(edesc);
1740         }
1741
1742         return ret;
1743 }
1744
1745 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1746 {
1747         struct ablkcipher_edesc *edesc;
1748         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1749         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1750         struct device *jrdev = ctx->jrdev;
1751         bool iv_contig;
1752         u32 *desc;
1753         int ret = 0;
1754
1755         /* allocate extended descriptor */
1756         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1757                                        CAAM_CMD_SZ, &iv_contig);
1758         if (IS_ERR(edesc))
1759                 return PTR_ERR(edesc);
1760
1761         /* Create and submit job descriptor */
1762         init_ablkcipher_job(ctx->sh_desc_dec,
1763                 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1764         desc = edesc->hw_desc;
1765 #ifdef DEBUG
1766         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1767                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1768                        desc_bytes(edesc->hw_desc), 1);
1769 #endif
1770
1771         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1772         if (!ret) {
1773                 ret = -EINPROGRESS;
1774         } else {
1775                 ablkcipher_unmap(jrdev, edesc, req);
1776                 kfree(edesc);
1777         }
1778
1779         return ret;
1780 }
1781
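/*
 * How these hooks are reached through the kernel crypto API (editor's
 * sketch with error handling elided; my_complete and my_ctx are
 * illustrative names, not part of this driver):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *
 * -EINPROGRESS from the last call means the job was queued here and
 * my_complete() will report the final status.
 */
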
1782 #define template_aead           template_u.aead
1783 #define template_ablkcipher     template_u.ablkcipher
1784 struct caam_alg_template {
1785         char name[CRYPTO_MAX_ALG_NAME];
1786         char driver_name[CRYPTO_MAX_ALG_NAME];
1787         unsigned int blocksize;
1788         u32 type;
1789         union {
1790                 struct ablkcipher_alg ablkcipher;
1791                 struct aead_alg aead;
1792                 struct blkcipher_alg blkcipher;
1793                 struct cipher_alg cipher;
1794                 struct compress_alg compress;
1795                 struct rng_alg rng;
1796         } template_u;
1797         u32 class1_alg_type;
1798         u32 class2_alg_type;
1799         u32 alg_op;
1800 };
1801
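/*
 * One entry per algorithm the hardware is expected to accelerate:
 * .type picks the valid member of template_u, class1_alg_type seeds
 * the cipher (class 1) OPERATION command, class2_alg_type the
 * authentication (class 2) one, and alg_op drives split-key
 * generation for the HMAC algorithms.
 */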
1802 static struct caam_alg_template driver_algs[] = {
1803         /* single-pass ipsec_esp descriptor */
1804         {
1805                 .name = "authenc(hmac(md5),ecb(cipher_null))",
1806                 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
1807                 .blocksize = NULL_BLOCK_SIZE,
1808                 .type = CRYPTO_ALG_TYPE_AEAD,
1809                 .template_aead = {
1810                         .setkey = aead_setkey,
1811                         .setauthsize = aead_setauthsize,
1812                         .encrypt = aead_encrypt,
1813                         .decrypt = aead_decrypt,
1814                         .givencrypt = aead_null_givencrypt,
1815                         .geniv = "<built-in>",
1816                         .ivsize = NULL_IV_SIZE,
1817                         .maxauthsize = MD5_DIGEST_SIZE,
1818                         },
1819                 .class1_alg_type = 0,
1820                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1821                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1822         },
1823         {
1824                 .name = "authenc(hmac(sha1),ecb(cipher_null))",
1825                 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
1826                 .blocksize = NULL_BLOCK_SIZE,
1827                 .type = CRYPTO_ALG_TYPE_AEAD,
1828                 .template_aead = {
1829                         .setkey = aead_setkey,
1830                         .setauthsize = aead_setauthsize,
1831                         .encrypt = aead_encrypt,
1832                         .decrypt = aead_decrypt,
1833                         .givencrypt = aead_null_givencrypt,
1834                         .geniv = "<built-in>",
1835                         .ivsize = NULL_IV_SIZE,
1836                         .maxauthsize = SHA1_DIGEST_SIZE,
1837                         },
1838                 .class1_alg_type = 0,
1839                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1840                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1841         },
1842         {
1843                 .name = "authenc(hmac(sha224),ecb(cipher_null))",
1844                 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
1845                 .blocksize = NULL_BLOCK_SIZE,
1846                 .type = CRYPTO_ALG_TYPE_AEAD,
1847                 .template_aead = {
1848                         .setkey = aead_setkey,
1849                         .setauthsize = aead_setauthsize,
1850                         .encrypt = aead_encrypt,
1851                         .decrypt = aead_decrypt,
1852                         .givencrypt = aead_null_givencrypt,
1853                         .geniv = "<built-in>",
1854                         .ivsize = NULL_IV_SIZE,
1855                         .maxauthsize = SHA224_DIGEST_SIZE,
1856                         },
1857                 .class1_alg_type = 0,
1858                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1859                                    OP_ALG_AAI_HMAC_PRECOMP,
1860                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1861         },
1862         {
1863                 .name = "authenc(hmac(sha256),ecb(cipher_null))",
1864                 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
1865                 .blocksize = NULL_BLOCK_SIZE,
1866                 .type = CRYPTO_ALG_TYPE_AEAD,
1867                 .template_aead = {
1868                         .setkey = aead_setkey,
1869                         .setauthsize = aead_setauthsize,
1870                         .encrypt = aead_encrypt,
1871                         .decrypt = aead_decrypt,
1872                         .givencrypt = aead_null_givencrypt,
1873                         .geniv = "<built-in>",
1874                         .ivsize = NULL_IV_SIZE,
1875                         .maxauthsize = SHA256_DIGEST_SIZE,
1876                         },
1877                 .class1_alg_type = 0,
1878                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1879                                    OP_ALG_AAI_HMAC_PRECOMP,
1880                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1881         },
1882         {
1883                 .name = "authenc(hmac(sha384),ecb(cipher_null))",
1884                 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
1885                 .blocksize = NULL_BLOCK_SIZE,
1886                 .type = CRYPTO_ALG_TYPE_AEAD,
1887                 .template_aead = {
1888                         .setkey = aead_setkey,
1889                         .setauthsize = aead_setauthsize,
1890                         .encrypt = aead_encrypt,
1891                         .decrypt = aead_decrypt,
1892                         .givencrypt = aead_null_givencrypt,
1893                         .geniv = "<built-in>",
1894                         .ivsize = NULL_IV_SIZE,
1895                         .maxauthsize = SHA384_DIGEST_SIZE,
1896                         },
1897                 .class1_alg_type = 0,
1898                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1899                                    OP_ALG_AAI_HMAC_PRECOMP,
1900                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1901         },
1902         {
1903                 .name = "authenc(hmac(sha512),ecb(cipher_null))",
1904                 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
1905                 .blocksize = NULL_BLOCK_SIZE,
1906                 .type = CRYPTO_ALG_TYPE_AEAD,
1907                 .template_aead = {
1908                         .setkey = aead_setkey,
1909                         .setauthsize = aead_setauthsize,
1910                         .encrypt = aead_encrypt,
1911                         .decrypt = aead_decrypt,
1912                         .givencrypt = aead_null_givencrypt,
1913                         .geniv = "<built-in>",
1914                         .ivsize = NULL_IV_SIZE,
1915                         .maxauthsize = SHA512_DIGEST_SIZE,
1916                         },
1917                 .class1_alg_type = 0,
1918                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1919                                    OP_ALG_AAI_HMAC_PRECOMP,
1920                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1921         },
1922         {
1923                 .name = "authenc(hmac(md5),cbc(aes))",
1924                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1925                 .blocksize = AES_BLOCK_SIZE,
1926                 .type = CRYPTO_ALG_TYPE_AEAD,
1927                 .template_aead = {
1928                         .setkey = aead_setkey,
1929                         .setauthsize = aead_setauthsize,
1930                         .encrypt = aead_encrypt,
1931                         .decrypt = aead_decrypt,
1932                         .givencrypt = aead_givencrypt,
1933                         .geniv = "<built-in>",
1934                         .ivsize = AES_BLOCK_SIZE,
1935                         .maxauthsize = MD5_DIGEST_SIZE,
1936                         },
1937                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1938                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1939                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1940         },
1941         {
1942                 .name = "authenc(hmac(sha1),cbc(aes))",
1943                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1944                 .blocksize = AES_BLOCK_SIZE,
1945                 .type = CRYPTO_ALG_TYPE_AEAD,
1946                 .template_aead = {
1947                         .setkey = aead_setkey,
1948                         .setauthsize = aead_setauthsize,
1949                         .encrypt = aead_encrypt,
1950                         .decrypt = aead_decrypt,
1951                         .givencrypt = aead_givencrypt,
1952                         .geniv = "<built-in>",
1953                         .ivsize = AES_BLOCK_SIZE,
1954                         .maxauthsize = SHA1_DIGEST_SIZE,
1955                         },
1956                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1957                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1958                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1959         },
1960         {
1961                 .name = "authenc(hmac(sha224),cbc(aes))",
1962                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1963                 .blocksize = AES_BLOCK_SIZE,
1964                 .type = CRYPTO_ALG_TYPE_AEAD,
1965                 .template_aead = {
1966                         .setkey = aead_setkey,
1967                         .setauthsize = aead_setauthsize,
1968                         .encrypt = aead_encrypt,
1969                         .decrypt = aead_decrypt,
1970                         .givencrypt = aead_givencrypt,
1971                         .geniv = "<built-in>",
1972                         .ivsize = AES_BLOCK_SIZE,
1973                         .maxauthsize = SHA224_DIGEST_SIZE,
1974                         },
1975                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1976                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1977                                    OP_ALG_AAI_HMAC_PRECOMP,
1978                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1979         },
1980         {
1981                 .name = "authenc(hmac(sha256),cbc(aes))",
1982                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1983                 .blocksize = AES_BLOCK_SIZE,
1984                 .type = CRYPTO_ALG_TYPE_AEAD,
1985                 .template_aead = {
1986                         .setkey = aead_setkey,
1987                         .setauthsize = aead_setauthsize,
1988                         .encrypt = aead_encrypt,
1989                         .decrypt = aead_decrypt,
1990                         .givencrypt = aead_givencrypt,
1991                         .geniv = "<built-in>",
1992                         .ivsize = AES_BLOCK_SIZE,
1993                         .maxauthsize = SHA256_DIGEST_SIZE,
1994                         },
1995                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1996                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1997                                    OP_ALG_AAI_HMAC_PRECOMP,
1998                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1999         },
2000         {
2001                 .name = "authenc(hmac(sha384),cbc(aes))",
2002                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2003                 .blocksize = AES_BLOCK_SIZE,
2004                 .type = CRYPTO_ALG_TYPE_AEAD,
2005                 .template_aead = {
2006                         .setkey = aead_setkey,
2007                         .setauthsize = aead_setauthsize,
2008                         .encrypt = aead_encrypt,
2009                         .decrypt = aead_decrypt,
2010                         .givencrypt = aead_givencrypt,
2011                         .geniv = "<built-in>",
2012                         .ivsize = AES_BLOCK_SIZE,
2013                         .maxauthsize = SHA384_DIGEST_SIZE,
2014                         },
2015                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2016                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2017                                    OP_ALG_AAI_HMAC_PRECOMP,
2018                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2019         },
2021         {
2022                 .name = "authenc(hmac(sha512),cbc(aes))",
2023                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2024                 .blocksize = AES_BLOCK_SIZE,
2025                 .type = CRYPTO_ALG_TYPE_AEAD,
2026                 .template_aead = {
2027                         .setkey = aead_setkey,
2028                         .setauthsize = aead_setauthsize,
2029                         .encrypt = aead_encrypt,
2030                         .decrypt = aead_decrypt,
2031                         .givencrypt = aead_givencrypt,
2032                         .geniv = "<built-in>",
2033                         .ivsize = AES_BLOCK_SIZE,
2034                         .maxauthsize = SHA512_DIGEST_SIZE,
2035                         },
2036                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2037                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2038                                    OP_ALG_AAI_HMAC_PRECOMP,
2039                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2040         },
2041         {
2042                 .name = "authenc(hmac(md5),cbc(des3_ede))",
2043                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2044                 .blocksize = DES3_EDE_BLOCK_SIZE,
2045                 .type = CRYPTO_ALG_TYPE_AEAD,
2046                 .template_aead = {
2047                         .setkey = aead_setkey,
2048                         .setauthsize = aead_setauthsize,
2049                         .encrypt = aead_encrypt,
2050                         .decrypt = aead_decrypt,
2051                         .givencrypt = aead_givencrypt,
2052                         .geniv = "<built-in>",
2053                         .ivsize = DES3_EDE_BLOCK_SIZE,
2054                         .maxauthsize = MD5_DIGEST_SIZE,
2055                         },
2056                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2057                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2058                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2059         },
2060         {
2061                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2062                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2063                 .blocksize = DES3_EDE_BLOCK_SIZE,
2064                 .type = CRYPTO_ALG_TYPE_AEAD,
2065                 .template_aead = {
2066                         .setkey = aead_setkey,
2067                         .setauthsize = aead_setauthsize,
2068                         .encrypt = aead_encrypt,
2069                         .decrypt = aead_decrypt,
2070                         .givencrypt = aead_givencrypt,
2071                         .geniv = "<built-in>",
2072                         .ivsize = DES3_EDE_BLOCK_SIZE,
2073                         .maxauthsize = SHA1_DIGEST_SIZE,
2074                         },
2075                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2076                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2077                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2078         },
2079         {
2080                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2081                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2082                 .blocksize = DES3_EDE_BLOCK_SIZE,
2083                 .type = CRYPTO_ALG_TYPE_AEAD,
2084                 .template_aead = {
2085                         .setkey = aead_setkey,
2086                         .setauthsize = aead_setauthsize,
2087                         .encrypt = aead_encrypt,
2088                         .decrypt = aead_decrypt,
2089                         .givencrypt = aead_givencrypt,
2090                         .geniv = "<built-in>",
2091                         .ivsize = DES3_EDE_BLOCK_SIZE,
2092                         .maxauthsize = SHA224_DIGEST_SIZE,
2093                         },
2094                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2095                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2096                                    OP_ALG_AAI_HMAC_PRECOMP,
2097                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2098         },
2099         {
2100                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2101                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2102                 .blocksize = DES3_EDE_BLOCK_SIZE,
2103                 .type = CRYPTO_ALG_TYPE_AEAD,
2104                 .template_aead = {
2105                         .setkey = aead_setkey,
2106                         .setauthsize = aead_setauthsize,
2107                         .encrypt = aead_encrypt,
2108                         .decrypt = aead_decrypt,
2109                         .givencrypt = aead_givencrypt,
2110                         .geniv = "<built-in>",
2111                         .ivsize = DES3_EDE_BLOCK_SIZE,
2112                         .maxauthsize = SHA256_DIGEST_SIZE,
2113                         },
2114                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2115                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2116                                    OP_ALG_AAI_HMAC_PRECOMP,
2117                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2118         },
2119         {
2120                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2121                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2122                 .blocksize = DES3_EDE_BLOCK_SIZE,
2123                 .type = CRYPTO_ALG_TYPE_AEAD,
2124                 .template_aead = {
2125                         .setkey = aead_setkey,
2126                         .setauthsize = aead_setauthsize,
2127                         .encrypt = aead_encrypt,
2128                         .decrypt = aead_decrypt,
2129                         .givencrypt = aead_givencrypt,
2130                         .geniv = "<built-in>",
2131                         .ivsize = DES3_EDE_BLOCK_SIZE,
2132                         .maxauthsize = SHA384_DIGEST_SIZE,
2133                         },
2134                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2136                                    OP_ALG_AAI_HMAC_PRECOMP,
2137                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2138         },
2139         {
2140                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2141                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2142                 .blocksize = DES3_EDE_BLOCK_SIZE,
2143                 .type = CRYPTO_ALG_TYPE_AEAD,
2144                 .template_aead = {
2145                         .setkey = aead_setkey,
2146                         .setauthsize = aead_setauthsize,
2147                         .encrypt = aead_encrypt,
2148                         .decrypt = aead_decrypt,
2149                         .givencrypt = aead_givencrypt,
2150                         .geniv = "<built-in>",
2151                         .ivsize = DES3_EDE_BLOCK_SIZE,
2152                         .maxauthsize = SHA512_DIGEST_SIZE,
2153                         },
2154                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2155                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2156                                    OP_ALG_AAI_HMAC_PRECOMP,
2157                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2158         },
2159         {
2160                 .name = "authenc(hmac(md5),cbc(des))",
2161                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2162                 .blocksize = DES_BLOCK_SIZE,
2163                 .type = CRYPTO_ALG_TYPE_AEAD,
2164                 .template_aead = {
2165                         .setkey = aead_setkey,
2166                         .setauthsize = aead_setauthsize,
2167                         .encrypt = aead_encrypt,
2168                         .decrypt = aead_decrypt,
2169                         .givencrypt = aead_givencrypt,
2170                         .geniv = "<built-in>",
2171                         .ivsize = DES_BLOCK_SIZE,
2172                         .maxauthsize = MD5_DIGEST_SIZE,
2173                         },
2174                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2175                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2176                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2177         },
2178         {
2179                 .name = "authenc(hmac(sha1),cbc(des))",
2180                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2181                 .blocksize = DES_BLOCK_SIZE,
2182                 .type = CRYPTO_ALG_TYPE_AEAD,
2183                 .template_aead = {
2184                         .setkey = aead_setkey,
2185                         .setauthsize = aead_setauthsize,
2186                         .encrypt = aead_encrypt,
2187                         .decrypt = aead_decrypt,
2188                         .givencrypt = aead_givencrypt,
2189                         .geniv = "<built-in>",
2190                         .ivsize = DES_BLOCK_SIZE,
2191                         .maxauthsize = SHA1_DIGEST_SIZE,
2192                         },
2193                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2194                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2195                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2196         },
2197         {
2198                 .name = "authenc(hmac(sha224),cbc(des))",
2199                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2200                 .blocksize = DES_BLOCK_SIZE,
2201                 .type = CRYPTO_ALG_TYPE_AEAD,
2202                 .template_aead = {
2203                         .setkey = aead_setkey,
2204                         .setauthsize = aead_setauthsize,
2205                         .encrypt = aead_encrypt,
2206                         .decrypt = aead_decrypt,
2207                         .givencrypt = aead_givencrypt,
2208                         .geniv = "<built-in>",
2209                         .ivsize = DES_BLOCK_SIZE,
2210                         .maxauthsize = SHA224_DIGEST_SIZE,
2211                         },
2212                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2213                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2214                                    OP_ALG_AAI_HMAC_PRECOMP,
2215                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2216         },
2217         {
2218                 .name = "authenc(hmac(sha256),cbc(des))",
2219                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2220                 .blocksize = DES_BLOCK_SIZE,
2221                 .type = CRYPTO_ALG_TYPE_AEAD,
2222                 .template_aead = {
2223                         .setkey = aead_setkey,
2224                         .setauthsize = aead_setauthsize,
2225                         .encrypt = aead_encrypt,
2226                         .decrypt = aead_decrypt,
2227                         .givencrypt = aead_givencrypt,
2228                         .geniv = "<built-in>",
2229                         .ivsize = DES_BLOCK_SIZE,
2230                         .maxauthsize = SHA256_DIGEST_SIZE,
2231                         },
2232                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2233                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2234                                    OP_ALG_AAI_HMAC_PRECOMP,
2235                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2236         },
2237         {
2238                 .name = "authenc(hmac(sha384),cbc(des))",
2239                 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2240                 .blocksize = DES_BLOCK_SIZE,
2241                 .type = CRYPTO_ALG_TYPE_AEAD,
2242                 .template_aead = {
2243                         .setkey = aead_setkey,
2244                         .setauthsize = aead_setauthsize,
2245                         .encrypt = aead_encrypt,
2246                         .decrypt = aead_decrypt,
2247                         .givencrypt = aead_givencrypt,
2248                         .geniv = "<built-in>",
2249                         .ivsize = DES_BLOCK_SIZE,
2250                         .maxauthsize = SHA384_DIGEST_SIZE,
2251                         },
2252                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2253                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2254                                    OP_ALG_AAI_HMAC_PRECOMP,
2255                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2256         },
2257         {
2258                 .name = "authenc(hmac(sha512),cbc(des))",
2259                 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2260                 .blocksize = DES_BLOCK_SIZE,
2261                 .type = CRYPTO_ALG_TYPE_AEAD,
2262                 .template_aead = {
2263                         .setkey = aead_setkey,
2264                         .setauthsize = aead_setauthsize,
2265                         .encrypt = aead_encrypt,
2266                         .decrypt = aead_decrypt,
2267                         .givencrypt = aead_givencrypt,
2268                         .geniv = "<built-in>",
2269                         .ivsize = DES_BLOCK_SIZE,
2270                         .maxauthsize = SHA512_DIGEST_SIZE,
2271                         },
2272                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2273                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2274                                    OP_ALG_AAI_HMAC_PRECOMP,
2275                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2276         },
2277         /* ablkcipher descriptor */
2278         {
2279                 .name = "cbc(aes)",
2280                 .driver_name = "cbc-aes-caam",
2281                 .blocksize = AES_BLOCK_SIZE,
2282                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2283                 .template_ablkcipher = {
2284                         .setkey = ablkcipher_setkey,
2285                         .encrypt = ablkcipher_encrypt,
2286                         .decrypt = ablkcipher_decrypt,
2287                         .geniv = "eseqiv",
2288                         .min_keysize = AES_MIN_KEY_SIZE,
2289                         .max_keysize = AES_MAX_KEY_SIZE,
2290                         .ivsize = AES_BLOCK_SIZE,
2291                         },
2292                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2293         },
2294         {
2295                 .name = "cbc(des3_ede)",
2296                 .driver_name = "cbc-3des-caam",
2297                 .blocksize = DES3_EDE_BLOCK_SIZE,
2298                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2299                 .template_ablkcipher = {
2300                         .setkey = ablkcipher_setkey,
2301                         .encrypt = ablkcipher_encrypt,
2302                         .decrypt = ablkcipher_decrypt,
2303                         .geniv = "eseqiv",
2304                         .min_keysize = DES3_EDE_KEY_SIZE,
2305                         .max_keysize = DES3_EDE_KEY_SIZE,
2306                         .ivsize = DES3_EDE_BLOCK_SIZE,
2307                         },
2308                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2309         },
2310         {
2311                 .name = "cbc(des)",
2312                 .driver_name = "cbc-des-caam",
2313                 .blocksize = DES_BLOCK_SIZE,
2314                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2315                 .template_ablkcipher = {
2316                         .setkey = ablkcipher_setkey,
2317                         .encrypt = ablkcipher_encrypt,
2318                         .decrypt = ablkcipher_decrypt,
2319                         .geniv = "eseqiv",
2320                         .min_keysize = DES_KEY_SIZE,
2321                         .max_keysize = DES_KEY_SIZE,
2322                         .ivsize = DES_BLOCK_SIZE,
2323                         },
2324                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2325         }
2326 };
2327
2328 struct caam_crypto_alg {
2329         struct list_head entry;
2330         int class1_alg_type;
2331         int class2_alg_type;
2332         int alg_op;
2333         struct crypto_alg crypto_alg;
2334 };
2335
2336 static int caam_cra_init(struct crypto_tfm *tfm)
2337 {
2338         struct crypto_alg *alg = tfm->__crt_alg;
2339         struct caam_crypto_alg *caam_alg =
2340                  container_of(alg, struct caam_crypto_alg, crypto_alg);
2341         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2342
2343         ctx->jrdev = caam_jr_alloc();
2344         if (IS_ERR(ctx->jrdev)) {
2345                 pr_err("Job Ring Device allocation for transform failed\n");
2346                 return PTR_ERR(ctx->jrdev);
2347         }
2348
2349         /* copy descriptor header template value */
2350         ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2351         ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2352         ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2353
2354         return 0;
2355 }
2356
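/*
 * Editor's worked example: for "cbc(aes)" the template carries
 * OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, so the value stored above is
 *
 *	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG |
 *			       OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC;
 *
 * which the setkey handlers later fold into the OPERATION command of
 * each shared descriptor.
 */
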
2357 static void caam_cra_exit(struct crypto_tfm *tfm)
2358 {
2359         struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2360
2361         if (ctx->sh_desc_enc_dma &&
2362             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2363                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2364                                  desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2365         if (ctx->sh_desc_dec_dma &&
2366             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2367                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2368                                  desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2369         if (ctx->sh_desc_givenc_dma &&
2370             !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2371                 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2372                                  desc_bytes(ctx->sh_desc_givenc),
2373                                  DMA_TO_DEVICE);
2374         if (ctx->key_dma &&
2375             !dma_mapping_error(ctx->jrdev, ctx->key_dma))
2376                 dma_unmap_single(ctx->jrdev, ctx->key_dma,
2377                                  ctx->enckeylen + ctx->split_key_pad_len,
2378                                  DMA_TO_DEVICE);
2379
2380         caam_jr_free(ctx->jrdev);
2381 }
2382
2383 static void __exit caam_algapi_exit(void)
2384 {
2386         struct caam_crypto_alg *t_alg, *n;
2387
2388         if (!alg_list.next)
2389                 return;
2390
2391         list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2392                 crypto_unregister_alg(&t_alg->crypto_alg);
2393                 list_del(&t_alg->entry);
2394                 kfree(t_alg);
2395         }
2396 }
2397
2398 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2399                                               *template)
2400 {
2401         struct caam_crypto_alg *t_alg;
2402         struct crypto_alg *alg;
2403
2404         t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2405         if (!t_alg) {
2406                 pr_err("failed to allocate t_alg\n");
2407                 return ERR_PTR(-ENOMEM);
2408         }
2409
2410         alg = &t_alg->crypto_alg;
2411
2412         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2413         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2414                  template->driver_name);
2415         alg->cra_module = THIS_MODULE;
2416         alg->cra_init = caam_cra_init;
2417         alg->cra_exit = caam_cra_exit;
2418         alg->cra_priority = CAAM_CRA_PRIORITY;
2419         alg->cra_blocksize = template->blocksize;
2420         alg->cra_alignmask = 0;
2421         alg->cra_ctxsize = sizeof(struct caam_ctx);
2422         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2423                          template->type;
2424         switch (template->type) {
2425         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2426                 alg->cra_type = &crypto_ablkcipher_type;
2427                 alg->cra_ablkcipher = template->template_ablkcipher;
2428                 break;
2429         case CRYPTO_ALG_TYPE_AEAD:
2430                 alg->cra_type = &crypto_aead_type;
2431                 alg->cra_aead = template->template_aead;
2432                 break;
2433         }
2434
2435         t_alg->class1_alg_type = template->class1_alg_type;
2436         t_alg->class2_alg_type = template->class2_alg_type;
2437         t_alg->alg_op = template->alg_op;
2438
2439         return t_alg;
2440 }
2441
2442 static int __init caam_algapi_init(void)
2443 {
2444         int i = 0, err = 0;
2445
2446         INIT_LIST_HEAD(&alg_list);
2447
2448         /* register crypto algorithms the device supports */
2449         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2450                 /* TODO: check if h/w supports alg */
2451                 struct caam_crypto_alg *t_alg;
2452
2453                 t_alg = caam_alg_alloc(&driver_algs[i]);
2454                 if (IS_ERR(t_alg)) {
2455                         err = PTR_ERR(t_alg);
2456                         pr_warn("%s alg allocation failed\n",
2457                                 driver_algs[i].driver_name);
2458                         continue;
2459                 }
2460
2461                 err = crypto_register_alg(&t_alg->crypto_alg);
2462                 if (err) {
2463                         pr_warn("%s alg registration failed\n",
2464                                 t_alg->crypto_alg.cra_driver_name);
2465                         kfree(t_alg);
2466                 } else {
2467                         list_add_tail(&t_alg->entry, &alg_list);
                }
2468         }
2469         if (!list_empty(&alg_list))
2470                 pr_info("caam algorithms registered in /proc/crypto\n");
2471
2472         return err;
2473 }
2474
2475 module_init(caam_algapi_init);
2476 module_exit(caam_algapi_exit);
2477
2478 MODULE_LICENSE("GPL");
2479 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2480 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");