/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
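
/*
 * Illustrative sketch (not compiled as part of this driver): a job
 * descriptor matching the layout above, built with the desc_constr.h
 * helpers. The buffer, DMA addresses and lengths here are hypothetical
 * placeholders; the real job descriptors are assembled per request
 * later in this file.
 *
 *	u32 job_desc[MAX_CAAM_DESCSIZE];
 *
 *	init_job_desc_shared(job_desc, sh_desc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(job_desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(job_desc, src_dma, in_len, 0);
 */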

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY               3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
                                         CTR_RFC3686_NONCE_SIZE + \
                                         SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH              16
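
/*
 * Worked example of the bound above, assuming the usual header values
 * (AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4,
 * SHA512_DIGEST_SIZE = 64): CAAM_MAX_KEY_SIZE = 32 + 4 + 2 * 64 = 164
 * bytes, i.e. room for an AES-256 key, an RFC3686 nonce and a SHA-512
 * split key.
 */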

#define AEAD_DESC_JOB_IO_LEN            (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN             (AEAD_DESC_JOB_IO_LEN + \
                                         CAAM_CMD_SZ * 4)

/* length of descriptors text */
#define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN       (6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE             (3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN          (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN          (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE                   (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN                (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN                (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN            (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN            (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE               (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN            (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN            (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES             (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
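
/*
 * Worked example, assuming CAAM_CMD_SZ = 4, CAAM_DESC_BYTES_MAX = 256
 * (64 words) and 32-bit DMA pointers (giving DESC_JOB_IO_LEN = 5 * 4 +
 * 3 * 4 = 32 bytes): DESC_MAX_USED_BYTES = 256 - 32 = 224, so each
 * sh_desc_* buffer in struct caam_ctx holds at most DESC_MAX_USED_LEN =
 * 56 commands.
 */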

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
        u32 *jump_cmd, *uncond_jump_cmd;

        /* DK bit is valid only for AES */
        if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
                append_operation(desc, type | OP_ALG_AS_INITFINAL |
                                 OP_ALG_DECRYPT);
                return;
        }

        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
        set_jump_tgt_here(desc, uncond_jump_cmd);
}
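
/*
 * The jump pair above acts as an if/else resolved at descriptor run
 * time: if the shared descriptor is entered with its keys already
 * loaded (SHRD condition true), control lands on the OP_ALG_AAI_DK
 * variant; otherwise the plain decrypt operation runs and the
 * unconditional jump skips the DK one.
 */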

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
        append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
                        LDST_SRCDST_BYTE_CONTEXT |
                        (ivoffset << LDST_OFFSET_SHIFT));
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
                    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
                             KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only),
 * is contiguous
 */
#define GIV_SRC_CONTIG          1
#define GIV_DST_CONTIG          (1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
        struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
        u32 class1_alg_type;
        u32 class2_alg_type;
        u32 alg_op;
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
        unsigned int enckeylen;
        unsigned int split_key_len;
        unsigned int split_key_pad_len;
        unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
                            int keys_fit_inline, bool is_rfc3686)
{
        u32 *nonce;
        unsigned int enckeylen = ctx->enckeylen;

        /*
         * RFC3686 specific:
         *      | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
         *      | enckeylen = encryption key size + nonce size
         */
        if (is_rfc3686)
                enckeylen -= CTR_RFC3686_NONCE_SIZE;

        if (keys_fit_inline) {
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key_as_imm(desc, (void *)ctx->key +
                                  ctx->split_key_pad_len, enckeylen,
                                  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        } else {
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
                           enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        }

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686) {
                nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
                               enckeylen);
                append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
                                    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
                append_move(desc,
                            MOVE_SRC_OUTFIFO |
                            MOVE_DEST_CLASS1CTX |
                            (16 << MOVE_OFFSET_SHIFT) |
                            (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
        }
}
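
/*
 * For reference, the ctx->key layout consumed above (assembled by
 * aead_setkey() below) is:
 *
 *	| MDHA split key, padded | enc key | nonce (rfc3686 only) |
 *	0             split_key_pad_len    +enckeylen
 *
 * so the class 2 KEY command reads from offset 0, the class 1 KEY
 * command from split_key_pad_len, and the 4-byte RFC3686 nonce, when
 * present, sits at the tail of the buffer.
 */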

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
                                  int keys_fit_inline, bool is_rfc3686)
{
        u32 *key_jump_cmd;

        /* Note: Context registers are saved. */
        init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        set_jump_tgt_here(desc, key_jump_cmd);
}
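
/*
 * The JUMP_COND_SHRD test above is the key-skip idiom used throughout
 * this file: when a job re-enters a shared descriptor that is still
 * resident, the key registers already hold the session keys, so the
 * KEY commands between the jump and its target are bypassed.
 */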

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
        u32 *desc;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;
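
        /*
         * Worked example of the check above, assuming 32-bit DMA
         * pointers (DESC_JOB_IO_LEN = 32): DESC_AEAD_NULL_ENC_LEN =
         * (3 + 14) * 4 = 68 bytes, leaving 256 - 100 = 156 bytes for
         * the inline split key, so even a padded SHA-512 split key
         * (128 bytes) fits inline here.
         */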

        /* old_aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /*
         * NULL encryption; IV is zero
         * assoclen = (assoclen + cryptlen) - cryptlen
         */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Prepare to read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        /* old_aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
        else
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Prepare to read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
                                    MOVE_DEST_MATH2 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
                                     MOVE_DEST_DESCBUF |
                                     MOVE_WAITCOMP |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Read and write cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /*
         * Insert a NOP here, since we need at least 4 instructions between
         * code patching the descriptor buffer and the location being patched.
         */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
                    MOVE_AUX_LS);
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "aead null dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
        const char *alg_name = crypto_tfm_alg_name(ctfm);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline;
        u32 geniv, moveiv;
        u32 ctx1_iv_off = 0;
        u32 *desc;
        const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = (ctr_mode &&
                                 (strstr(alg_name, "rfc3686") != NULL));

        if (!ctx->authsize)
                return 0;

        /* NULL encryption / decryption */
        if (!ctx->enckeylen)
                return aead_null_set_sh_desc(aead);

        /*
         * AES-CTR needs to load IV in CONTEXT1 reg
         * at an offset of 128bits (16bytes)
         * CONTEXT1[255:128] = IV
         */
        if (ctr_mode)
                ctx1_iv_off = 16;

        /*
         * RFC3686 specific:
         *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         */
        if (is_rfc3686)
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* old_aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
        aead_append_ld_iv(desc, ivsize, ctx1_iv_off);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* old_aead_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        aead_append_ld_iv(desc, ivsize, ctx1_iv_off);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Choose operation */
        if (ctr_mode)
                append_operation(desc, ctx->class1_alg_type |
                                 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
        else
                append_dec_op1(desc, ctx->class1_alg_type);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;

        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_WAITCOMP |
                    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Copy IV to class 1 context */
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
                    (ivsize << MOVE_LEN_SHIFT));

        /* Return to encryption */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen = seqinlen - (ivsize + cryptlen) */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Copy iv from outfifo to class 2 fifo */
        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
                 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
                                    LDST_CLASS_1_CCB |
                                    LDST_SRCDST_BYTE_CONTEXT |
                                    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
                                     LDST_OFFSET_SHIFT));

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* No need to reload iv */
        append_seq_fifo_load(desc, ivsize,
                             FIFOLD_CLASS_SKIP);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
                                                 desc_bytes(desc),
                                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
                                    unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *zero_payload_jump_cmd,
            *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * AES GCM encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if the keys are already loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD | JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* if assoclen + cryptlen is ZERO, skip to ICV write */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
                                                 JUMP_COND_MATH_Z);

        /* if assoclen is ZERO, skip reading the assoc data */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                                 JUMP_COND_MATH_Z);

        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* cryptlen = seqinlen - assoclen */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* if cryptlen is ZERO jump to zero-payload commands */
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* jump the zero-payload commands */
        append_jump(desc, JUMP_TEST_ALL | 2);

        /* zero-payload commands */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

        /* There is no input data */
        set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

        /* write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* skip key loading if the keys are already loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD |
                                   JUMP_COND_SELF);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* if assoclen is ZERO, skip reading the assoc data */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
                                                 JUMP_COND_MATH_Z);

        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

        /* cryptlen = seqoutlen - assoclen */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* jump to zero-payload command if cryptlen is zero */
        zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
                                            JUMP_COND_MATH_Z);

        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* store encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* zero-payload command */
        set_jump_tgt_here(desc, zero_payload_jump_cmd);

        /* read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        gcm_set_sh_desc(authenc);

        return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4106 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* cryptlen = seqoutlen - assoclen */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Will read cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read payload data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

        /* Will write cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Will read cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* Read encrypted data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

        /* Read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4106_set_sh_desc(authenc);

        return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd;
        u32 *read_move_cmd, *write_move_cmd;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * RFC4543 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* assoclen + cryptlen = seqinlen */
        append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Will read assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Will write assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* Read and write assoclen + cryptlen bytes */
        aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        /* Move payload data to OFIFO */
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
            ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip key loading if it is loaded due to sharing */
        key_jump_cmd = append_jump(desc, JUMP_JSL |
                                   JUMP_TEST_ALL | JUMP_COND_SHRD);
        if (keys_fit_inline)
                append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        else
                append_key(desc, ctx->key_dma, ctx->enckeylen,
                           CLASS_1 | KEY_DEST_CLASS_REG);
        set_jump_tgt_here(desc, key_jump_cmd);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqoutlen */
        append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /*
         * MOVE_LEN opcode is not available in all SEC HW revisions,
         * thus need to do some magic, i.e. self-patch the descriptor
         * buffer.
         */
        read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
                                    (0x6 << MOVE_LEN_SHIFT));
        write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
                                     (0x8 << MOVE_LEN_SHIFT));

        /* Will read assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Will write assoclen + cryptlen bytes */
        append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

        /* In-snoop assoclen + cryptlen data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

        set_move_tgt_here(desc, read_move_cmd);
        set_move_tgt_here(desc, write_move_cmd);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        /* Move payload data to OFIFO */
        append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Read ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
                             FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4543_set_sh_desc(authenc);

        return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
                              u32 authkeylen)
{
        return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                               ctx->split_key_pad_len, key_in, authkeylen,
                               ctx->alg_op);
}
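
/*
 * Background note: the "split key" produced here is the MDHA IPAD/OPAD
 * pair, i.e. the inner and outer HMAC states precomputed from the raw
 * authentication key by a one-shot CAAM job (see key_gen.c); each half
 * is one mdpadlen[] entry long, hence the "* 2" in aead_setkey() below.
 */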

static int aead_setkey(struct crypto_aead *aead,
                               const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        struct crypto_authenc_keys keys;
        int ret = 0;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        /* Pick class 2 key length from algorithm submask */
        ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                      OP_ALG_ALGSEL_SHIFT] * 2;
        ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
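
        /*
         * Worked example: for hmac(sha256) the ALGSEL submask selects
         * mdpadlen[3] = 32, so split_key_len = 64 and
         * split_key_pad_len = ALIGN(64, 16) = 64 bytes.
         */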
1303
1304         if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1305                 goto badkey;
1306
1307 #ifdef DEBUG
1308         printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
1309                keys.authkeylen + keys.enckeylen, keys.enckeylen,
1310                keys.authkeylen);
1311         printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1312                ctx->split_key_len, ctx->split_key_pad_len);
1313         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1314                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1315 #endif
1316
1317         ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
        if (ret)
                goto badkey;
1321
        /* append encryption key to auth split key */
1323         memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
1324
1325         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
1326                                       keys.enckeylen, DMA_TO_DEVICE);
1327         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1328                 dev_err(jrdev, "unable to map key i/o memory\n");
1329                 return -ENOMEM;
1330         }
1331 #ifdef DEBUG
1332         print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1333                        DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1334                        ctx->split_key_pad_len + keys.enckeylen, 1);
1335 #endif
1336
1337         ctx->enckeylen = keys.enckeylen;
1338
1339         ret = aead_set_sh_desc(aead);
1340         if (ret) {
1341                 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
1342                                  keys.enckeylen, DMA_TO_DEVICE);
1343         }
1344
1345         return ret;
1346 badkey:
1347         crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1348         return -EINVAL;
1349 }
1350
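/*
 * Worked example (illustrative only) of the key material laid out by
 * aead_setkey() for authenc(hmac(sha1),cbc(aes)) with a 16-byte AES key:
 *
 *   split_key_len     = 20 * 2 = 40
 *   split_key_pad_len = ALIGN(40, 16) = 48
 *
 *   ctx->key: | MDHA split key + pad (48 bytes) | AES key (16 bytes) |
 *
 * so the single DMA mapping above covers 48 + 16 = 64 bytes.
 */
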
1351 static int gcm_setkey(struct crypto_aead *aead,
1352                       const u8 *key, unsigned int keylen)
1353 {
1354         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1355         struct device *jrdev = ctx->jrdev;
1356         int ret = 0;
1357
1358 #ifdef DEBUG
1359         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1360                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1361 #endif
1362
1363         memcpy(ctx->key, key, keylen);
1364         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1365                                       DMA_TO_DEVICE);
1366         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1367                 dev_err(jrdev, "unable to map key i/o memory\n");
1368                 return -ENOMEM;
1369         }
1370         ctx->enckeylen = keylen;
1371
1372         ret = gcm_set_sh_desc(aead);
1373         if (ret) {
1374                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1375                                  DMA_TO_DEVICE);
1376         }
1377
1378         return ret;
1379 }
1380
1381 static int rfc4106_setkey(struct crypto_aead *aead,
1382                           const u8 *key, unsigned int keylen)
1383 {
1384         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1385         struct device *jrdev = ctx->jrdev;
1386         int ret = 0;
1387
1388         if (keylen < 4)
1389                 return -EINVAL;
1390
1391 #ifdef DEBUG
1392         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1393                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1394 #endif
1395
1396         memcpy(ctx->key, key, keylen);
1397
1398         /*
1399          * The last four bytes of the key material are used as the salt value
1400          * in the nonce. Update the AES key length.
1401          */
1402         ctx->enckeylen = keylen - 4;
1403
1404         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1405                                       DMA_TO_DEVICE);
1406         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1407                 dev_err(jrdev, "unable to map key i/o memory\n");
1408                 return -ENOMEM;
1409         }
1410
1411         ret = rfc4106_set_sh_desc(aead);
1412         if (ret) {
1413                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1414                                  DMA_TO_DEVICE);
1415         }
1416
1417         return ret;
1418 }
1419
1420 static int rfc4543_setkey(struct crypto_aead *aead,
1421                           const u8 *key, unsigned int keylen)
1422 {
1423         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1424         struct device *jrdev = ctx->jrdev;
1425         int ret = 0;
1426
1427         if (keylen < 4)
1428                 return -EINVAL;
1429
1430 #ifdef DEBUG
1431         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1432                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1433 #endif
1434
1435         memcpy(ctx->key, key, keylen);
1436
1437         /*
1438          * The last four bytes of the key material are used as the salt value
1439          * in the nonce. Update the AES key length.
1440          */
1441         ctx->enckeylen = keylen - 4;
1442
1443         ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1444                                       DMA_TO_DEVICE);
1445         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1446                 dev_err(jrdev, "unable to map key i/o memory\n");
1447                 return -ENOMEM;
1448         }
1449
1450         ret = rfc4543_set_sh_desc(aead);
1451         if (ret) {
1452                 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1453                                  DMA_TO_DEVICE);
1454         }
1455
1456         return ret;
1457 }
1458
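/*
 * A minimal sketch (hypothetical helper, not driver API) of the key blob
 * format that rfc4106_setkey()/rfc4543_setkey() above expect: the AES key
 * followed by the 4-byte salt that becomes the implicit part of the GCM
 * nonce.
 */
static inline void __maybe_unused example_rfc4106_key_blob(u8 *blob,
                                                           const u8 *aes_key,
                                                           unsigned int aes_keylen,
                                                           const u8 *salt)
{
        memcpy(blob, aes_key, aes_keylen);      /* AES-128/192/256 key */
        memcpy(blob + aes_keylen, salt, 4);     /* implicit nonce salt */
}
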
1459 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1460                              const u8 *key, unsigned int keylen)
1461 {
1462         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1463         struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1464         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1465         const char *alg_name = crypto_tfm_alg_name(tfm);
1466         struct device *jrdev = ctx->jrdev;
1467         int ret = 0;
1468         u32 *key_jump_cmd;
1469         u32 *desc;
        const u32 *nonce;
1471         u32 geniv;
1472         u32 ctx1_iv_off = 0;
1473         const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1474                                OP_ALG_AAI_CTR_MOD128);
1475         const bool is_rfc3686 = (ctr_mode &&
1476                                  (strstr(alg_name, "rfc3686") != NULL));
1477
1478 #ifdef DEBUG
1479         print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1480                        DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1481 #endif
        /*
         * AES-CTR needs to load the IV into the CONTEXT1 reg
         * at an offset of 128 bits (16 bytes):
         * CONTEXT1[255:128] = IV
         */
1487         if (ctr_mode)
1488                 ctx1_iv_off = 16;
1489
1490         /*
1491          * RFC3686 specific:
1492          *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1493          *      | *key = {KEY, NONCE}
1494          */
1495         if (is_rfc3686) {
1496                 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1497                 keylen -= CTR_RFC3686_NONCE_SIZE;
1498         }
1499
1500         memcpy(ctx->key, key, keylen);
1501         ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1502                                       DMA_TO_DEVICE);
1503         if (dma_mapping_error(jrdev, ctx->key_dma)) {
1504                 dev_err(jrdev, "unable to map key i/o memory\n");
1505                 return -ENOMEM;
1506         }
1507         ctx->enckeylen = keylen;
1508
1509         /* ablkcipher_encrypt shared descriptor */
1510         desc = ctx->sh_desc_enc;
1511         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1512         /* Skip if already shared */
1513         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1514                                    JUMP_COND_SHRD);
1515
1516         /* Load class1 key only */
1517         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1518                           ctx->enckeylen, CLASS_1 |
1519                           KEY_DEST_CLASS_REG);
1520
1521         /* Load nonce into CONTEXT1 reg */
1522         if (is_rfc3686) {
                nonce = (const u32 *)(key + keylen);
1524                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1525                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1526                 append_move(desc, MOVE_WAITCOMP |
1527                             MOVE_SRC_OUTFIFO |
1528                             MOVE_DEST_CLASS1CTX |
1529                             (16 << MOVE_OFFSET_SHIFT) |
1530                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1531         }
1532
1533         set_jump_tgt_here(desc, key_jump_cmd);
1534
        /* Load IV */
1536         append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1537                         LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1538
1539         /* Load counter into CONTEXT1 reg */
1540         if (is_rfc3686)
1541                 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1542                                     LDST_CLASS_1_CCB |
1543                                     LDST_SRCDST_BYTE_CONTEXT |
1544                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1545                                      LDST_OFFSET_SHIFT));
1546
1547         /* Load operation */
1548         append_operation(desc, ctx->class1_alg_type |
1549                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1550
1551         /* Perform operation */
1552         ablkcipher_append_src_dst(desc);
1553
1554         ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1555                                               desc_bytes(desc),
1556                                               DMA_TO_DEVICE);
1557         if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1558                 dev_err(jrdev, "unable to map shared descriptor\n");
1559                 return -ENOMEM;
1560         }
1561 #ifdef DEBUG
1562         print_hex_dump(KERN_ERR,
1563                        "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1564                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1565                        desc_bytes(desc), 1);
1566 #endif
1567         /* ablkcipher_decrypt shared descriptor */
1568         desc = ctx->sh_desc_dec;
1569
1570         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1571         /* Skip if already shared */
1572         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1573                                    JUMP_COND_SHRD);
1574
1575         /* Load class1 key only */
1576         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1577                           ctx->enckeylen, CLASS_1 |
1578                           KEY_DEST_CLASS_REG);
1579
1580         /* Load nonce into CONTEXT1 reg */
1581         if (is_rfc3686) {
                nonce = (const u32 *)(key + keylen);
1583                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1584                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1585                 append_move(desc, MOVE_WAITCOMP |
1586                             MOVE_SRC_OUTFIFO |
1587                             MOVE_DEST_CLASS1CTX |
1588                             (16 << MOVE_OFFSET_SHIFT) |
1589                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1590         }
1591
1592         set_jump_tgt_here(desc, key_jump_cmd);
1593
        /* Load IV */
1595         append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1596                         LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1597
1598         /* Load counter into CONTEXT1 reg */
1599         if (is_rfc3686)
1600                 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1601                                     LDST_CLASS_1_CCB |
1602                                     LDST_SRCDST_BYTE_CONTEXT |
1603                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1604                                      LDST_OFFSET_SHIFT));
1605
1606         /* Choose operation */
1607         if (ctr_mode)
1608                 append_operation(desc, ctx->class1_alg_type |
1609                                  OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1610         else
1611                 append_dec_op1(desc, ctx->class1_alg_type);
1612
1613         /* Perform operation */
1614         ablkcipher_append_src_dst(desc);
1615
1616         ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1617                                               desc_bytes(desc),
1618                                               DMA_TO_DEVICE);
1619         if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1620                 dev_err(jrdev, "unable to map shared descriptor\n");
1621                 return -ENOMEM;
1622         }
1623
1624 #ifdef DEBUG
1625         print_hex_dump(KERN_ERR,
1626                        "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1627                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1628                        desc_bytes(desc), 1);
1629 #endif
1630         /* ablkcipher_givencrypt shared descriptor */
1631         desc = ctx->sh_desc_givenc;
1632
1633         init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1634         /* Skip if already shared */
1635         key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1636                                    JUMP_COND_SHRD);
1637
1638         /* Load class1 key only */
1639         append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1640                           ctx->enckeylen, CLASS_1 |
1641                           KEY_DEST_CLASS_REG);
1642
        /* Load nonce into CONTEXT1 reg */
1644         if (is_rfc3686) {
                nonce = (const u32 *)(key + keylen);
1646                 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1647                                     LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1648                 append_move(desc, MOVE_WAITCOMP |
1649                             MOVE_SRC_OUTFIFO |
1650                             MOVE_DEST_CLASS1CTX |
1651                             (16 << MOVE_OFFSET_SHIFT) |
1652                             (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1653         }
1654         set_jump_tgt_here(desc, key_jump_cmd);
1655
1656         /* Generate IV */
1657         geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1658                 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1659                 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1660         append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1661                             LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1662         append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1663         append_move(desc, MOVE_WAITCOMP |
1664                     MOVE_SRC_INFIFO |
1665                     MOVE_DEST_CLASS1CTX |
1666                     (crt->ivsize << MOVE_LEN_SHIFT) |
1667                     (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1668         append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1669
1670         /* Copy generated IV to memory */
1671         append_seq_store(desc, crt->ivsize,
1672                          LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1673                          (ctx1_iv_off << LDST_OFFSET_SHIFT));
1674
        /* Load counter into CONTEXT1 reg, as big-endian 1 */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1678                                     LDST_CLASS_1_CCB |
1679                                     LDST_SRCDST_BYTE_CONTEXT |
1680                                     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1681                                      LDST_OFFSET_SHIFT));
1682
        /* Ensure pending CONTEXT1 (IV) accesses complete before ciphering */
        if (ctx1_iv_off)
                append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
                            (1 << JUMP_OFFSET_SHIFT));
1686
1687         /* Load operation */
1688         append_operation(desc, ctx->class1_alg_type |
1689                          OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1690
1691         /* Perform operation */
1692         ablkcipher_append_src_dst(desc);
1693
1694         ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1695                                                  desc_bytes(desc),
1696                                                  DMA_TO_DEVICE);
1697         if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1698                 dev_err(jrdev, "unable to map shared descriptor\n");
1699                 return -ENOMEM;
1700         }
1701 #ifdef DEBUG
1702         print_hex_dump(KERN_ERR,
1703                        "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1704                        DUMP_PREFIX_ADDRESS, 16, 4, desc,
1705                        desc_bytes(desc), 1);
1706 #endif
1707
1708         return ret;
1709 }
1710
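/*
 * Resulting CONTEXT1 layout assumed by the three shared descriptors
 * built above (byte offsets; an illustrative summary, not driver code):
 *
 *   plain AES-CTR:  16..31  IV                    (ctx1_iv_off = 16)
 *   rfc3686:        16..19  nonce from key material
 *                   20..27  per-request IV        (ctx1_iv_off = 20)
 *                   28..31  block counter, initialized to big-endian 1
 *   other modes:     0..15  IV                    (ctx1_iv_off = 0)
 */
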
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *           variable length, must not exceed MAX_CAAM_DESCSIZE
 */
1725 struct aead_edesc {
1726         int assoc_nents;
1727         bool assoc_chained;
1728         int src_nents;
1729         bool src_chained;
1730         int dst_nents;
1731         bool dst_chained;
1732         dma_addr_t iv_dma;
1733         int sec4_sg_bytes;
1734         dma_addr_t sec4_sg_dma;
1735         struct sec4_sg_entry *sec4_sg;
1736         u32 hw_desc[];
1737 };
1738
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *           variable length, must not exceed MAX_CAAM_DESCSIZE
 */
1751 struct ablkcipher_edesc {
1752         int src_nents;
1753         bool src_chained;
1754         int dst_nents;
1755         bool dst_chained;
1756         dma_addr_t iv_dma;
1757         int sec4_sg_bytes;
1758         dma_addr_t sec4_sg_dma;
1759         struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[];
1761 };
1762
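/*
 * Both extended descriptors above are carved out of a single allocation;
 * a minimal sketch of the layout produced by the *_edesc_alloc() helpers
 * further below:
 *
 *   +---------------------------+  <- kmalloc(sizeof(*edesc) +
 *   | struct {aead,ablkcipher}_ |             desc_bytes + sec4_sg_bytes)
 *   | edesc bookkeeping         |
 *   +---------------------------+
 *   | hw_desc[]: h/w job desc   |  <- desc_bytes
 *   +---------------------------+
 *   | sec4_sg[]: h/w link table |  <- sec4_sg_bytes, DMA mapped on its own
 *   +---------------------------+
 */
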
1763 static void caam_unmap(struct device *dev, struct scatterlist *src,
1764                        struct scatterlist *dst, int src_nents,
1765                        bool src_chained, int dst_nents, bool dst_chained,
1766                        dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1767                        int sec4_sg_bytes)
1768 {
1769         if (dst != src) {
1770                 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1771                                      src_chained);
1772                 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1773                                      dst_chained);
1774         } else {
1775                 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1776                                      DMA_BIDIRECTIONAL, src_chained);
1777         }
1778
1779         if (iv_dma)
1780                 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1781         if (sec4_sg_bytes)
1782                 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1783                                  DMA_TO_DEVICE);
1784 }
1785
1786 static void aead_unmap(struct device *dev,
1787                        struct aead_edesc *edesc,
1788                        struct aead_request *req)
1789 {
1790         caam_unmap(dev, req->src, req->dst,
1791                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1792                    edesc->dst_chained, 0, 0,
1793                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1794 }
1795
1796 static void old_aead_unmap(struct device *dev,
1797                            struct aead_edesc *edesc,
1798                            struct aead_request *req)
1799 {
1800         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1801         int ivsize = crypto_aead_ivsize(aead);
1802
1803         dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
1804                              DMA_TO_DEVICE, edesc->assoc_chained);
1805
1806         caam_unmap(dev, req->src, req->dst,
1807                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1808                    edesc->dst_chained, edesc->iv_dma, ivsize,
1809                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1810 }
1811
1812 static void ablkcipher_unmap(struct device *dev,
1813                              struct ablkcipher_edesc *edesc,
1814                              struct ablkcipher_request *req)
1815 {
1816         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1817         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1818
1819         caam_unmap(dev, req->src, req->dst,
1820                    edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1821                    edesc->dst_chained, edesc->iv_dma, ivsize,
1822                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1823 }
1824
1825 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1826                                    void *context)
1827 {
1828         struct aead_request *req = context;
1829         struct aead_edesc *edesc;
1830
1831 #ifdef DEBUG
1832         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1833 #endif
1834
1835         edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1836
1837         if (err)
1838                 caam_jr_strstatus(jrdev, err);
1839
1840         aead_unmap(jrdev, edesc, req);
1841
1842         kfree(edesc);
1843
1844         aead_request_complete(req, err);
1845 }
1846
1847 static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1848                                   void *context)
1849 {
1850         struct aead_request *req = context;
1851         struct aead_edesc *edesc;
1852 #ifdef DEBUG
1853         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1854         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1855         int ivsize = crypto_aead_ivsize(aead);
1856
1857         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1858 #endif
1859
1860         edesc = (struct aead_edesc *)((char *)desc -
1861                  offsetof(struct aead_edesc, hw_desc));
1862
1863         if (err)
1864                 caam_jr_strstatus(jrdev, err);
1865
1866         old_aead_unmap(jrdev, edesc, req);
1867
1868 #ifdef DEBUG
1869         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
1870                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
1872         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1873                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
1874                        edesc->src_nents ? 100 : ivsize, 1);
1875         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1876                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1877                        edesc->src_nents ? 100 : req->cryptlen +
1878                        ctx->authsize + 4, 1);
1879 #endif
1880
1881         kfree(edesc);
1882
1883         aead_request_complete(req, err);
1884 }
1885
1886 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1887                                    void *context)
1888 {
1889         struct aead_request *req = context;
1890         struct aead_edesc *edesc;
1891
1892 #ifdef DEBUG
1893         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1894 #endif
1895
1896         edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1897
1898         if (err)
1899                 caam_jr_strstatus(jrdev, err);
1900
1901         aead_unmap(jrdev, edesc, req);
1902
        /* verify the h/w ICV check passed; if not, return -EBADMSG */
1906         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1907                 err = -EBADMSG;
1908
1909         kfree(edesc);
1910
1911         aead_request_complete(req, err);
1912 }
1913
1914 static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1915                                   void *context)
1916 {
1917         struct aead_request *req = context;
1918         struct aead_edesc *edesc;
1919 #ifdef DEBUG
1920         struct crypto_aead *aead = crypto_aead_reqtfm(req);
1921         struct caam_ctx *ctx = crypto_aead_ctx(aead);
1922         int ivsize = crypto_aead_ivsize(aead);
1923
1924         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1925 #endif
1926
1927         edesc = (struct aead_edesc *)((char *)desc -
1928                  offsetof(struct aead_edesc, hw_desc));
1929
1930 #ifdef DEBUG
1931         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1932                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1933                        ivsize, 1);
1934         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1935                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
1936                        req->cryptlen - ctx->authsize, 1);
1937 #endif
1938
1939         if (err)
1940                 caam_jr_strstatus(jrdev, err);
1941
1942         old_aead_unmap(jrdev, edesc, req);
1943
        /* verify the h/w ICV check passed; if not, return -EBADMSG */
1947         if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1948                 err = -EBADMSG;
1949
1950 #ifdef DEBUG
1951         print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
1952                        DUMP_PREFIX_ADDRESS, 16, 4,
1953                        ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
1954                        sizeof(struct iphdr) + req->assoclen +
1955                        ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
1956                        ctx->authsize + 36, 1);
1957         if (!err && edesc->sec4_sg_bytes) {
1958                 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
1959                 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
1960                                DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1961                         sg->length + ctx->authsize + 16, 1);
1962         }
1963 #endif
1964
1965         kfree(edesc);
1966
1967         aead_request_complete(req, err);
1968 }
1969
1970 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1971                                    void *context)
1972 {
1973         struct ablkcipher_request *req = context;
1974         struct ablkcipher_edesc *edesc;
1975 #ifdef DEBUG
1976         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1977         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1978
1979         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1980 #endif
1981
1982         edesc = (struct ablkcipher_edesc *)((char *)desc -
1983                  offsetof(struct ablkcipher_edesc, hw_desc));
1984
1985         if (err)
1986                 caam_jr_strstatus(jrdev, err);
1987
1988 #ifdef DEBUG
1989         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1990                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1991                        edesc->src_nents > 1 ? 100 : ivsize, 1);
1992         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1993                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1994                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1995 #endif
1996
1997         ablkcipher_unmap(jrdev, edesc, req);
1998         kfree(edesc);
1999
2000         ablkcipher_request_complete(req, err);
2001 }
2002
2003 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2004                                     void *context)
2005 {
2006         struct ablkcipher_request *req = context;
2007         struct ablkcipher_edesc *edesc;
2008 #ifdef DEBUG
2009         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2010         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2011
2012         dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2013 #endif
2014
2015         edesc = (struct ablkcipher_edesc *)((char *)desc -
2016                  offsetof(struct ablkcipher_edesc, hw_desc));
2017         if (err)
2018                 caam_jr_strstatus(jrdev, err);
2019
2020 #ifdef DEBUG
2021         print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2022                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2023                        ivsize, 1);
2024         print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2025                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2026                        edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2027 #endif
2028
2029         ablkcipher_unmap(jrdev, edesc, req);
2030         kfree(edesc);
2031
2032         ablkcipher_request_complete(req, err);
2033 }
2034
2035 /*
2036  * Fill in aead job descriptor
2037  */
2038 static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2039                               struct aead_edesc *edesc,
2040                               struct aead_request *req,
2041                               bool all_contig, bool encrypt)
2042 {
2043         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2044         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2045         int ivsize = crypto_aead_ivsize(aead);
2046         int authsize = ctx->authsize;
2047         u32 *desc = edesc->hw_desc;
2048         u32 out_options = 0, in_options;
2049         dma_addr_t dst_dma, src_dma;
2050         int len, sec4_sg_index = 0;
2051         bool is_gcm = false;
2052
2053 #ifdef DEBUG
2054         debug("assoclen %d cryptlen %d authsize %d\n",
2055               req->assoclen, req->cryptlen, authsize);
2056         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
2057                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
2059         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2060                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
2061                        edesc->src_nents ? 100 : ivsize, 1);
2062         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2063                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen, 1);
2065         print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
2066                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2067                        desc_bytes(sh_desc), 1);
2068 #endif
2069
2070         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2071               OP_ALG_ALGSEL_AES) &&
2072             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2073                 is_gcm = true;
2074
2075         len = desc_len(sh_desc);
2076         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2077
2078         if (all_contig) {
2079                 if (is_gcm)
2080                         src_dma = edesc->iv_dma;
2081                 else
2082                         src_dma = sg_dma_address(req->assoc);
2083                 in_options = 0;
2084         } else {
2085                 src_dma = edesc->sec4_sg_dma;
2086                 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2087                                  (edesc->src_nents ? : 1);
2088                 in_options = LDST_SGF;
2089         }
2090
2091         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2092                           in_options);
2093
2094         if (likely(req->src == req->dst)) {
2095                 if (all_contig) {
2096                         dst_dma = sg_dma_address(req->src);
2097                 } else {
2098                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
2099                                   ((edesc->assoc_nents ? : 1) + 1);
2100                         out_options = LDST_SGF;
2101                 }
2102         } else {
2103                 if (!edesc->dst_nents) {
2104                         dst_dma = sg_dma_address(req->dst);
2105                 } else {
2106                         dst_dma = edesc->sec4_sg_dma +
2107                                   sec4_sg_index *
2108                                   sizeof(struct sec4_sg_entry);
2109                         out_options = LDST_SGF;
2110                 }
2111         }
2112         if (encrypt)
2113                 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2114                                    out_options);
2115         else
2116                 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2117                                    out_options);
2118 }
2119
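/*
 * Worked example (illustrative only) for the old AEAD interface above:
 * an encrypt with assoclen = 16, ivsize = 16, cryptlen = 64 and
 * authsize = 16 yields
 *
 *   SEQ_IN_PTR  length = 16 + 16 + 64 = 96  (AAD + IV + plaintext)
 *   SEQ_OUT_PTR length = 64 + 16      = 80  (ciphertext + ICV)
 */
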
2120 /*
2121  * Fill in aead job descriptor
2122  */
2123 static void init_aead_job(struct aead_request *req,
2124                           struct aead_edesc *edesc,
2125                           bool all_contig, bool encrypt)
2126 {
2127         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2128         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2129         int authsize = ctx->authsize;
2130         u32 *desc = edesc->hw_desc;
2131         u32 out_options, in_options;
2132         dma_addr_t dst_dma, src_dma;
2133         int len, sec4_sg_index = 0;
2134         dma_addr_t ptr;
2135         u32 *sh_desc;
2136
2137         sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2138         ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2139
2140         len = desc_len(sh_desc);
2141         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2142
2143         if (all_contig) {
2144                 src_dma = sg_dma_address(req->src);
2145                 in_options = 0;
2146         } else {
2147                 src_dma = edesc->sec4_sg_dma;
2148                 sec4_sg_index += edesc->src_nents;
2149                 in_options = LDST_SGF;
2150         }
2151
2152         append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2153                           in_options);
2154
2155         dst_dma = src_dma;
2156         out_options = in_options;
2157
2158         if (unlikely(req->src != req->dst)) {
2159                 if (!edesc->dst_nents) {
2160                         dst_dma = sg_dma_address(req->dst);
2161                 } else {
2162                         dst_dma = edesc->sec4_sg_dma +
2163                                   sec4_sg_index *
2164                                   sizeof(struct sec4_sg_entry);
2165                         out_options = LDST_SGF;
2166                 }
2167         }
2168
2169         if (encrypt)
2170                 append_seq_out_ptr(desc, dst_dma,
2171                                    req->assoclen + req->cryptlen + authsize,
2172                                    out_options);
2173         else
2174                 append_seq_out_ptr(desc, dst_dma,
2175                                    req->assoclen + req->cryptlen - authsize,
2176                                    out_options);
2177
2178         /* REG3 = assoclen */
2179         append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2180 }
2181
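/*
 * Worked example (illustrative only) for the new AEAD interface above,
 * where the IV is no longer part of the input sequence: an encrypt with
 * assoclen = 16, cryptlen = 64 and authsize = 16 yields
 *
 *   SEQ_IN_PTR  length = 16 + 64      = 80  (AAD + plaintext)
 *   SEQ_OUT_PTR length = 16 + 64 + 16 = 96  (AAD + ciphertext + ICV)
 *
 * with REG3 = assoclen (16) telling the shared descriptor where the AAD
 * ends and the payload begins.
 */
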
2182 static void init_gcm_job(struct aead_request *req,
2183                          struct aead_edesc *edesc,
2184                          bool all_contig, bool encrypt)
2185 {
2186         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2187         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2188         unsigned int ivsize = crypto_aead_ivsize(aead);
2189         u32 *desc = edesc->hw_desc;
2190         bool generic_gcm = (ivsize == 12);
2191         unsigned int last;
2192
2193         init_aead_job(req, edesc, all_contig, encrypt);
2194
        /* BUG: this zero-length-input special case should not be generic-GCM only */
2196         last = 0;
2197         if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2198                 last = FIFOLD_TYPE_LAST1;
2199
2200         /* Read GCM IV */
2201         append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2202                          FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2203         /* Append Salt */
2204         if (!generic_gcm)
2205                 append_data(desc, ctx->key + ctx->enckeylen, 4);
2206         /* Append IV */
2207         append_data(desc, req->iv, ivsize);
2208         /* End of blank commands */
2209 }
2210
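/*
 * Illustrative composition of the 12 IV bytes fed to the FIFO by
 * init_gcm_job():
 *
 *   generic gcm(aes):       | 12-byte per-request IV            |
 *   rfc4106/rfc4543 (ESP):  | 4-byte salt | 8-byte explicit IV  |
 *
 * where the salt is the tail of the key material saved by the setkey
 * routines above (ctx->key + ctx->enckeylen).
 */
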
2211 /*
2212  * Fill in aead givencrypt job descriptor
2213  */
2214 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2215                               struct aead_edesc *edesc,
2216                               struct aead_request *req,
2217                               int contig)
2218 {
2219         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2220         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2221         int ivsize = crypto_aead_ivsize(aead);
2222         int authsize = ctx->authsize;
2223         u32 *desc = edesc->hw_desc;
2224         u32 out_options = 0, in_options;
2225         dma_addr_t dst_dma, src_dma;
2226         int len, sec4_sg_index = 0;
2227         bool is_gcm = false;
2228
2229 #ifdef DEBUG
2230         debug("assoclen %d cryptlen %d authsize %d\n",
2231               req->assoclen, req->cryptlen, authsize);
2232         print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
2233                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
2235         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2236                        DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
2237         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2238                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
2240         print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
2241                        DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2242                        desc_bytes(sh_desc), 1);
2243 #endif
2244
2245         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2246               OP_ALG_ALGSEL_AES) &&
2247             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2248                 is_gcm = true;
2249
2250         len = desc_len(sh_desc);
2251         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2252
2253         if (contig & GIV_SRC_CONTIG) {
2254                 if (is_gcm)
2255                         src_dma = edesc->iv_dma;
2256                 else
2257                         src_dma = sg_dma_address(req->assoc);
2258                 in_options = 0;
2259         } else {
2260                 src_dma = edesc->sec4_sg_dma;
2261                 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
2262                 in_options = LDST_SGF;
2263         }
2264         append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2265                           in_options);
2266
2267         if (contig & GIV_DST_CONTIG) {
2268                 dst_dma = edesc->iv_dma;
2269         } else {
2270                 if (likely(req->src == req->dst)) {
2271                         dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
2272                                   (edesc->assoc_nents +
2273                                    (is_gcm ? 1 + edesc->src_nents : 0));
2274                         out_options = LDST_SGF;
2275                 } else {
2276                         dst_dma = edesc->sec4_sg_dma +
2277                                   sec4_sg_index *
2278                                   sizeof(struct sec4_sg_entry);
2279                         out_options = LDST_SGF;
2280                 }
2281         }
2282
2283         append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2284                            out_options);
2285 }
2286
2287 /*
2288  * Fill in ablkcipher job descriptor
2289  */
2290 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2291                                 struct ablkcipher_edesc *edesc,
2292                                 struct ablkcipher_request *req,
2293                                 bool iv_contig)
2294 {
2295         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2296         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2297         u32 *desc = edesc->hw_desc;
2298         u32 out_options = 0, in_options;
2299         dma_addr_t dst_dma, src_dma;
2300         int len, sec4_sg_index = 0;
2301
2302 #ifdef DEBUG
2303         print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2304                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2305                        ivsize, 1);
2306         print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2307                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2308                        edesc->src_nents ? 100 : req->nbytes, 1);
2309 #endif
2310
2311         len = desc_len(sh_desc);
2312         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2313
2314         if (iv_contig) {
2315                 src_dma = edesc->iv_dma;
2316                 in_options = 0;
2317         } else {
2318                 src_dma = edesc->sec4_sg_dma;
2319                 sec4_sg_index += edesc->src_nents + 1;
2320                 in_options = LDST_SGF;
2321         }
2322         append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2323
2324         if (likely(req->src == req->dst)) {
2325                 if (!edesc->src_nents && iv_contig) {
2326                         dst_dma = sg_dma_address(req->src);
2327                 } else {
2328                         dst_dma = edesc->sec4_sg_dma +
2329                                 sizeof(struct sec4_sg_entry);
2330                         out_options = LDST_SGF;
2331                 }
2332         } else {
2333                 if (!edesc->dst_nents) {
2334                         dst_dma = sg_dma_address(req->dst);
2335                 } else {
2336                         dst_dma = edesc->sec4_sg_dma +
2337                                 sec4_sg_index * sizeof(struct sec4_sg_entry);
2338                         out_options = LDST_SGF;
2339                 }
2340         }
2341         append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2342 }
2343
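/*
 * Worked example (illustrative only): for a 256-byte cbc(aes) request,
 * init_ablkcipher_job() reads IV + payload and writes payload only:
 *
 *   SEQ_IN_PTR  length = 16 + 256 = 272  (IV prepended to the input)
 *   SEQ_OUT_PTR length = 256
 */
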
2344 /*
2345  * Fill in ablkcipher givencrypt job descriptor
2346  */
2347 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2348                                     struct ablkcipher_edesc *edesc,
2349                                     struct ablkcipher_request *req,
2350                                     bool iv_contig)
2351 {
2352         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2353         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2354         u32 *desc = edesc->hw_desc;
2355         u32 out_options, in_options;
2356         dma_addr_t dst_dma, src_dma;
2357         int len, sec4_sg_index = 0;
2358
2359 #ifdef DEBUG
2360         print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2361                        DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2362                        ivsize, 1);
2363         print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2364                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2365                        edesc->src_nents ? 100 : req->nbytes, 1);
2366 #endif
2367
2368         len = desc_len(sh_desc);
2369         init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2370
2371         if (!edesc->src_nents) {
2372                 src_dma = sg_dma_address(req->src);
2373                 in_options = 0;
2374         } else {
2375                 src_dma = edesc->sec4_sg_dma;
2376                 sec4_sg_index += edesc->src_nents;
2377                 in_options = LDST_SGF;
2378         }
2379         append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2380
2381         if (iv_contig) {
2382                 dst_dma = edesc->iv_dma;
2383                 out_options = 0;
2384         } else {
2385                 dst_dma = edesc->sec4_sg_dma +
2386                           sec4_sg_index * sizeof(struct sec4_sg_entry);
2387                 out_options = LDST_SGF;
2388         }
2389         append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2390 }
2391
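/*
 * By contrast, the givencrypt variant above reads only the payload and
 * writes IV + payload; for the same 256-byte cbc(aes) request:
 *
 *   SEQ_IN_PTR  length = 256
 *   SEQ_OUT_PTR length = 16 + 256 = 272  (generated IV precedes output)
 */
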
2392 /*
2393  * allocate and map the aead extended descriptor
2394  */
2395 static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
2396                                                int desc_bytes,
2397                                                bool *all_contig_ptr,
2398                                                bool encrypt)
2399 {
2400         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2401         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2402         struct device *jrdev = ctx->jrdev;
2403         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2404                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2405         int assoc_nents, src_nents, dst_nents = 0;
2406         struct aead_edesc *edesc;
2407         dma_addr_t iv_dma = 0;
2408         int sgc;
2409         bool all_contig = true;
2410         bool assoc_chained = false, src_chained = false, dst_chained = false;
2411         int ivsize = crypto_aead_ivsize(aead);
2412         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2413         unsigned int authsize = ctx->authsize;
2414         bool is_gcm = false;
2415
2416         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2417
2418         if (unlikely(req->dst != req->src)) {
2419                 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2420                 dst_nents = sg_count(req->dst,
2421                                      req->cryptlen +
2422                                         (encrypt ? authsize : (-authsize)),
2423                                      &dst_chained);
2424         } else {
2425                 src_nents = sg_count(req->src,
2426                                      req->cryptlen +
2427                                         (encrypt ? authsize : 0),
2428                                      &src_chained);
2429         }
2430
        /* FIXME: dma_map_sg_chained() results are not checked on this path */
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                 DMA_TO_DEVICE, assoc_chained);
2433         if (likely(req->src == req->dst)) {
2434                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2435                                          DMA_BIDIRECTIONAL, src_chained);
2436         } else {
2437                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2438                                          DMA_TO_DEVICE, src_chained);
2439                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2440                                          DMA_FROM_DEVICE, dst_chained);
2441         }
2442
2443         iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
2444         if (dma_mapping_error(jrdev, iv_dma)) {
2445                 dev_err(jrdev, "unable to map IV\n");
2446                 return ERR_PTR(-ENOMEM);
2447         }
2448
2449         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2450               OP_ALG_ALGSEL_AES) &&
2451             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2452                 is_gcm = true;
2453
        /*
         * Check if data are contiguous.
         * GCM expects the input sequence: IV, AAD, text.
         * All other algorithms expect: AAD, IV, text.
         */
2459         if (is_gcm)
2460                 all_contig = (!assoc_nents &&
2461                               iv_dma + ivsize == sg_dma_address(req->assoc) &&
2462                               !src_nents && sg_dma_address(req->assoc) +
2463                               req->assoclen == sg_dma_address(req->src));
2464         else
2465                 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2466                               req->assoclen == iv_dma && !src_nents &&
2467                               iv_dma + ivsize == sg_dma_address(req->src));
2468         if (!all_contig) {
2469                 assoc_nents = assoc_nents ? : 1;
2470                 src_nents = src_nents ? : 1;
2471                 sec4_sg_len = assoc_nents + 1 + src_nents;
2472         }
2473
2474         sec4_sg_len += dst_nents;
2475
2476         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2477
2478         /* allocate space for base edesc and hw desc commands, link tables */
2479         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
2480                         sec4_sg_bytes, GFP_DMA | flags);
2481         if (!edesc) {
2482                 dev_err(jrdev, "could not allocate extended descriptor\n");
2483                 return ERR_PTR(-ENOMEM);
2484         }
2485
2486         edesc->assoc_nents = assoc_nents;
2487         edesc->assoc_chained = assoc_chained;
2488         edesc->src_nents = src_nents;
2489         edesc->src_chained = src_chained;
2490         edesc->dst_nents = dst_nents;
2491         edesc->dst_chained = dst_chained;
2492         edesc->iv_dma = iv_dma;
2493         edesc->sec4_sg_bytes = sec4_sg_bytes;
2494         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2495                          desc_bytes;
2496         *all_contig_ptr = all_contig;
2497
2498         sec4_sg_index = 0;
2499         if (!all_contig) {
2500                 if (!is_gcm) {
2501                         sg_to_sec4_sg_len(req->assoc, req->assoclen,
2502                                           edesc->sec4_sg + sec4_sg_index);
2503                         sec4_sg_index += assoc_nents;
2504                 }
2505
2506                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2507                                    iv_dma, ivsize, 0);
2508                 sec4_sg_index += 1;
2509
2510                 if (is_gcm) {
2511                         sg_to_sec4_sg_len(req->assoc, req->assoclen,
2512                                           edesc->sec4_sg + sec4_sg_index);
2513                         sec4_sg_index += assoc_nents;
2514                 }
2515
2516                 sg_to_sec4_sg_last(req->src,
2517                                    src_nents,
2518                                    edesc->sec4_sg +
2519                                    sec4_sg_index, 0);
2520                 sec4_sg_index += src_nents;
2521         }
2522         if (dst_nents) {
2523                 sg_to_sec4_sg_last(req->dst, dst_nents,
2524                                    edesc->sec4_sg + sec4_sg_index, 0);
2525         }
2526         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2527                                             sec4_sg_bytes, DMA_TO_DEVICE);
2528         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2529                 dev_err(jrdev, "unable to map S/G table\n");
2530                 return ERR_PTR(-ENOMEM);
2531         }
2532
2533         return edesc;
2534 }
2535
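/*
 * Illustrative order of the sec4_sg link table built by
 * old_aead_edesc_alloc() for a non-contiguous GCM request:
 *
 *   [0]                  IV            (iv_dma, ivsize)
 *   [1 .. assoc_nents]   AAD segments
 *   [.. + src_nents]     payload segments (last entry flagged LAST)
 *   [.. + dst_nents]     output segments, only if req->dst != req->src
 *
 * Non-GCM algorithms place the AAD entries first and the IV after them.
 */
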
2536 /*
2537  * allocate and map the aead extended descriptor
2538  */
2539 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2540                                            int desc_bytes, bool *all_contig_ptr,
2541                                            bool encrypt)
2542 {
2543         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2544         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2545         struct device *jrdev = ctx->jrdev;
2546         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2547                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2548         int src_nents, dst_nents = 0;
2549         struct aead_edesc *edesc;
2550         int sgc;
2551         bool all_contig = true;
2552         bool src_chained = false, dst_chained = false;
2553         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2554         unsigned int authsize = ctx->authsize;
2555
2556         if (unlikely(req->dst != req->src)) {
2557                 src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
2558                                      &src_chained);
2559                 dst_nents = sg_count(req->dst,
2560                                      req->assoclen + req->cryptlen +
2561                                         (encrypt ? authsize : (-authsize)),
2562                                      &dst_chained);
2563         } else {
2564                 src_nents = sg_count(req->src,
2565                                      req->assoclen + req->cryptlen +
2566                                         (encrypt ? authsize : 0),
2567                                      &src_chained);
2568         }
2569
2570         /* Check if data are contiguous. */
2571         all_contig = !src_nents;
2572         if (!all_contig) {
2573                 src_nents = src_nents ? : 1;
2574                 sec4_sg_len = src_nents;
2575         }
2576
2577         sec4_sg_len += dst_nents;
2578
2579         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2580
2581         /* allocate space for base edesc and hw desc commands, link tables */
2582         edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
2583                         sec4_sg_bytes, GFP_DMA | flags);
2584         if (!edesc) {
2585                 dev_err(jrdev, "could not allocate extended descriptor\n");
2586                 return ERR_PTR(-ENOMEM);
2587         }
2588
2589         if (likely(req->src == req->dst)) {
2590                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2591                                          DMA_BIDIRECTIONAL, src_chained);
2592                 if (unlikely(!sgc)) {
2593                         dev_err(jrdev, "unable to map source\n");
2594                         kfree(edesc);
2595                         return ERR_PTR(-ENOMEM);
2596                 }
2597         } else {
2598                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2599                                          DMA_TO_DEVICE, src_chained);
2600                 if (unlikely(!sgc)) {
2601                         dev_err(jrdev, "unable to map source\n");
2602                         kfree(edesc);
2603                         return ERR_PTR(-ENOMEM);
2604                 }
2605
2606                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2607                                          DMA_FROM_DEVICE, dst_chained);
2608                 if (unlikely(!sgc)) {
2609                         dev_err(jrdev, "unable to map destination\n");
2610                         dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
2611                                              DMA_TO_DEVICE, src_chained);
2612                         kfree(edesc);
2613                         return ERR_PTR(-ENOMEM);
2614                 }
2615         }
2616
2617         edesc->src_nents = src_nents;
2618         edesc->src_chained = src_chained;
2619         edesc->dst_nents = dst_nents;
2620         edesc->dst_chained = dst_chained;
2621         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2622                          desc_bytes;
2623         *all_contig_ptr = all_contig;
2624
2625         sec4_sg_index = 0;
2626         if (!all_contig) {
2627                 sg_to_sec4_sg_last(req->src, src_nents,
2628                               edesc->sec4_sg + sec4_sg_index, 0);
2629                 sec4_sg_index += src_nents;
2630         }
2631         if (dst_nents) {
2632                 sg_to_sec4_sg_last(req->dst, dst_nents,
2633                                    edesc->sec4_sg + sec4_sg_index, 0);
2634         }
2635
2636         if (!sec4_sg_bytes)
2637                 return edesc;
2638
2639         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2640                                             sec4_sg_bytes, DMA_TO_DEVICE);
2641         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2642                 dev_err(jrdev, "unable to map S/G table\n");
2643                 aead_unmap(jrdev, edesc, req);
2644                 kfree(edesc);
2645                 return ERR_PTR(-ENOMEM);
2646         }
2647
2648         edesc->sec4_sg_bytes = sec4_sg_bytes;
2649
2650         return edesc;
2651 }
2652
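/*
 * gcm_encrypt - allocate the extended descriptor, build a GCM encrypt job
 * descriptor around it and enqueue the job on the job ring. Returns
 * -EINPROGRESS on successful submission; the request then completes
 * asynchronously through aead_encrypt_done().
 */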
2653 static int gcm_encrypt(struct aead_request *req)
2654 {
2655         struct aead_edesc *edesc;
2656         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2657         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2658         struct device *jrdev = ctx->jrdev;
2659         bool all_contig;
2660         u32 *desc;
2661         int ret = 0;
2662
2663         /* allocate extended descriptor */
2664         edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2665         if (IS_ERR(edesc))
2666                 return PTR_ERR(edesc);
2667
2668         /* Create and submit job descriptor */
2669         init_gcm_job(req, edesc, all_contig, true);
2670 #ifdef DEBUG
2671         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2672                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2673                        desc_bytes(edesc->hw_desc), 1);
2674 #endif
2675
2676         desc = edesc->hw_desc;
2677         ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2678         if (!ret) {
2679                 ret = -EINPROGRESS;
2680         } else {
2681                 aead_unmap(jrdev, edesc, req);
2682                 kfree(edesc);
2683         }
2684
2685         return ret;
2686 }
2687
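/*
 * old_aead_encrypt - encrypt entry point for the legacy AEAD interface;
 * the job is built from the per-context encrypt shared descriptor
 * (sh_desc_enc) and completes through old_aead_encrypt_done().
 */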
2688 static int old_aead_encrypt(struct aead_request *req)
2689 {
2690         struct aead_edesc *edesc;
2691         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2692         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2693         struct device *jrdev = ctx->jrdev;
2694         bool all_contig;
2695         u32 *desc;
2696         int ret = 0;
2697
2698         /* allocate extended descriptor */
2699         edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2700                                      CAAM_CMD_SZ, &all_contig, true);
2701         if (IS_ERR(edesc))
2702                 return PTR_ERR(edesc);
2703
2704         /* Create and submit job descriptor */
2705         old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
2706                           all_contig, true);
2707 #ifdef DEBUG
2708         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2709                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2710                        desc_bytes(edesc->hw_desc), 1);
2711 #endif
2712
2713         desc = edesc->hw_desc;
2714         ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
2715         if (!ret) {
2716                 ret = -EINPROGRESS;
2717         } else {
2718                 old_aead_unmap(jrdev, edesc, req);
2719                 kfree(edesc);
2720         }
2721
2722         return ret;
2723 }
2724
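/*
 * gcm_decrypt - decrypt counterpart of gcm_encrypt(): the same allocate,
 * build and enqueue flow, with the job descriptor initialized for
 * decryption and completion via aead_decrypt_done().
 */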
2725 static int gcm_decrypt(struct aead_request *req)
2726 {
2727         struct aead_edesc *edesc;
2728         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2729         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2730         struct device *jrdev = ctx->jrdev;
2731         bool all_contig;
2732         u32 *desc;
2733         int ret = 0;
2734
2735         /* allocate extended descriptor */
2736         edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2737         if (IS_ERR(edesc))
2738                 return PTR_ERR(edesc);
2739
2740         /* Create and submit job descriptor */
2741         init_gcm_job(req, edesc, all_contig, false);
2742 #ifdef DEBUG
2743         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2744                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2745                        desc_bytes(edesc->hw_desc), 1);
2746 #endif
2747
2748         desc = edesc->hw_desc;
2749         ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2750         if (!ret) {
2751                 ret = -EINPROGRESS;
2752         } else {
2753                 aead_unmap(jrdev, edesc, req);
2754                 kfree(edesc);
2755         }
2756
2757         return ret;
2758 }
2759
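/*
 * old_aead_decrypt - decrypt entry point for the legacy AEAD interface;
 * the job is built from the decrypt shared descriptor (sh_desc_dec) and
 * completes through old_aead_decrypt_done().
 */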
2760 static int old_aead_decrypt(struct aead_request *req)
2761 {
2762         struct aead_edesc *edesc;
2763         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2764         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2765         struct device *jrdev = ctx->jrdev;
2766         bool all_contig;
2767         u32 *desc;
2768         int ret = 0;
2769
2770         /* allocate extended descriptor */
2771         edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2772                                      CAAM_CMD_SZ, &all_contig, false);
2773         if (IS_ERR(edesc))
2774                 return PTR_ERR(edesc);
2775
2776 #ifdef DEBUG
2777         print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2778                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2779                        req->cryptlen, 1);
2780 #endif
2781
2782         /* Create and submit job descriptor */
2783         old_init_aead_job(ctx->sh_desc_dec,
2784                           ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
2785 #ifdef DEBUG
2786         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2787                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2788                        desc_bytes(edesc->hw_desc), 1);
2789 #endif
2790
2791         desc = edesc->hw_desc;
2792         ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
2793         if (!ret) {
2794                 ret = -EINPROGRESS;
2795         } else {
2796                 old_aead_unmap(jrdev, edesc, req);
2797                 kfree(edesc);
2798         }
2799
2800         return ret;
2801 }
2802
2803 /*
2804  * allocate and map the aead extended descriptor for aead givencrypt
2805  */
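/*
 * On return, *contig_ptr carries the GIV_SRC_CONTIG/GIV_DST_CONTIG bits
 * telling the caller whether the IV, associated data and text are
 * physically contiguous and can be referenced directly, or must be
 * described through the S/G table.
 */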
2806 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2807                                                *greq, int desc_bytes,
2808                                                u32 *contig_ptr)
2809 {
2810         struct aead_request *req = &greq->areq;
2811         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2812         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2813         struct device *jrdev = ctx->jrdev;
2814         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2815                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2816         int assoc_nents, src_nents, dst_nents = 0;
2817         struct aead_edesc *edesc;
2818         dma_addr_t iv_dma = 0;
2819         int sgc;
2820         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2821         int ivsize = crypto_aead_ivsize(aead);
2822         bool assoc_chained = false, src_chained = false, dst_chained = false;
2823         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2824         bool is_gcm = false;
2825
2826         assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2827         src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2828
2829         if (unlikely(req->dst != req->src))
2830                 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2831                                      &dst_chained);
2832
2833         sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
2834                                  DMA_TO_DEVICE, assoc_chained);
2835         if (likely(req->src == req->dst)) {
2836                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2837                                          DMA_BIDIRECTIONAL, src_chained);
2838         } else {
2839                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2840                                          DMA_TO_DEVICE, src_chained);
2841                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2842                                          DMA_FROM_DEVICE, dst_chained);
2843         }
2844
2845         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2846         if (dma_mapping_error(jrdev, iv_dma)) {
2847                 dev_err(jrdev, "unable to map IV\n");
2848                 return ERR_PTR(-ENOMEM);
2849         }
2850
2851         if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2852               OP_ALG_ALGSEL_AES) &&
2853             ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2854                 is_gcm = true;
2855
2856         /*
2857          * Check if data are contiguous.
2858          * GCM expected input sequence: IV, AAD, text
2859          * All other modes - expected input sequence: AAD, IV, text
2860          */
2861
2862         if (is_gcm) {
2863                 if (assoc_nents || iv_dma + ivsize !=
2864                     sg_dma_address(req->assoc) || src_nents ||
2865                     sg_dma_address(req->assoc) + req->assoclen !=
2866                     sg_dma_address(req->src))
2867                         contig &= ~GIV_SRC_CONTIG;
2868         } else {
2869                 if (assoc_nents ||
2870                     sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2871                     src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2872                         contig &= ~GIV_SRC_CONTIG;
2873         }
2874
2875         if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2876                 contig &= ~GIV_DST_CONTIG;
2877
2878         if (!(contig & GIV_SRC_CONTIG)) {
2879                 assoc_nents = assoc_nents ? : 1;
2880                 src_nents = src_nents ? : 1;
2881                 sec4_sg_len += assoc_nents + 1 + src_nents;
2882                 if (req->src == req->dst &&
2883                     (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
2884                         contig &= ~GIV_DST_CONTIG;
2885         }
2886
2887         /*
2888          * Add new sg entries for GCM output sequence.
2889          * Expected output sequence: IV, encrypted text.
2890          */
2891         if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2892                 sec4_sg_len += 1 + src_nents;
2893
2894         if (unlikely(req->src != req->dst)) {
2895                 dst_nents = dst_nents ? : 1;
2896                 sec4_sg_len += 1 + dst_nents;
2897         }
2898
2899         sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2900
2901         /* allocate space for base edesc and hw desc commands, link tables */
2902         edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
2903                         sec4_sg_bytes, GFP_DMA | flags);
2904         if (!edesc) {
2905                 dev_err(jrdev, "could not allocate extended descriptor\n");
2906                 return ERR_PTR(-ENOMEM);
2907         }
2908
2909         edesc->assoc_nents = assoc_nents;
2910         edesc->assoc_chained = assoc_chained;
2911         edesc->src_nents = src_nents;
2912         edesc->src_chained = src_chained;
2913         edesc->dst_nents = dst_nents;
2914         edesc->dst_chained = dst_chained;
2915         edesc->iv_dma = iv_dma;
2916         edesc->sec4_sg_bytes = sec4_sg_bytes;
2917         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2918                          desc_bytes;
2919         *contig_ptr = contig;
2920
2921         sec4_sg_index = 0;
2922         if (!(contig & GIV_SRC_CONTIG)) {
2923                 if (!is_gcm) {
2924                         sg_to_sec4_sg_len(req->assoc, req->assoclen,
2925                                           edesc->sec4_sg + sec4_sg_index);
2926                         sec4_sg_index += assoc_nents;
2927                 }
2928
2929                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2930                                    iv_dma, ivsize, 0);
2931                 sec4_sg_index += 1;
2932
2933                 if (is_gcm) {
2934                         sg_to_sec4_sg_len(req->assoc, req->assoclen,
2935                                           edesc->sec4_sg + sec4_sg_index);
2936                         sec4_sg_index += assoc_nents;
2937                 }
2938
2939                 sg_to_sec4_sg_last(req->src, src_nents,
2940                                    edesc->sec4_sg +
2941                                    sec4_sg_index, 0);
2942                 sec4_sg_index += src_nents;
2943         }
2944
2945         if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2946                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2947                                    iv_dma, ivsize, 0);
2948                 sec4_sg_index += 1;
2949                 sg_to_sec4_sg_last(req->src, src_nents,
2950                                    edesc->sec4_sg + sec4_sg_index, 0);
2951         }
2952
2953         if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
2954                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2955                                    iv_dma, ivsize, 0);
2956                 sec4_sg_index += 1;
2957                 sg_to_sec4_sg_last(req->dst, dst_nents,
2958                                    edesc->sec4_sg + sec4_sg_index, 0);
2959         }
2960         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2961                                             sec4_sg_bytes, DMA_TO_DEVICE);
2962         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2963                 dev_err(jrdev, "unable to map S/G table\n");
                     kfree(edesc);   /* don't leak the extended descriptor */
2964                 return ERR_PTR(-ENOMEM);
2965         }
2966
2967         return edesc;
2968 }
2969
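/*
 * old_aead_givencrypt - IV-generating encrypt for the legacy AEAD
 * interface: the IV buffer supplied in the request is mapped and laid out
 * ahead of the payload, and the job runs the givencrypt shared descriptor
 * (sh_desc_givenc).
 */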
2970 static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
2971 {
2972         struct aead_request *req = &areq->areq;
2973         struct aead_edesc *edesc;
2974         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2975         struct caam_ctx *ctx = crypto_aead_ctx(aead);
2976         struct device *jrdev = ctx->jrdev;
2977         u32 contig;
2978         u32 *desc;
2979         int ret = 0;
2980
2981         /* allocate extended descriptor */
2982         edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
2983                                      CAAM_CMD_SZ, &contig);
2985         if (IS_ERR(edesc))
2986                 return PTR_ERR(edesc);
2987
2988 #ifdef DEBUG
2989         print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
2990                        DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2991                        req->cryptlen, 1);
2992 #endif
2993
2994         /* Create and submit job descriptor */
2995         init_aead_giv_job(ctx->sh_desc_givenc,
2996                           ctx->sh_desc_givenc_dma, edesc, req, contig);
2997 #ifdef DEBUG
2998         print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2999                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3000                        desc_bytes(edesc->hw_desc), 1);
3001 #endif
3002
3003         desc = edesc->hw_desc;
3004         ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
3005         if (!ret) {
3006                 ret = -EINPROGRESS;
3007         } else {
3008                 old_aead_unmap(jrdev, edesc, req);
3009                 kfree(edesc);
3010         }
3011
3012         return ret;
3013 }
3014
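/*
 * aead_null_givencrypt - the null cipher templates use NULL_IV_SIZE, so
 * there is no IV to generate and givencrypt reduces to a plain encrypt.
 */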
3015 static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
3016 {
3017         return old_aead_encrypt(&areq->areq);
3018 }
3019
3020 /*
3021  * allocate and map the ablkcipher extended descriptor for encrypt/decrypt
3022  */
3023 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
3024                                                        *req, int desc_bytes,
3025                                                        bool *iv_contig_out)
3026 {
3027         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3028         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3029         struct device *jrdev = ctx->jrdev;
3030         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3031                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3032                        GFP_KERNEL : GFP_ATOMIC;
3033         int src_nents, dst_nents = 0, sec4_sg_bytes;
3034         struct ablkcipher_edesc *edesc;
3035         dma_addr_t iv_dma = 0;
3036         bool iv_contig = false;
3037         int sgc;
3038         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3039         bool src_chained = false, dst_chained = false;
3040         int sec4_sg_index;
3041
3042         src_nents = sg_count(req->src, req->nbytes, &src_chained);
3043
3044         if (req->dst != req->src)
3045                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
3046
3047         if (likely(req->src == req->dst)) {
3048                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3049                                          DMA_BIDIRECTIONAL, src_chained);
3050         } else {
3051                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3052                                          DMA_TO_DEVICE, src_chained);
3053                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3054                                          DMA_FROM_DEVICE, dst_chained);
3055         }
3056
3057         iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
3058         if (dma_mapping_error(jrdev, iv_dma)) {
3059                 dev_err(jrdev, "unable to map IV\n");
3060                 return ERR_PTR(-ENOMEM);
3061         }
3062
3063         /*
3064          * Check if the IV can be contiguous with the source buffer.
3065          * If so, reference it directly. If not, create an S/G entry for it.
3066          */
3067         if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
3068                 iv_contig = true;
3069         else
3070                 src_nents = src_nents ? : 1;
3071         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3072                         sizeof(struct sec4_sg_entry);
3073
3074         /* allocate space for base edesc and hw desc commands, link tables */
3075         edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
3076                         sec4_sg_bytes, GFP_DMA | flags);
3077         if (!edesc) {
3078                 dev_err(jrdev, "could not allocate extended descriptor\n");
3079                 return ERR_PTR(-ENOMEM);
3080         }
3081
3082         edesc->src_nents = src_nents;
3083         edesc->src_chained = src_chained;
3084         edesc->dst_nents = dst_nents;
3085         edesc->dst_chained = dst_chained;
3086         edesc->sec4_sg_bytes = sec4_sg_bytes;
3087         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3088                          desc_bytes;
3089
3090         sec4_sg_index = 0;
3091         if (!iv_contig) {
3092                 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
3093                 sg_to_sec4_sg_last(req->src, src_nents,
3094                                    edesc->sec4_sg + 1, 0);
3095                 sec4_sg_index += 1 + src_nents;
3096         }
3097
3098         if (dst_nents) {
3099                 sg_to_sec4_sg_last(req->dst, dst_nents,
3100                         edesc->sec4_sg + sec4_sg_index, 0);
3101         }
3102
3103         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3104                                             sec4_sg_bytes, DMA_TO_DEVICE);
3105         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3106                 dev_err(jrdev, "unable to map S/G table\n");
                     kfree(edesc);   /* don't leak the extended descriptor */
3107                 return ERR_PTR(-ENOMEM);
3108         }
3109
3110         edesc->iv_dma = iv_dma;
3111
3112 #ifdef DEBUG
3113         print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
3114                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3115                        sec4_sg_bytes, 1);
3116 #endif
3117
3118         *iv_contig_out = iv_contig;
3119         return edesc;
3120 }
3121
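/*
 * ablkcipher_encrypt - allocate the extended descriptor, build the job
 * from the encrypt shared descriptor and enqueue it; the request is
 * completed asynchronously by ablkcipher_encrypt_done().
 */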
3122 static int ablkcipher_encrypt(struct ablkcipher_request *req)
3123 {
3124         struct ablkcipher_edesc *edesc;
3125         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3126         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3127         struct device *jrdev = ctx->jrdev;
3128         bool iv_contig;
3129         u32 *desc;
3130         int ret = 0;
3131
3132         /* allocate extended descriptor */
3133         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3134                                        CAAM_CMD_SZ, &iv_contig);
3135         if (IS_ERR(edesc))
3136                 return PTR_ERR(edesc);
3137
3138         /* Create and submit job descriptor */
3139         init_ablkcipher_job(ctx->sh_desc_enc,
3140                 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
3141 #ifdef DEBUG
3142         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
3143                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3144                        desc_bytes(edesc->hw_desc), 1);
3145 #endif
3146         desc = edesc->hw_desc;
3147         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3148
3149         if (!ret) {
3150                 ret = -EINPROGRESS;
3151         } else {
3152                 ablkcipher_unmap(jrdev, edesc, req);
3153                 kfree(edesc);
3154         }
3155
3156         return ret;
3157 }
3158
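/*
 * ablkcipher_decrypt - decrypt counterpart of ablkcipher_encrypt(),
 * driven by the decrypt shared descriptor and completed via
 * ablkcipher_decrypt_done().
 */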
3159 static int ablkcipher_decrypt(struct ablkcipher_request *req)
3160 {
3161         struct ablkcipher_edesc *edesc;
3162         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3163         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3164         struct device *jrdev = ctx->jrdev;
3165         bool iv_contig;
3166         u32 *desc;
3167         int ret = 0;
3168
3169         /* allocate extended descriptor */
3170         edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3171                                        CAAM_CMD_SZ, &iv_contig);
3172         if (IS_ERR(edesc))
3173                 return PTR_ERR(edesc);
3174
3175         /* Create and submit job descriptor */
3176         init_ablkcipher_job(ctx->sh_desc_dec,
3177                 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
3178         desc = edesc->hw_desc;
3179 #ifdef DEBUG
3180         print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
3181                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3182                        desc_bytes(edesc->hw_desc), 1);
3183 #endif
3184
3185         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
3186         if (!ret) {
3187                 ret = -EINPROGRESS;
3188         } else {
3189                 ablkcipher_unmap(jrdev, edesc, req);
3190                 kfree(edesc);
3191         }
3192
3193         return ret;
3194 }
3195
3196 /*
3197  * allocate and map the ablkcipher extended descriptor
3198  * for ablkcipher givencrypt
3199  */
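/*
 * The generated IV (greq->giv) is emitted ahead of the ciphertext: it is
 * either already adjacent to the destination buffer (iv_contig) or gets
 * its own S/G table entry prepended to the destination entries.
 */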
3200 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
3201                                 struct skcipher_givcrypt_request *greq,
3202                                 int desc_bytes,
3203                                 bool *iv_contig_out)
3204 {
3205         struct ablkcipher_request *req = &greq->creq;
3206         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3207         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3208         struct device *jrdev = ctx->jrdev;
3209         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3210                                           CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3211                        GFP_KERNEL : GFP_ATOMIC;
3212         int src_nents, dst_nents = 0, sec4_sg_bytes;
3213         struct ablkcipher_edesc *edesc;
3214         dma_addr_t iv_dma = 0;
3215         bool iv_contig = false;
3216         int sgc;
3217         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3218         bool src_chained = false, dst_chained = false;
3219         int sec4_sg_index;
3220
3221         src_nents = sg_count(req->src, req->nbytes, &src_chained);
3222
3223         if (unlikely(req->dst != req->src))
3224                 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
3225
3226         if (likely(req->src == req->dst)) {
3227                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3228                                          DMA_BIDIRECTIONAL, src_chained);
3229         } else {
3230                 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3231                                          DMA_TO_DEVICE, src_chained);
3232                 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3233                                          DMA_FROM_DEVICE, dst_chained);
3234         }
3235
3236         /*
3237          * Check if the IV can be contiguous with the destination buffer.
3238          * If so, reference it directly. If not, create an S/G entry for it.
3239          */
3240         iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
3241         if (dma_mapping_error(jrdev, iv_dma)) {
3242                 dev_err(jrdev, "unable to map IV\n");
3243                 return ERR_PTR(-ENOMEM);
3244         }
3245
3246         if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
3247                 iv_contig = true;
3248         else
3249                 dst_nents = dst_nents ? : 1;
3250         sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3251                         sizeof(struct sec4_sg_entry);
3252
3253         /* allocate space for base edesc and hw desc commands, link tables */
3254         edesc = kmalloc(sizeof(*edesc) + desc_bytes +
3255                         sec4_sg_bytes, GFP_DMA | flags);
3256         if (!edesc) {
3257                 dev_err(jrdev, "could not allocate extended descriptor\n");
3258                 return ERR_PTR(-ENOMEM);
3259         }
3260
3261         edesc->src_nents = src_nents;
3262         edesc->src_chained = src_chained;
3263         edesc->dst_nents = dst_nents;
3264         edesc->dst_chained = dst_chained;
3265         edesc->sec4_sg_bytes = sec4_sg_bytes;
3266         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3267                          desc_bytes;
3268
3269         sec4_sg_index = 0;
3270         if (src_nents) {
3271                 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
3272                 sec4_sg_index += src_nents;
3273         }
3274
3275         if (!iv_contig) {
3276                 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
3277                                    iv_dma, ivsize, 0);
3278                 sec4_sg_index += 1;
3279                 sg_to_sec4_sg_last(req->dst, dst_nents,
3280                                    edesc->sec4_sg + sec4_sg_index, 0);
3281         }
3282
3283         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3284                                             sec4_sg_bytes, DMA_TO_DEVICE);
3285         if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3286                 dev_err(jrdev, "unable to map S/G table\n");
                     kfree(edesc);   /* don't leak the extended descriptor */
3287                 return ERR_PTR(-ENOMEM);
3288         }
3289         edesc->iv_dma = iv_dma;
3290
3291 #ifdef DEBUG
3292         print_hex_dump(KERN_ERR,
3293                        "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
3294                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3295                        sec4_sg_bytes, 1);
3296 #endif
3297
3298         *iv_contig_out = iv_contig;
3299         return edesc;
3300 }
3301
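/*
 * ablkcipher_givencrypt - encrypt with IV generation: the job is built
 * from the givencrypt shared descriptor and reuses
 * ablkcipher_encrypt_done() for completion.
 */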
3302 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
3303 {
3304         struct ablkcipher_request *req = &creq->creq;
3305         struct ablkcipher_edesc *edesc;
3306         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3307         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3308         struct device *jrdev = ctx->jrdev;
3309         bool iv_contig;
3310         u32 *desc;
3311         int ret = 0;
3312
3313         /* allocate extended descriptor */
3314         edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
3315                                        CAAM_CMD_SZ, &iv_contig);
3316         if (IS_ERR(edesc))
3317                 return PTR_ERR(edesc);
3318
3319         /* Create and submit job descriptor */
3320         init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
3321                                 edesc, req, iv_contig);
3322 #ifdef DEBUG
3323         print_hex_dump(KERN_ERR,
3324                        "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
3325                        DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3326                        desc_bytes(edesc->hw_desc), 1);
3327 #endif
3328         desc = edesc->hw_desc;
3329         ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3330
3331         if (!ret) {
3332                 ret = -EINPROGRESS;
3333         } else {
3334                 ablkcipher_unmap(jrdev, edesc, req);
3335                 kfree(edesc);
3336         }
3337
3338         return ret;
3339 }
3340
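/*
 * Algorithm registration templates: one caam_alg_template entry per
 * supported algorithm, carrying the crypto API entry points together with
 * the CAAM class 1/class 2 algorithm selectors and operation bits used
 * when the shared descriptors are constructed.
 */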
3341 #define template_aead           template_u.aead
3342 #define template_ablkcipher     template_u.ablkcipher
3343 struct caam_alg_template {
3344         char name[CRYPTO_MAX_ALG_NAME];
3345         char driver_name[CRYPTO_MAX_ALG_NAME];
3346         unsigned int blocksize;
3347         u32 type;
3348         union {
3349                 struct ablkcipher_alg ablkcipher;
3350                 struct old_aead_alg aead;
3351         } template_u;
3352         u32 class1_alg_type;
3353         u32 class2_alg_type;
3354         u32 alg_op;
3355 };
3356
3357 static struct caam_alg_template driver_algs[] = {
3358         /* single-pass ipsec_esp descriptor */
3359         {
3360                 .name = "authenc(hmac(md5),ecb(cipher_null))",
3361                 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
3362                 .blocksize = NULL_BLOCK_SIZE,
3363                 .type = CRYPTO_ALG_TYPE_AEAD,
3364                 .template_aead = {
3365                         .setkey = aead_setkey,
3366                         .setauthsize = aead_setauthsize,
3367                         .encrypt = old_aead_encrypt,
3368                         .decrypt = old_aead_decrypt,
3369                         .givencrypt = aead_null_givencrypt,
3370                         .geniv = "<built-in>",
3371                         .ivsize = NULL_IV_SIZE,
3372                         .maxauthsize = MD5_DIGEST_SIZE,
3373                         },
3374                 .class1_alg_type = 0,
3375                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3376                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3377         },
3378         {
3379                 .name = "authenc(hmac(sha1),ecb(cipher_null))",
3380                 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
3381                 .blocksize = NULL_BLOCK_SIZE,
3382                 .type = CRYPTO_ALG_TYPE_AEAD,
3383                 .template_aead = {
3384                         .setkey = aead_setkey,
3385                         .setauthsize = aead_setauthsize,
3386                         .encrypt = old_aead_encrypt,
3387                         .decrypt = old_aead_decrypt,
3388                         .givencrypt = aead_null_givencrypt,
3389                         .geniv = "<built-in>",
3390                         .ivsize = NULL_IV_SIZE,
3391                         .maxauthsize = SHA1_DIGEST_SIZE,
3392                         },
3393                 .class1_alg_type = 0,
3394                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3395                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3396         },
3397         {
3398                 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3399                 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3400                 .blocksize = NULL_BLOCK_SIZE,
3401                 .type = CRYPTO_ALG_TYPE_AEAD,
3402                 .template_aead = {
3403                         .setkey = aead_setkey,
3404                         .setauthsize = aead_setauthsize,
3405                         .encrypt = old_aead_encrypt,
3406                         .decrypt = old_aead_decrypt,
3407                         .givencrypt = aead_null_givencrypt,
3408                         .geniv = "<built-in>",
3409                         .ivsize = NULL_IV_SIZE,
3410                         .maxauthsize = SHA224_DIGEST_SIZE,
3411                         },
3412                 .class1_alg_type = 0,
3413                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3414                                    OP_ALG_AAI_HMAC_PRECOMP,
3415                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3416         },
3417         {
3418                 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3419                 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3420                 .blocksize = NULL_BLOCK_SIZE,
3421                 .type = CRYPTO_ALG_TYPE_AEAD,
3422                 .template_aead = {
3423                         .setkey = aead_setkey,
3424                         .setauthsize = aead_setauthsize,
3425                         .encrypt = old_aead_encrypt,
3426                         .decrypt = old_aead_decrypt,
3427                         .givencrypt = aead_null_givencrypt,
3428                         .geniv = "<built-in>",
3429                         .ivsize = NULL_IV_SIZE,
3430                         .maxauthsize = SHA256_DIGEST_SIZE,
3431                         },
3432                 .class1_alg_type = 0,
3433                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3434                                    OP_ALG_AAI_HMAC_PRECOMP,
3435                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3436         },
3437         {
3438                 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3439                 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3440                 .blocksize = NULL_BLOCK_SIZE,
3441                 .type = CRYPTO_ALG_TYPE_AEAD,
3442                 .template_aead = {
3443                         .setkey = aead_setkey,
3444                         .setauthsize = aead_setauthsize,
3445                         .encrypt = old_aead_encrypt,
3446                         .decrypt = old_aead_decrypt,
3447                         .givencrypt = aead_null_givencrypt,
3448                         .geniv = "<built-in>",
3449                         .ivsize = NULL_IV_SIZE,
3450                         .maxauthsize = SHA384_DIGEST_SIZE,
3451                         },
3452                 .class1_alg_type = 0,
3453                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3454                                    OP_ALG_AAI_HMAC_PRECOMP,
3455                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3456         },
3457         {
3458                 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3459                 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3460                 .blocksize = NULL_BLOCK_SIZE,
3461                 .type = CRYPTO_ALG_TYPE_AEAD,
3462                 .template_aead = {
3463                         .setkey = aead_setkey,
3464                         .setauthsize = aead_setauthsize,
3465                         .encrypt = old_aead_encrypt,
3466                         .decrypt = old_aead_decrypt,
3467                         .givencrypt = aead_null_givencrypt,
3468                         .geniv = "<built-in>",
3469                         .ivsize = NULL_IV_SIZE,
3470                         .maxauthsize = SHA512_DIGEST_SIZE,
3471                         },
3472                 .class1_alg_type = 0,
3473                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3474                                    OP_ALG_AAI_HMAC_PRECOMP,
3475                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3476         },
3477         {
3478                 .name = "authenc(hmac(md5),cbc(aes))",
3479                 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3480                 .blocksize = AES_BLOCK_SIZE,
3481                 .type = CRYPTO_ALG_TYPE_AEAD,
3482                 .template_aead = {
3483                         .setkey = aead_setkey,
3484                         .setauthsize = aead_setauthsize,
3485                         .encrypt = old_aead_encrypt,
3486                         .decrypt = old_aead_decrypt,
3487                         .givencrypt = old_aead_givencrypt,
3488                         .geniv = "<built-in>",
3489                         .ivsize = AES_BLOCK_SIZE,
3490                         .maxauthsize = MD5_DIGEST_SIZE,
3491                         },
3492                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3493                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3494                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3495         },
3496         {
3497                 .name = "authenc(hmac(sha1),cbc(aes))",
3498                 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3499                 .blocksize = AES_BLOCK_SIZE,
3500                 .type = CRYPTO_ALG_TYPE_AEAD,
3501                 .template_aead = {
3502                         .setkey = aead_setkey,
3503                         .setauthsize = aead_setauthsize,
3504                         .encrypt = old_aead_encrypt,
3505                         .decrypt = old_aead_decrypt,
3506                         .givencrypt = old_aead_givencrypt,
3507                         .geniv = "<built-in>",
3508                         .ivsize = AES_BLOCK_SIZE,
3509                         .maxauthsize = SHA1_DIGEST_SIZE,
3510                         },
3511                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3512                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3513                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3514         },
3515         {
3516                 .name = "authenc(hmac(sha224),cbc(aes))",
3517                 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3518                 .blocksize = AES_BLOCK_SIZE,
3519                 .type = CRYPTO_ALG_TYPE_AEAD,
3520                 .template_aead = {
3521                         .setkey = aead_setkey,
3522                         .setauthsize = aead_setauthsize,
3523                         .encrypt = old_aead_encrypt,
3524                         .decrypt = old_aead_decrypt,
3525                         .givencrypt = old_aead_givencrypt,
3526                         .geniv = "<built-in>",
3527                         .ivsize = AES_BLOCK_SIZE,
3528                         .maxauthsize = SHA224_DIGEST_SIZE,
3529                         },
3530                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3531                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3532                                    OP_ALG_AAI_HMAC_PRECOMP,
3533                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3534         },
3535         {
3536                 .name = "authenc(hmac(sha256),cbc(aes))",
3537                 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3538                 .blocksize = AES_BLOCK_SIZE,
3539                 .type = CRYPTO_ALG_TYPE_AEAD,
3540                 .template_aead = {
3541                         .setkey = aead_setkey,
3542                         .setauthsize = aead_setauthsize,
3543                         .encrypt = old_aead_encrypt,
3544                         .decrypt = old_aead_decrypt,
3545                         .givencrypt = old_aead_givencrypt,
3546                         .geniv = "<built-in>",
3547                         .ivsize = AES_BLOCK_SIZE,
3548                         .maxauthsize = SHA256_DIGEST_SIZE,
3549                         },
3550                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3551                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3552                                    OP_ALG_AAI_HMAC_PRECOMP,
3553                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3554         },
3555         {
3556                 .name = "authenc(hmac(sha384),cbc(aes))",
3557                 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3558                 .blocksize = AES_BLOCK_SIZE,
3559                 .type = CRYPTO_ALG_TYPE_AEAD,
3560                 .template_aead = {
3561                         .setkey = aead_setkey,
3562                         .setauthsize = aead_setauthsize,
3563                         .encrypt = old_aead_encrypt,
3564                         .decrypt = old_aead_decrypt,
3565                         .givencrypt = old_aead_givencrypt,
3566                         .geniv = "<built-in>",
3567                         .ivsize = AES_BLOCK_SIZE,
3568                         .maxauthsize = SHA384_DIGEST_SIZE,
3569                         },
3570                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3571                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3572                                    OP_ALG_AAI_HMAC_PRECOMP,
3573                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3574         },
3576         {
3577                 .name = "authenc(hmac(sha512),cbc(aes))",
3578                 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3579                 .blocksize = AES_BLOCK_SIZE,
3580                 .type = CRYPTO_ALG_TYPE_AEAD,
3581                 .template_aead = {
3582                         .setkey = aead_setkey,
3583                         .setauthsize = aead_setauthsize,
3584                         .encrypt = old_aead_encrypt,
3585                         .decrypt = old_aead_decrypt,
3586                         .givencrypt = old_aead_givencrypt,
3587                         .geniv = "<built-in>",
3588                         .ivsize = AES_BLOCK_SIZE,
3589                         .maxauthsize = SHA512_DIGEST_SIZE,
3590                         },
3591                 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3592                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3593                                    OP_ALG_AAI_HMAC_PRECOMP,
3594                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3595         },
3596         {
3597                 .name = "authenc(hmac(md5),cbc(des3_ede))",
3598                 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3599                 .blocksize = DES3_EDE_BLOCK_SIZE,
3600                 .type = CRYPTO_ALG_TYPE_AEAD,
3601                 .template_aead = {
3602                         .setkey = aead_setkey,
3603                         .setauthsize = aead_setauthsize,
3604                         .encrypt = old_aead_encrypt,
3605                         .decrypt = old_aead_decrypt,
3606                         .givencrypt = old_aead_givencrypt,
3607                         .geniv = "<built-in>",
3608                         .ivsize = DES3_EDE_BLOCK_SIZE,
3609                         .maxauthsize = MD5_DIGEST_SIZE,
3610                         },
3611                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3612                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3613                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3614         },
3615         {
3616                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3617                 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3618                 .blocksize = DES3_EDE_BLOCK_SIZE,
3619                 .type = CRYPTO_ALG_TYPE_AEAD,
3620                 .template_aead = {
3621                         .setkey = aead_setkey,
3622                         .setauthsize = aead_setauthsize,
3623                         .encrypt = old_aead_encrypt,
3624                         .decrypt = old_aead_decrypt,
3625                         .givencrypt = old_aead_givencrypt,
3626                         .geniv = "<built-in>",
3627                         .ivsize = DES3_EDE_BLOCK_SIZE,
3628                         .maxauthsize = SHA1_DIGEST_SIZE,
3629                         },
3630                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3631                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3632                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3633         },
3634         {
3635                 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3636                 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3637                 .blocksize = DES3_EDE_BLOCK_SIZE,
3638                 .type = CRYPTO_ALG_TYPE_AEAD,
3639                 .template_aead = {
3640                         .setkey = aead_setkey,
3641                         .setauthsize = aead_setauthsize,
3642                         .encrypt = old_aead_encrypt,
3643                         .decrypt = old_aead_decrypt,
3644                         .givencrypt = old_aead_givencrypt,
3645                         .geniv = "<built-in>",
3646                         .ivsize = DES3_EDE_BLOCK_SIZE,
3647                         .maxauthsize = SHA224_DIGEST_SIZE,
3648                         },
3649                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3650                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3651                                    OP_ALG_AAI_HMAC_PRECOMP,
3652                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3653         },
3654         {
3655                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3656                 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3657                 .blocksize = DES3_EDE_BLOCK_SIZE,
3658                 .type = CRYPTO_ALG_TYPE_AEAD,
3659                 .template_aead = {
3660                         .setkey = aead_setkey,
3661                         .setauthsize = aead_setauthsize,
3662                         .encrypt = old_aead_encrypt,
3663                         .decrypt = old_aead_decrypt,
3664                         .givencrypt = old_aead_givencrypt,
3665                         .geniv = "<built-in>",
3666                         .ivsize = DES3_EDE_BLOCK_SIZE,
3667                         .maxauthsize = SHA256_DIGEST_SIZE,
3668                         },
3669                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3670                 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3671                                    OP_ALG_AAI_HMAC_PRECOMP,
3672                 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3673         },
3674         {
3675                 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3676                 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3677                 .blocksize = DES3_EDE_BLOCK_SIZE,
3678                 .type = CRYPTO_ALG_TYPE_AEAD,
3679                 .template_aead = {
3680                         .setkey = aead_setkey,
3681                         .setauthsize = aead_setauthsize,
3682                         .encrypt = old_aead_encrypt,
3683                         .decrypt = old_aead_decrypt,
3684                         .givencrypt = old_aead_givencrypt,
3685                         .geniv = "<built-in>",
3686                         .ivsize = DES3_EDE_BLOCK_SIZE,
3687                         .maxauthsize = SHA384_DIGEST_SIZE,
3688                         },
3689                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3690                 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3691                                    OP_ALG_AAI_HMAC_PRECOMP,
3692                 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3693         },
3694         {
3695                 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3696                 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3697                 .blocksize = DES3_EDE_BLOCK_SIZE,
3698                 .type = CRYPTO_ALG_TYPE_AEAD,
3699                 .template_aead = {
3700                         .setkey = aead_setkey,
3701                         .setauthsize = aead_setauthsize,
3702                         .encrypt = old_aead_encrypt,
3703                         .decrypt = old_aead_decrypt,
3704                         .givencrypt = old_aead_givencrypt,
3705                         .geniv = "<built-in>",
3706                         .ivsize = DES3_EDE_BLOCK_SIZE,
3707                         .maxauthsize = SHA512_DIGEST_SIZE,
3708                         },
3709                 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3710                 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3711                                    OP_ALG_AAI_HMAC_PRECOMP,
3712                 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3713         },
3714         {
3715                 .name = "authenc(hmac(md5),cbc(des))",
3716                 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3717                 .blocksize = DES_BLOCK_SIZE,
3718                 .type = CRYPTO_ALG_TYPE_AEAD,
3719                 .template_aead = {
3720                         .setkey = aead_setkey,
3721                         .setauthsize = aead_setauthsize,
3722                         .encrypt = old_aead_encrypt,
3723                         .decrypt = old_aead_decrypt,
3724                         .givencrypt = old_aead_givencrypt,
3725                         .geniv = "<built-in>",
3726                         .ivsize = DES_BLOCK_SIZE,
3727                         .maxauthsize = MD5_DIGEST_SIZE,
3728                         },
3729                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3730                 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3731                 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3732         },
3733         {
3734                 .name = "authenc(hmac(sha1),cbc(des))",
3735                 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3736                 .blocksize = DES_BLOCK_SIZE,
3737                 .type = CRYPTO_ALG_TYPE_AEAD,
3738                 .template_aead = {
3739                         .setkey = aead_setkey,
3740                         .setauthsize = aead_setauthsize,
3741                         .encrypt = old_aead_encrypt,
3742                         .decrypt = old_aead_decrypt,
3743                         .givencrypt = old_aead_givencrypt,
3744                         .geniv = "<built-in>",
3745                         .ivsize = DES_BLOCK_SIZE,
3746                         .maxauthsize = SHA1_DIGEST_SIZE,
3747                         },
3748                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3749                 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3750                 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3751         },
3752         {
3753                 .name = "authenc(hmac(sha224),cbc(des))",
3754                 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3755                 .blocksize = DES_BLOCK_SIZE,
3756                 .type = CRYPTO_ALG_TYPE_AEAD,
3757                 .template_aead = {
3758                         .setkey = aead_setkey,
3759                         .setauthsize = aead_setauthsize,
3760                         .encrypt = old_aead_encrypt,
3761                         .decrypt = old_aead_decrypt,
3762                         .givencrypt = old_aead_givencrypt,
3763                         .geniv = "<built-in>",
3764                         .ivsize = DES_BLOCK_SIZE,
3765                         .maxauthsize = SHA224_DIGEST_SIZE,
3766                         },
3767                 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3768                 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3769                                    OP_ALG_AAI_HMAC_PRECOMP,
3770                 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3771         },
3772         {
3773                 .name = "authenc(hmac(sha256),cbc(des))",
3774                 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3775                 .blocksize = DES_BLOCK_SIZE,
3776                 .type = CRYPTO_ALG_TYPE_AEAD,
3777                 .template_aead = {
3778                         .setkey = aead_setkey,
3779                         .setauthsize = aead_setauthsize,
3780                         .encrypt = old_aead_encrypt,
3781                         .decrypt = old_aead_decrypt,
3782                         .givencrypt = old_aead_givencrypt,
3783                         .geniv = "<built-in>",
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha384),cbc(des))",
                .driver_name = "authenc-hmac-sha384-cbc-des-caam",
                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha512),cbc(des))",
                .driver_name = "authenc-hmac-sha512-cbc-des-caam",
                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
        },
        {
                .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = old_aead_encrypt,
                        .decrypt = old_aead_decrypt,
                        .givencrypt = old_aead_givencrypt,
                        .geniv = "<built-in>",
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                   OP_ALG_AAI_HMAC_PRECOMP,
                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
        },
        /* ablkcipher descriptor */
        {
                .name = "cbc(aes)",
                .driver_name = "cbc-aes-caam",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
        },
        {
                .name = "cbc(des3_ede)",
                .driver_name = "cbc-3des-caam",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
        },
        {
                .name = "cbc(des)",
                .driver_name = "cbc-des-caam",
                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = DES_KEY_SIZE,
                        .max_keysize = DES_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
        },
        {
                .name = "ctr(aes)",
                .driver_name = "ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .geniv = "chainiv",
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
        },
        {
                .name = "rfc3686(ctr(aes))",
                .driver_name = "rfc3686-ctr-aes-caam",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_GIVCIPHER,
                .template_ablkcipher = {
                        .setkey = ablkcipher_setkey,
                        .encrypt = ablkcipher_encrypt,
                        .decrypt = ablkcipher_decrypt,
                        .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
                        .min_keysize = AES_MIN_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
        }
};

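/*
 * Descriptor header template values for one algorithm: the class 1 and
 * class 2 OP_ALG selections, plus the operation used for split key
 * generation.
 */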
struct caam_alg_entry {
        int class1_alg_type;
        int class2_alg_type;
        int alg_op;
};

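/*
 * An aead_alg paired with its CAAM template values; 'registered' records
 * whether crypto_register_aead() succeeded, so that module exit only
 * unregisters algorithms that were actually registered.
 */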
struct caam_aead_alg {
        struct aead_alg aead;
        struct caam_alg_entry caam;
        bool registered;
};

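/*
 * AEAD algorithms exposed through the aead_alg interface: AES-GCM and
 * its RFC4106/RFC4543 variants.
 */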
static struct caam_aead_alg driver_aeads[] = {
        {
                .aead = {
                        .base = {
                                .cra_name = "rfc4106(gcm(aes))",
                                .cra_driver_name = "rfc4106-gcm-aes-caam",
                                .cra_blocksize = 1,
                        },
                        .setkey = rfc4106_setkey,
                        .setauthsize = rfc4106_setauthsize,
                        .encrypt = gcm_encrypt,
                        .decrypt = gcm_decrypt,
                        .ivsize = 8,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "rfc4543(gcm(aes))",
                                .cra_driver_name = "rfc4543-gcm-aes-caam",
                                .cra_blocksize = 1,
                        },
                        .setkey = rfc4543_setkey,
                        .setauthsize = rfc4543_setauthsize,
                        .encrypt = gcm_encrypt,
                        .decrypt = gcm_decrypt,
                        .ivsize = 8,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                },
        },
        /* Galois Counter Mode */
        {
                .aead = {
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-caam",
                                .cra_blocksize = 1,
                        },
                        .setkey = gcm_setkey,
                        .setauthsize = gcm_setauthsize,
                        .encrypt = gcm_encrypt,
                        .decrypt = gcm_decrypt,
                        .ivsize = 12,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                },
        },
};

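/*
 * Wrapper around crypto_alg, allocated by caam_alg_alloc() for each
 * driver_algs[] entry and kept on alg_list so module exit can
 * unregister and free it.
 */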
struct caam_crypto_alg {
        struct crypto_alg crypto_alg;
        struct list_head entry;
        struct caam_alg_entry caam;
};

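/*
 * Common per-transform initialization: allocate a job ring for this
 * transform and copy the descriptor header template values into the
 * per-transform context.
 */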
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }

        /* copy descriptor header template values */
        ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
        ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
        ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

        return 0;
}

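/* cra_init hook for the crypto_alg-based templates in driver_algs[] */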
static int caam_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct caam_crypto_alg *caam_alg =
                 container_of(alg, struct caam_crypto_alg, crypto_alg);
        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

        return caam_init_common(ctx, &caam_alg->caam);
}

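/* init hook for the aead_alg-based algorithms in driver_aeads[] */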
static int caam_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct caam_aead_alg *caam_alg =
                 container_of(alg, struct caam_aead_alg, aead);
        struct caam_ctx *ctx = crypto_aead_ctx(tfm);

        return caam_init_common(ctx, &caam_alg->caam);
}

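/*
 * Common per-transform teardown: unmap the shared descriptors and the
 * key if they were DMA-mapped, then release the job ring.
 */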
static void caam_exit_common(struct caam_ctx *ctx)
{
        if (ctx->sh_desc_enc_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
                                 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
        if (ctx->sh_desc_dec_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
                                 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
        if (ctx->sh_desc_givenc_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
                                 desc_bytes(ctx->sh_desc_givenc),
                                 DMA_TO_DEVICE);
        if (ctx->key_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->key_dma))
                dma_unmap_single(ctx->jrdev, ctx->key_dma,
                                 ctx->enckeylen + ctx->split_key_pad_len,
                                 DMA_TO_DEVICE);

        caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
        caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
        caam_exit_common(crypto_aead_ctx(tfm));
}

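/* Unregister every algorithm that was successfully registered at init */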
static void __exit caam_algapi_exit(void)
{
        struct caam_crypto_alg *t_alg, *n;
        int i;

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;

                if (t_alg->registered)
                        crypto_unregister_aead(&t_alg->aead);
        }

        if (!alg_list.next)
                return;

        list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
                crypto_unregister_alg(&t_alg->crypto_alg);
                list_del(&t_alg->entry);
                kfree(t_alg);
        }
}

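/*
 * Build a crypto_alg from a driver_algs[] template: copy the algorithm
 * names, fill in the fields common to all templates, and attach the
 * type-specific (givcipher/ablkcipher/AEAD) sub-template.
 */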
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
                                              *template)
{
        struct caam_crypto_alg *t_alg;
        struct crypto_alg *alg;

        t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
        if (!t_alg) {
                pr_err("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
        }

        alg = &t_alg->crypto_alg;

        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->driver_name);
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_cra_init;
        alg->cra_exit = caam_cra_exit;
        alg->cra_priority = CAAM_CRA_PRIORITY;
        alg->cra_blocksize = template->blocksize;
        alg->cra_alignmask = 0;
        alg->cra_ctxsize = sizeof(struct caam_ctx);
        alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                         template->type;
        switch (template->type) {
        case CRYPTO_ALG_TYPE_GIVCIPHER:
                alg->cra_type = &crypto_givcipher_type;
                alg->cra_ablkcipher = template->template_ablkcipher;
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                alg->cra_type = &crypto_ablkcipher_type;
                alg->cra_ablkcipher = template->template_ablkcipher;
                break;
        case CRYPTO_ALG_TYPE_AEAD:
                alg->cra_type = &crypto_aead_type;
                alg->cra_aead = template->template_aead;
                break;
        }

        t_alg->caam.class1_alg_type = template->class1_alg_type;
        t_alg->caam.class2_alg_type = template->class2_alg_type;
        t_alg->caam.alg_op = template->alg_op;

        return t_alg;
}

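/* Fill in the aead_alg fields that are common to every driver_aeads[] entry */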
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
        struct aead_alg *alg = &t_alg->aead;

        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

        alg->init = caam_aead_init;
        alg->exit = caam_aead_exit;
}

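/*
 * Locate the CAAM controller node, make sure the controller driver has
 * probed (drvdata set), then register all algorithm templates with the
 * crypto API.
 */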
static int __init caam_algapi_init(void)
{
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
        void *priv;
        int i = 0, err = 0;
        bool registered = false;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        ctrldev = &pdev->dev;
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv)
                return -ENODEV;

        INIT_LIST_HEAD(&alg_list);

        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                /* TODO: check if h/w supports alg */
                struct caam_crypto_alg *t_alg;

                t_alg = caam_alg_alloc(&driver_algs[i]);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        pr_warn("%s alg allocation failed\n",
                                driver_algs[i].driver_name);
                        continue;
                }

                err = crypto_register_alg(&t_alg->crypto_alg);
                if (err) {
                        pr_warn("%s alg registration failed\n",
                                t_alg->crypto_alg.cra_driver_name);
                        kfree(t_alg);
                        continue;
                }

                list_add_tail(&t_alg->entry, &alg_list);
                registered = true;
        }

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;

                caam_aead_alg_init(t_alg);

                err = crypto_register_aead(&t_alg->aead);
                if (err) {
                        pr_warn("%s alg registration failed\n",
                                t_alg->aead.base.cra_driver_name);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }

        if (registered)
                pr_info("caam algorithms registered in /proc/crypto\n");

        return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");