crypto/blkcipher.c
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
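
/*
 * Usage sketch (illustrative, not compiled as part of this file): a
 * synchronous cipher implementation typically drives the walk API
 * exported below as shown here.  "toy_xor_byte" is a hypothetical
 * stand-in for a real transform with cra_blocksize == 1; everything
 * else is the walk API defined in this file.  Passing 0 to
 * blkcipher_walk_done() reports that every byte of the chunk was
 * consumed.
 *
 *	static int toy_crypt(struct blkcipher_desc *desc,
 *			     struct scatterlist *dst,
 *			     struct scatterlist *src, unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			u8 *dst_p = walk.dst.virt.addr;
 *			u8 *src_p = walk.src.virt.addr;
 *			unsigned int n = walk.nbytes;
 *
 *			while (n--)
 *				*dst_p++ = toy_xor_byte(*src_p++);
 *
 *			err = blkcipher_walk_done(desc, &walk, 0);
 *		}
 *		return err;
 *	}
 */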

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}
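
/*
 * Worked example (assuming PAGE_SIZE == 4096): for start ending in
 * 0xff8 and len == 16, start + len - 1 lands 8 bytes into the next
 * page, so end_page is the base of that next page and max() returns
 * it, pushing the chunk up to begin exactly on the page boundary.
 * When the chunk already fits within start's page, end_page <= start
 * and start is returned unchanged.
 */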

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
                blkcipher_unmap_src(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = blkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < walk->cipher_blocksize)) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        bsize = min(walk->walk_blocksize, n);

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
            !scatterwalk_aligned(&walk->out, walk->alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
        unsigned bs = walk->walk_blocksize;
        unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
        unsigned int size = aligned_bs * 2 +
                            walk->ivsize + max(aligned_bs, walk->ivsize) -
                            (walk->alignmask + 1);
        u8 *iv;

        size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, walk->ivsize);

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}
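
/*
 * Sizing example for blkcipher_copy_iv() (illustrative numbers): with
 * bs == 16, ivsize == 16 and alignmask == 15, aligned_bs == 16 and
 * size == 16 * 2 + 16 + max(16, 16) - 16 == 48 bytes, plus the
 * crypto_tfm_ctx_alignment() slack added afterwards.  The three
 * blkcipher_get_spot() calls then skip over two aligned scratch blocks
 * and place the IV copy itself so that none of them straddles a page.
 */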

int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = desc->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = blkcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk,
                                   struct crypto_aead *tfm,
                                   unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_aead_blocksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

/*
 * This function allows ablkcipher algorithms to use the blkcipher_walk API to
 * walk over their data.  The specified crypto_ablkcipher tfm is used to
 * initialize the struct blkcipher_walk, and the crypto_blkcipher specified in
 * desc->tfm is never used so it can be left NULL.  (Yes, this design is ugly,
 * but it parallels blkcipher_aead_walk_virt_block() above.  In the 4.10 kernel
 * this is starting to be cleaned up...)
 */
int blkcipher_ablkcipher_walk_virt(struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk,
                                   struct crypto_ablkcipher *tfm)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_ablkcipher_blocksize(tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_ablkcipher_ivsize(tfm);
        walk->alignmask = crypto_ablkcipher_alignmask(tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_ablkcipher_walk_virt);
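
/*
 * Sketch of a caller (hypothetical, following the comment above): an
 * ablkcipher implementation builds a blkcipher_desc with a NULL tfm
 * and walks its request.  "do_one_chunk" is a made-up helper standing
 * in for the actual per-chunk processing.
 *
 *	static int example_ablk_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 *		struct blkcipher_desc desc = {
 *			.info	= req->info,
 *			.flags	= req->base.flags,
 *		};
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *		err = blkcipher_ablkcipher_walk_virt(&desc, &walk, tfm);
 *		while (walk.nbytes) {
 *			do_one_chunk(&walk);
 *			err = blkcipher_walk_done(&desc, &walk, 0);
 *		}
 *		return err;
 *	}
 */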

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
                                             "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
        .report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
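
/*
 * Registration sketch (hypothetical algorithm, illustrative only): a
 * synchronous cipher hooks into this type by setting cra_type to
 * &crypto_blkcipher_type and filling in cra_u.blkcipher.  The
 * "example_*" names below are made up; note that ivsize must not
 * exceed PAGE_SIZE / 8, as enforced in crypto_init_blkcipher_ops()
 * above.
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "example",
 *		.cra_driver_name	= "example-generic",
 *		.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_type		= &crypto_blkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= {
 *			.blkcipher = {
 *				.min_keysize	= 16,
 *				.max_keysize	= 32,
 *				.ivsize		= 16,
 *				.setkey		= example_setkey,
 *				.encrypt	= example_encrypt,
 *				.decrypt	= example_decrypt,
 *			},
 *		},
 *	};
 *
 * and is then registered with crypto_register_alg(&example_alg).
 */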

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
                                const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
                                             struct rtattr **tb, u32 type,
                                             u32 mask)
{
        struct {
                int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen);
                int (*encrypt)(struct ablkcipher_request *req);
                int (*decrypt)(struct ablkcipher_request *req);

                unsigned int min_keysize;
                unsigned int max_keysize;
                unsigned int ivsize;

                const char *geniv;
        } balg;
        const char *name;
        struct crypto_skcipher_spawn *spawn;
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
            algt->mask)
                return ERR_PTR(-EINVAL);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return ERR_CAST(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);

        /* Ignore async algorithms if necessary. */
        mask |= crypto_requires_sync(algt->type, algt->mask);

        crypto_set_skcipher_spawn(spawn, inst);
        err = crypto_grab_nivcipher(spawn, name, type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER) {
                balg.ivsize = alg->cra_blkcipher.ivsize;
                balg.min_keysize = alg->cra_blkcipher.min_keysize;
                balg.max_keysize = alg->cra_blkcipher.max_keysize;

                balg.setkey = async_setkey;
                balg.encrypt = async_encrypt;
                balg.decrypt = async_decrypt;

                balg.geniv = alg->cra_blkcipher.geniv;
        } else {
                balg.ivsize = alg->cra_ablkcipher.ivsize;
                balg.min_keysize = alg->cra_ablkcipher.min_keysize;
                balg.max_keysize = alg->cra_ablkcipher.max_keysize;

                balg.setkey = alg->cra_ablkcipher.setkey;
                balg.encrypt = alg->cra_ablkcipher.encrypt;
                balg.decrypt = alg->cra_ablkcipher.decrypt;

                balg.geniv = alg->cra_ablkcipher.geniv;
        }

        err = -EINVAL;
        if (!balg.ivsize)
                goto err_drop_alg;

        /*
         * This is only true if we're constructing an algorithm with its
         * default IV generator.  For the default generator we elide the
         * template name and double-check the IV generator.
         */
        if (algt->mask & CRYPTO_ALG_GENIV) {
                if (!balg.geniv)
                        balg.geniv = crypto_default_geniv(alg);
                err = -EAGAIN;
                if (strcmp(tmpl->name, balg.geniv))
                        goto err_drop_alg;

                memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
                memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
                       CRYPTO_MAX_ALG_NAME);
        } else {
                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
                if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_driver_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
        }

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_givcipher_type;

        inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
        inst->alg.cra_ablkcipher.geniv = balg.geniv;

        inst->alg.cra_ablkcipher.setkey = balg.setkey;
        inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
        inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
        return inst;

err_drop_alg:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
        crypto_drop_skcipher(crypto_instance_ctx(inst));
        kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_ablkcipher *cipher;

        cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        tfm->crt_ablkcipher.base = cipher;
        tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

        return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
        crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
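
/*
 * Template sketch (hypothetical, modelled loosely on the in-tree IV
 * generators that consume this API): a geniv template allocates its
 * instance with skcipher_geniv_alloc() and wires up the init/exit
 * helpers above.  "example_givencrypt" is a made-up generator, and
 * real users typically wrap skcipher_geniv_init() in their own
 * cra_init rather than using it directly.
 *
 *	static struct crypto_instance *example_geniv_alloc(
 *		struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *
 *		inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
 *		if (IS_ERR(inst))
 *			return inst;
 *
 *		inst->alg.cra_init = skcipher_geniv_init;
 *		inst->alg.cra_exit = skcipher_geniv_exit;
 *		inst->alg.cra_ablkcipher.givencrypt = example_givencrypt;
 *		return inst;
 *	}
 *
 *	static struct crypto_template example_geniv_tmpl = {
 *		.name	= "examplegeniv",
 *		.alloc	= example_geniv_alloc,
 *		.free	= skcipher_geniv_free,
 *		.module	= THIS_MODULE,
 *	};
 */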

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");