/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

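/*
 * Worked example (illustrative only, PAGE_SIZE == 4096 assumed): with start
 * at page offset 0xff8 and len == 16, the last byte would land at offset
 * 0x007 of the next page, so end_page points at that next page boundary and
 * is returned, pushing the spot past the page break.  When the whole range
 * [start, start + len) already fits in start's page, end_page <= start and
 * start itself is returned unchanged.
 */
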
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

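/*
 * blkcipher_walk_next() below picks one of three strategies for the next
 * chunk: the fast path maps the scatterlist entries directly, the copy path
 * bounces the data through walk->page when the source or destination is not
 * suitably aligned, and the slow path assembles a single block in a
 * temporary buffer when a block straddles a scatterlist entry.
 */
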
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

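/*
 * Illustrative sketch (not part of this file): a blkcipher ->encrypt or
 * ->decrypt implementation typically drives the walk as below, where
 * example_crypt_segment() is a hypothetical helper that processes whole
 * blocks in walk.src/walk.dst using walk.iv and returns the number of
 * bytes it left unprocessed.
 *
 *	struct blkcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, len);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes) != 0) {
 *		nbytes = example_crypt_segment(ctx, &walk, nbytes);
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *	return err;
 */
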
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}
	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

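/*
 * Worked example (illustrative numbers only): with an alignmask of 15
 * (align == 16), a context starting at 0x...1008 and cra_ctxsize == 40,
 * the context base is rounded up to 0x...1010, ALIGN(40, 16) == 48 is
 * added, and the IV ends up at 0x...1040, i.e. inside the extra space
 * that crypto_blkcipher_ctxsize() above reserves for it.
 */
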
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

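/*
 * Illustrative sketch (not part of this file): a synchronous cipher built
 * on this type points cra_type at crypto_blkcipher_type and fills in
 * cra_u.blkcipher.  All example_* names and the sizes below are
 * hypothetical.
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "cbc(example)",
 *		.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_type		= &crypto_blkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= { .blkcipher = {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.ivsize		= 16,
 *			.setkey		= example_setkey,
 *			.encrypt	= example_encrypt,
 *			.decrypt	= example_decrypt,
 *		} },
 *	};
 *
 * which would then be registered with crypto_register_alg(&example_alg).
 */
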
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

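/*
 * Illustrative sketch (not part of this file): an IV-generator template
 * typically builds its instance with skcipher_geniv_alloc() and tears it
 * down with skcipher_geniv_free(), while its transforms use
 * skcipher_geniv_init()/skcipher_geniv_exit() as cra_init/cra_exit.  The
 * example_* names are hypothetical.
 *
 *	static struct crypto_instance *example_geniv_alloc(
 *		struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *
 *		inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
 *		if (IS_ERR(inst))
 *			return inst;
 *
 *		inst->alg.cra_init = skcipher_geniv_init;
 *		inst->alg.cra_exit = skcipher_geniv_exit;
 *		inst->alg.cra_ablkcipher.givencrypt = example_givencrypt;
 *
 *		return inst;
 *	}
 */
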
void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");