/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len)
{
	ptr->len = cpu_to_be16(len);
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr)
{
	return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr)
{
	ptr->j_extent = 0;
}
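/*
 * Illustrative example (not part of the driver): with 36-bit addressing,
 * a bus address such as 0x8_1234_5678 is split by to_talitos_ptr() into
 * eptr = 0x8 (upper_32_bits) and ptr = cpu_to_be32(0x12345678), matching
 * the extended-pointer format of the h/w descriptor.
 */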
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	to_talitos_ptr_len(ptr, len);
	to_talitos_ptr(ptr, dma_addr);
	to_talitos_ptr_extent_clear(ptr);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
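/*
 * Minimal usage sketch (illustrative only; my_done and my_req are
 * hypothetical caller names, not part of this driver).  A caller fills a
 * dma-mapped descriptor, submits it on a channel, and completes the crypto
 * request from its callback:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// unmap buffers, then complete the request with `error`
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN: channel fifo full, unmap and back off
 */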
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;

		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
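/*
 * Note on the mask tests above (illustrative): each channel owns two ISR
 * bits, done at bit (2 * ch) and error at bit (2 * ch + 1), which is why
 * the 4-channel done handler tests bits 0, 2, 4 and 6 and talitos_error()
 * tests (1 << (ch * 2 + 1)).  For example, an ISR value of 0x14 would mean
 * channels 1 and 2 completed, assuming this bit layout.
 */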
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
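/*
 * Worked example (illustrative): for three 64-byte segments and
 * cryptlen = 100, the loop above leaves cryptlen = 100 - 192 = -92.  The
 * trailing 64-byte entry is emptied (cryptlen becomes -28), then the second
 * entry is shortened by 28 to 36 bytes, so the table describes exactly
 * 64 + 36 = 100 bytes and the shortened entry carries the
 * DESC_PTR_LNKTBL_RETURN end tag.
 */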
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc));
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}
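/*
 * Example (illustrative): for a two-segment list of 48 + 80 bytes and
 * nbytes = 100, the walk visits both segments (48 bytes, then 52 of the
 * 80) and returns 2; *chained is set only when a zero-length stub entry
 * links onward to a second scatterlist array.
 */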
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
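/*
 * Resulting edesc layout, per the sizing above (illustrative):
 *
 *	struct talitos_edesc header
 *	link_tbl[0 .. src_nents - 1]		input link table
 *	link_tbl[src_nents + 1 ...]		output link table
 *	"+ 2" spare entries			IV and ICV link entries
 *	assoc_nents entries			assoc link table
 *	authsize bytes				stashed ICV data
 *
 * which is why the completion callbacks index ICV data at
 * link_tbl[src_nents + dst_nents + 2 + assoc_nents].
 */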
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;

	to_talitos_ptr_len(ptr, len);
	to_talitos_ptr_extent_clear(ptr);

	sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src));
	} else {
		sg_count = sg_to_link_tbl(src, sg_count, len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(ptr, edesc->dma_link_tbl);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(ptr, sg_dma_address(src));
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	to_talitos_ptr_len(ptr, len);
	to_talitos_ptr_extent_clear(ptr);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + 1) *
			       sizeof(struct talitos_ptr));
		ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
	to_talitos_ptr_len(&desc->ptr[1], ivsize);
	to_talitos_ptr_extent_clear(&desc->ptr[1]);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1]))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2]))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
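/*
 * Background note (illustrative): the MDEU context here is eight 32-bit
 * chaining values followed by a 64-bit message-length count.  Loading
 * SHA224_H0..H7 with a zeroed count and running the SHA-256 mode with
 * swinit set yields SHA-224, since the two algorithms differ only in
 * their initial values and in the digest being truncated to 224 bits.
 */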
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
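/*
 * Buffering example (illustrative, blocksize = 64): an update of 100 bytes
 * with 16 already buffered gives nbytes_to_hash = 116 and to_hash_later =
 * 116 & 63 = 52, so 64 bytes are hashed now and 52 are stashed in bufnext.
 * Had the total been an exact multiple of 64 (and !last), one full block
 * would be held back instead, since the SEC pads on the final descriptor.
 */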
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}
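/*
 * Example (illustrative): hmac(sha1) has a 64-byte block size, so a
 * 100-byte key is first digested down to 20 bytes by keyhash() and those
 * 20 bytes become ctx->key, exactly as RFC 2104 prescribes for keys
 * longer than the block size.
 */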
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
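/*
 * Note on the table above: each template ORs a descriptor type with
 * execution-unit selects and mode bits.  The IPSEC_ESP AEAD entries
 * pair a cipher EU (AESU or DEU) with an MDEU HMAC mode; the
 * non-snooping entries drive a single EU.  Only templates whose type
 * and EUs pass hw_supports() at probe time are actually registered.
 */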
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);
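	/*
	 * Note: talitos_probe() rejects channel counts that are not a
	 * power of two, so the mask above is a cheap modulo.  For
	 * example, with num_channels = 4 successive tfm allocations
	 * cycle through channels 1, 2, 3, 0, 1, ...
	 */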
	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}
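/*
 * The random bytes above seed the per-tfm IV state backing the
 * "<built-in>" geniv wired up in talitos_alg_alloc() below; the
 * driver's givencrypt path can then derive each request's IV from
 * this state (e.g. by mixing in the request sequence number) instead
 * of instantiating a separate geniv template.
 */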
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
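/*
 * Worked example (illustrative, not tied to any particular SoC): an
 * IPSEC_ESP template whose primary EU is the AESU and whose secondary
 * EU is MDEU-A is reported as supported only when the node's
 * "fsl,descriptor-types-mask" has the IPSEC_ESP descriptor-type bit
 * set and "fsl,exec-units-mask" has both the AESU and MDEU bits set.
 */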
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);
	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			/* no h/w SHA-224 init: use SHA-256 EU, s/w state */
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
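/*
 * A sketch of how the template table is consumed: each driver_algs[]
 * entry that passes hw_supports() is cloned via talitos_alg_alloc()
 * and then handed to the crypto API, roughly:
 *
 *	t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 *	if (!IS_ERR(t_alg))
 *		err = crypto_register_alg(&t_alg->algt.alg.crypto);
 *
 * (illustrative only; the real flow in talitos_probe() below also
 * handles ahash registration and the -ENOTSUPP skip case)
 */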
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
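/*
 * Note on IRQ topology: single-IRQ SEC units service all four
 * channels from one handler (talitos_interrupt_4ch), while dual-IRQ
 * units split channels 0/2 and 1/3 across the two lines; this matches
 * the one or two done tasklets initialized in talitos_probe() below.
 */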
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}
	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
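	/*
	 * A minimal sketch of the device tree node consumed above
	 * (property values are illustrative, not authoritative for any
	 * given SoC):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <11 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x3ab0ebf>;
	 *	};
	 */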
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;
	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}
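	/*
	 * The negative submit_count bias above sizes the software flow
	 * control to the hardware fifo: the counter can be incremented
	 * chfifo_len - 1 times before going positive, so the submit
	 * path (not shown in this section) is expected to treat a
	 * positive count as fifo-full, keeping one descriptor slot in
	 * reserve.
	 */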
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name = t_alg->algt.alg.hash.halg.base.
				       cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");