crypto: talitos - Add a feature to tag SEC1
[firefly-linux-kernel-4.4.55.git] / drivers / crypto / talitos.c
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
/*
 * Write a (possibly 36-bit) bus address into a h/w descriptor pointer:
 * the low 32 bits go to ->ptr (big endian), the upper bits to ->eptr.
 */
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	ptr->eptr = upper_32_bits(dma_addr);
}
63
/* Store the byte length of a h/w descriptor pointer (big endian). */
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len)
{
	ptr->len = cpu_to_be16(len);
}
68
/* Read back the byte length of a h/w descriptor pointer. */
static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr)
{
	return be16_to_cpu(ptr->len);
}
73
/* Clear the jump/extent field of a h/w descriptor pointer. */
static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr)
{
	ptr->j_extent = 0;
}
78
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 *
 * Maps @len bytes at @data for DMA in direction @dir and fills in the
 * descriptor pointer's length, address and (cleared) extent fields.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() before being programmed into the descriptor —
 * confirm mapping failures cannot occur on the supported platforms.
 */
static void map_single_talitos_ptr(struct device *dev,
                                   struct talitos_ptr *ptr,
                                   unsigned short len, void *data,
                                   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	to_talitos_ptr_len(ptr, len);
	to_talitos_ptr(ptr, dma_addr);
	to_talitos_ptr_extent_clear(ptr);
}
93
94 /*
95  * unmap bus single (contiguous) h/w descriptor pointer
96  */
97 static void unmap_single_talitos_ptr(struct device *dev,
98                                      struct talitos_ptr *ptr,
99                                      enum dma_data_direction dir)
100 {
101         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
102                          from_talitos_ptr_len(ptr), dir);
103 }
104
/*
 * Reset one SEC channel and restore its baseline configuration.
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	/* h/w clears the RESET bit when the channel reset completes */
	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
132
/*
 * Issue a software reset of the whole SEC block.
 * Returns 0 on success, -EIO if the SWR bit never self-clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	/* h/w clears SWR when the master reset completes */
	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	/*
	 * SWR cleared the channel/IRQ routing; on dual-IRQ parts,
	 * re-route channels 1 and 3 to the secondary interrupt line.
	 */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
157
/*
 * Reset and initialize the device
 *
 * Performs a double master reset (errata workaround), resets every
 * channel, then enables done/error interrupts.  Returns 0 on success
 * or the first reset error code.
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
198
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:        the SEC device to be used
 * @ch:         the SEC device channel to be used
 * @desc:       the descriptor to be processed by the device
 * @callback:   whom to call when processing is complete
 * @context:    a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS on successful submission, or -EAGAIN when the
 * channel's h/w fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the fifo-full gate: it is pre-biased so
	 * that reaching zero means no free request slots remain */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the fully-initialized entry before setting ->desc, which
	 * is the flag flush_channel() tests to consider the slot live */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
257
/*
 * process what was done, notify callback of error if not
 *
 * Walks channel @ch's request fifo from the tail, completing every
 * descriptor whose done bit is set (status 0) and, when @error is set,
 * also failing descriptors still pending.  Callbacks run with the tail
 * lock dropped.  @reset_ch indicates the caller will reset the channel,
 * so processing continues past a single failed descriptor.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		/* re-take the lock and re-read tail: it may have moved while
		 * the callback ran */
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
315
/*
 * process completed requests for channels that have done status
 *
 * Tasklet bodies generated per interrupt line.  ch_done_mask uses the
 * ISR layout where channel N's done bit is bit 2*N (bits 0/2/4/6).
 * The tasklet flushes each completed channel, then re-enables the done
 * interrupts that talitos_interrupt_*() masked before scheduling it.
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)                            \
static void talitos_done_##name(unsigned long data)                     \
{                                                                       \
        struct device *dev = (struct device *)data;                     \
        struct talitos_private *priv = dev_get_drvdata(dev);            \
        unsigned long flags;                                            \
                                                                        \
        if (ch_done_mask & 1)                                           \
                flush_channel(dev, 0, 0, 0);                            \
        if (priv->num_channels == 1)                                    \
                goto out;                                               \
        if (ch_done_mask & (1 << 2))                                    \
                flush_channel(dev, 1, 0, 0);                            \
        if (ch_done_mask & (1 << 4))                                    \
                flush_channel(dev, 2, 0, 0);                            \
        if (ch_done_mask & (1 << 6))                                    \
                flush_channel(dev, 3, 0, 0);                            \
                                                                        \
out:                                                                    \
        /* At this point, all completed channels have been processed */ \
        /* Unmask done interrupts for channels completed later on. */   \
        spin_lock_irqsave(&priv->reg_lock, flags);                      \
        setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
        setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);     \
        spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
348
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current descriptor pointer register and scans the
 * request fifo (starting at the tail) for the entry whose mapped bus
 * address matches.  Returns that descriptor's header word, or 0 if the
 * register is NULL or no fifo entry matches.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		/* wrapped all the way around without a match */
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
379
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution unit fields
 * of @desc_hdr and dumps the matching EU interrupt status registers,
 * followed by the channel's descriptor buffer contents.  A zero
 * desc_hdr falls back to the header saved in the channel's DESCBUF.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	/* dump all eight 64-bit words of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
454
455 /*
456  * recover from error interrupts
457  */
458 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
459 {
460         struct talitos_private *priv = dev_get_drvdata(dev);
461         unsigned int timeout = TALITOS_TIMEOUT;
462         int ch, error, reset_dev = 0, reset_ch = 0;
463         u32 v, v_lo;
464
465         for (ch = 0; ch < priv->num_channels; ch++) {
466                 /* skip channels without errors */
467                 if (!(isr & (1 << (ch * 2 + 1))))
468                         continue;
469
470                 error = -EINVAL;
471
472                 v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
473                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
474
475                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
476                         dev_err(dev, "double fetch fifo overflow error\n");
477                         error = -EAGAIN;
478                         reset_ch = 1;
479                 }
480                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
481                         /* h/w dropped descriptor */
482                         dev_err(dev, "single fetch fifo overflow error\n");
483                         error = -EAGAIN;
484                 }
485                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
486                         dev_err(dev, "master data transfer error\n");
487                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
488                         dev_err(dev, "s/g data length zero error\n");
489                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
490                         dev_err(dev, "fetch pointer zero error\n");
491                 if (v_lo & TALITOS_CCPSR_LO_IDH)
492                         dev_err(dev, "illegal descriptor header error\n");
493                 if (v_lo & TALITOS_CCPSR_LO_IEU)
494                         dev_err(dev, "invalid execution unit error\n");
495                 if (v_lo & TALITOS_CCPSR_LO_EU)
496                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
497                 if (v_lo & TALITOS_CCPSR_LO_GB)
498                         dev_err(dev, "gather boundary error\n");
499                 if (v_lo & TALITOS_CCPSR_LO_GRL)
500                         dev_err(dev, "gather return/length error\n");
501                 if (v_lo & TALITOS_CCPSR_LO_SB)
502                         dev_err(dev, "scatter boundary error\n");
503                 if (v_lo & TALITOS_CCPSR_LO_SRL)
504                         dev_err(dev, "scatter return/length error\n");
505
506                 flush_channel(dev, ch, error, reset_ch);
507
508                 if (reset_ch) {
509                         reset_channel(dev, ch);
510                 } else {
511                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
512                                   TALITOS_CCCR_CONT);
513                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
514                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
515                                TALITOS_CCCR_CONT) && --timeout)
516                                 cpu_relax();
517                         if (timeout == 0) {
518                                 dev_err(dev, "failed to restart channel %d\n",
519                                         ch);
520                                 reset_dev = 1;
521                         }
522                 }
523         }
524         if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
525                 dev_err(dev, "done overflow, internal time out, or rngu error: "
526                         "ISR 0x%08x_%08x\n", isr, isr_lo);
527
528                 /* purge request queues */
529                 for (ch = 0; ch < priv->num_channels; ch++)
530                         flush_channel(dev, ch, -EIO, 1);
531
532                 /* reset and reinitialize the device */
533                 init_device(dev);
534         }
535 }
536
/*
 * Hard-IRQ handlers generated per interrupt line.  Each handler latches
 * and acknowledges the status bits it owns; channel errors are handled
 * synchronously via talitos_error() (with the register lock dropped),
 * while done work is deferred to the matching tasklet after masking
 * further done interrupts.
 */
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)           \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)               \
{                                                                              \
        struct device *dev = data;                                             \
        struct talitos_private *priv = dev_get_drvdata(dev);                   \
        u32 isr, isr_lo;                                                       \
        unsigned long flags;                                                   \
                                                                               \
        spin_lock_irqsave(&priv->reg_lock, flags);                             \
        isr = in_be32(priv->reg + TALITOS_ISR);                                \
        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
        /* Acknowledge interrupt */                                            \
        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
                                                                               \
        if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
                talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
        }                                                                      \
        else {                                                                 \
                if (likely(isr & ch_done_mask)) {                              \
                        /* mask further done interrupts. */                    \
                        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
                        /* done_task will unmask done interrupts at exit */    \
                        tasklet_schedule(&priv->done_task[tlet]);              \
                }                                                              \
                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
        }                                                                      \
                                                                               \
        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
                                                                IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
572
573 /*
574  * hwrng
575  */
576 static int talitos_rng_data_present(struct hwrng *rng, int wait)
577 {
578         struct device *dev = (struct device *)rng->priv;
579         struct talitos_private *priv = dev_get_drvdata(dev);
580         u32 ofl;
581         int i;
582
583         for (i = 0; i < 20; i++) {
584                 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
585                       TALITOS_RNGUSR_LO_OFL;
586                 if (ofl || !wait)
587                         break;
588                 udelay(10);
589         }
590
591         return !!ofl;
592 }
593
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	/* The double read is deliberate: the first access covers the
	 * high word to satisfy the 64-bit access requirement; only the
	 * low word is returned to the caller. */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}
605
/*
 * hwrng .init hook: software-reset the RNGU and start generation.
 * Returns 0 on success, -ENODEV if the unit never reports reset done.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	/* wait for the reset-done status bit */
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}
626
627 static int talitos_register_rng(struct device *dev)
628 {
629         struct talitos_private *priv = dev_get_drvdata(dev);
630
631         priv->rng.name          = dev_driver_string(dev),
632         priv->rng.init          = talitos_rng_init,
633         priv->rng.data_present  = talitos_rng_data_present,
634         priv->rng.data_read     = talitos_rng_data_read,
635         priv->rng.priv          = (unsigned long)dev;
636
637         return hwrng_register(&priv->rng);
638 }
639
/* Tear down the hwrng interface registered by talitos_register_rng(). */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
646
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY            3000
/* combined auth + enc key storage; see aead_setkey() bound check */
#define TALITOS_MAX_KEY_SIZE            96
#define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/* per-transform (tfm) context */
struct talitos_ctx {
	struct device *dev;             /* SEC device servicing this tfm */
	int ch;                         /* assigned channel */
	__be32 desc_hdr_template;       /* descriptor header for this alg */
	u8 key[TALITOS_MAX_KEY_SIZE];   /* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;            /* total bytes valid in key[] */
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;          /* ICV length (see aead_setauthsize) */
};
665
#define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

/* per-request state for ahash operations */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; /* MDEU ctx */
	unsigned int hw_context_size;   /* bytes of hw_context in use */
	u8 buf[HASH_MAX_BLOCK_SIZE];    /* partial-block accumulation */
	u8 bufnext[HASH_MAX_BLOCK_SIZE]; /* carry-over for the next update */
	unsigned int swinit;            /* context initialized in s/w, not h/w */
	unsigned int first;             /* first hash pass of this request */
	unsigned int last;              /* final pass (produce digest) */
	unsigned int to_hash_later;     /* bytes deferred to the next pass */
	u64 nbuf;                       /* bytes currently buffered in buf[] */
	struct scatterlist bufsl[2];    /* s/g wrapper for buffered data */
	struct scatterlist *psrc;       /* data actually fed to the h/w */
};
682
683 static int aead_setauthsize(struct crypto_aead *authenc,
684                             unsigned int authsize)
685 {
686         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
687
688         ctx->authsize = authsize;
689
690         return 0;
691 }
692
693 static int aead_setkey(struct crypto_aead *authenc,
694                        const u8 *key, unsigned int keylen)
695 {
696         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
697         struct crypto_authenc_keys keys;
698
699         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
700                 goto badkey;
701
702         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
703                 goto badkey;
704
705         memcpy(ctx->key, keys.authkey, keys.authkeylen);
706         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
707
708         ctx->keylen = keys.authkeylen + keys.enckeylen;
709         ctx->enckeylen = keys.enckeylen;
710         ctx->authkeylen = keys.authkeylen;
711
712         return 0;
713
714 badkey:
715         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
716         return -EINVAL;
717 }
718
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* variable-length tail, sized at allocation time ([0] is the
	 * pre-C99 flexible-array idiom used throughout this file) */
	struct talitos_ptr link_tbl[0];
};
750
751 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
752                           unsigned int nents, enum dma_data_direction dir,
753                           bool chained)
754 {
755         if (unlikely(chained))
756                 while (sg) {
757                         dma_map_sg(dev, sg, 1, dir);
758                         sg = sg_next(sg);
759                 }
760         else
761                 dma_map_sg(dev, sg, nents, dir);
762         return nents;
763 }
764
765 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
766                                    enum dma_data_direction dir)
767 {
768         while (sg) {
769                 dma_unmap_sg(dev, sg, 1, dir);
770                 sg = sg_next(sg);
771         }
772 }
773
774 static void talitos_sg_unmap(struct device *dev,
775                              struct talitos_edesc *edesc,
776                              struct scatterlist *src,
777                              struct scatterlist *dst)
778 {
779         unsigned int src_nents = edesc->src_nents ? : 1;
780         unsigned int dst_nents = edesc->dst_nents ? : 1;
781
782         if (src != dst) {
783                 if (edesc->src_chained)
784                         talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
785                 else
786                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
787
788                 if (dst) {
789                         if (edesc->dst_chained)
790                                 talitos_unmap_sg_chain(dev, dst,
791                                                        DMA_FROM_DEVICE);
792                         else
793                                 dma_unmap_sg(dev, dst, dst_nents,
794                                              DMA_FROM_DEVICE);
795                 }
796         } else
797                 if (edesc->src_chained)
798                         talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
799                 else
800                         dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
801 }
802
/*
 * Undo all DMA mappings made by ipsec_esp() for an AEAD request.
 * Pointer slots (see ipsec_esp): ptr[0] = HMAC key, ptr[2] = cipher IV,
 * ptr[3] = cipher key, ptr[6] = IV out.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	/* release the link-table / ICV bounce area, if one was mapped */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
826
827 /*
828  * ipsec_esp descriptor callbacks
829  */
/*
 * Completion callback for AEAD encryption: copy the hardware-generated
 * ICV from the edesc scratch area to the tail of the destination
 * scatterlist, then complete the request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the h/w descriptor is embedded in the edesc; recover the latter */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		/* ICV was deposited after the src/dst/assoc link tables */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
859
860 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
861                                           struct talitos_desc *desc,
862                                           void *context, int err)
863 {
864         struct aead_request *req = context;
865         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
866         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
867         struct talitos_edesc *edesc;
868         struct scatterlist *sg;
869         void *icvdata;
870
871         edesc = container_of(desc, struct talitos_edesc, desc);
872
873         ipsec_esp_unmap(dev, edesc, req);
874
875         if (!err) {
876                 /* auth check */
877                 if (edesc->dma_len)
878                         icvdata = &edesc->link_tbl[edesc->src_nents +
879                                                    edesc->dst_nents + 2 +
880                                                    edesc->assoc_nents];
881                 else
882                         icvdata = &edesc->link_tbl[0];
883
884                 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
885                 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
886                              ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
887         }
888
889         kfree(edesc);
890
891         aead_request_complete(req, err);
892 }
893
894 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
895                                           struct talitos_desc *desc,
896                                           void *context, int err)
897 {
898         struct aead_request *req = context;
899         struct talitos_edesc *edesc;
900
901         edesc = container_of(desc, struct talitos_edesc, desc);
902
903         ipsec_esp_unmap(dev, edesc, req);
904
905         /* check ICV auth status */
906         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
907                      DESC_HDR_LO_ICCR1_PASS))
908                 err = -EBADMSG;
909
910         kfree(edesc);
911
912         aead_request_complete(req, err);
913 }
914
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 *
 * Returns the number of link table entries actually in use (trailing
 * entries emptied by the trimming pass below are not counted).
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	/* one link table entry per DMA segment */
	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	/* cryptlen is now <= 0: drop entries that are wholly past the end */
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	/* shorten the last kept entry by the remaining overshoot */
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
949
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Descriptor pointer layout used here and undone by ipsec_esp_unmap():
 *   ptr[0] HMAC key, ptr[1] HMAC data (assoc + IV), ptr[2] cipher IV,
 *   ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out, ptr[6] IV out.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		/* assoc is non-contiguous with the IV: build a link table
		 * for it, placed after the src and dst tables in the DMA
		 * area (hence the tbl_off below)
		 */
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		/* previous entry no longer terminates the table */
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		/* contiguous case: point directly at assoc (the IV follows
		 * it in DMA space — see talitos_edesc_alloc), or at the IV
		 * alone when there is no assoc data
		 */
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc));
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		/* hw-auth decrypt also reads the appended ICV from src */
		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		/* dst link table starts right after the src entries */
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* on any submit error, unwind all mappings made above */
	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1102
1103 /*
1104  * derive number of elements in scatterlist
1105  */
1106 static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1107 {
1108         struct scatterlist *sg = sg_list;
1109         int sg_nents = 0;
1110
1111         *chained = false;
1112         while (nbytes > 0) {
1113                 sg_nents++;
1114                 nbytes -= sg->length;
1115                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1116                         *chained = true;
1117                 sg = sg_next(sg);
1118         }
1119
1120         return sg_nents;
1121 }
1122
/*
 * allocate and map the extended descriptor
 *
 * Maps the IV and (when present) the assoc scatterlist up front; on
 * allocation failure those mappings are undone before returning.  The
 * *_nents fields are normalised to 0 when a single contiguous segment
 * suffices (no link table needed for that list).
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		/* 0 means "single contiguous segment, no link table" */
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		/* if assoc and IV are not contiguous in DMA space, the IV
		 * needs its own link-table entry alongside assoc
		 */
		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		/* the ICV lives in src on decrypt, in dst on encrypt */
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* GFP_DMA: the tail of the edesc is DMA-mapped below */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		/* roll back the assoc and IV mappings made above */
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
1229
1230 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1231                                               int icv_stashing, bool encrypt)
1232 {
1233         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1234         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1235         unsigned int ivsize = crypto_aead_ivsize(authenc);
1236
1237         return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1238                                    iv, areq->assoclen, areq->cryptlen,
1239                                    ctx->authsize, ivsize, icv_stashing,
1240                                    areq->base.flags, encrypt);
1241 }
1242
1243 static int aead_encrypt(struct aead_request *req)
1244 {
1245         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1246         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1247         struct talitos_edesc *edesc;
1248
1249         /* allocate extended descriptor */
1250         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1251         if (IS_ERR(edesc))
1252                 return PTR_ERR(edesc);
1253
1254         /* set encrypt */
1255         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1256
1257         return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1258 }
1259
/*
 * AEAD decrypt entry point.  Uses the SEC's hardware ICV check when the
 * device supports it for this request shape; otherwise stashes the
 * incoming ICV and verifies it in software on completion.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* cryptlen includes the trailing ICV; cipher only the payload */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		/* scratch area past the src/dst/assoc link tables */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
1310
1311 static int aead_givencrypt(struct aead_givcrypt_request *req)
1312 {
1313         struct aead_request *areq = &req->areq;
1314         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1315         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1316         struct talitos_edesc *edesc;
1317
1318         /* allocate extended descriptor */
1319         edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1320         if (IS_ERR(edesc))
1321                 return PTR_ERR(edesc);
1322
1323         /* set encrypt */
1324         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1325
1326         memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1327         /* avoid consecutive packets going out with same IV */
1328         *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1329
1330         return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
1331 }
1332
1333 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1334                              const u8 *key, unsigned int keylen)
1335 {
1336         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1337
1338         memcpy(&ctx->key, key, keylen);
1339         ctx->keylen = keylen;
1340
1341         return 0;
1342 }
1343
/*
 * Unmap the src/dst scatterlists of a request.  @len is unused here;
 * presumably the signature is kept symmetric with the map_sg_*_talitos_ptr
 * helpers for the SEC1/SEC2 split — confirm against the other variant.
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}
1350
/*
 * Undo all DMA mappings made by common_nonsnoop() for an ablkcipher
 * request.  Slots (see common_nonsnoop): ptr[1] = cipher IV,
 * ptr[2] = cipher key, ptr[5] = IV out.
 */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	/* release the link-table DMA area, if one was mapped */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1365
1366 static void ablkcipher_done(struct device *dev,
1367                             struct talitos_desc *desc, void *context,
1368                             int err)
1369 {
1370         struct ablkcipher_request *areq = context;
1371         struct talitos_edesc *edesc;
1372
1373         edesc = container_of(desc, struct talitos_edesc, desc);
1374
1375         common_nonsnoop_unmap(dev, edesc, areq);
1376
1377         kfree(edesc);
1378
1379         areq->base.complete(&areq->base, err);
1380 }
1381
1382 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1383                           unsigned int len, struct talitos_edesc *edesc,
1384                           enum dma_data_direction dir, struct talitos_ptr *ptr)
1385 {
1386         int sg_count;
1387
1388         to_talitos_ptr_len(ptr, len);
1389         to_talitos_ptr_extent_clear(ptr);
1390
1391         sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1392                                   edesc->src_chained);
1393
1394         if (sg_count == 1) {
1395                 to_talitos_ptr(ptr, sg_dma_address(src));
1396         } else {
1397                 sg_count = sg_to_link_tbl(src, sg_count, len,
1398                                           &edesc->link_tbl[0]);
1399                 if (sg_count > 1) {
1400                         to_talitos_ptr(ptr, edesc->dma_link_tbl);
1401                         ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1402                         dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1403                                                    edesc->dma_len,
1404                                                    DMA_BIDIRECTIONAL);
1405                 } else {
1406                         /* Only one segment now, so no link tbl needed */
1407                         to_talitos_ptr(ptr, sg_dma_address(src));
1408                 }
1409         }
1410         return sg_count;
1411 }
1412
1413 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1414                             unsigned int len, struct talitos_edesc *edesc,
1415                             enum dma_data_direction dir,
1416                             struct talitos_ptr *ptr, int sg_count)
1417 {
1418         to_talitos_ptr_len(ptr, len);
1419         to_talitos_ptr_extent_clear(ptr);
1420
1421         if (dir != DMA_NONE)
1422                 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1423                                           dir, edesc->dst_chained);
1424
1425         if (sg_count == 1) {
1426                 to_talitos_ptr(ptr, sg_dma_address(dst));
1427         } else {
1428                 struct talitos_ptr *link_tbl_ptr =
1429                         &edesc->link_tbl[edesc->src_nents + 1];
1430
1431                 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1432                                               (edesc->src_nents + 1) *
1433                                               sizeof(struct talitos_ptr));
1434                 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1435                 sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1436                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1437                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1438         }
1439 }
1440
/*
 * Fill in and submit an ablkcipher descriptor.
 * Slot layout: ptr[0] empty, ptr[1] cipher IV, ptr[2] cipher key,
 * ptr[3] cipher in, ptr[4] cipher out, ptr[5] IV out, ptr[6] empty.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
	to_talitos_ptr_len(&desc->ptr[1], ivsize);
	to_talitos_ptr_extent_clear(&desc->ptr[1]);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out (DMA_NONE: in-place, reuse the src mapping/sg_count) */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
						        : DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* on any submit error, unwind all mappings made above */
	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1495
1496 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1497                                                     areq, bool encrypt)
1498 {
1499         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1500         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1501         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1502
1503         return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1504                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1505                                    areq->base.flags, encrypt);
1506 }
1507
1508 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1509 {
1510         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1511         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1512         struct talitos_edesc *edesc;
1513
1514         /* allocate extended descriptor */
1515         edesc = ablkcipher_edesc_alloc(areq, true);
1516         if (IS_ERR(edesc))
1517                 return PTR_ERR(edesc);
1518
1519         /* set encrypt */
1520         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1521
1522         return common_nonsnoop(edesc, areq, ablkcipher_done);
1523 }
1524
1525 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1526 {
1527         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1528         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1529         struct talitos_edesc *edesc;
1530
1531         /* allocate extended descriptor */
1532         edesc = ablkcipher_edesc_alloc(areq, false);
1533         if (IS_ERR(edesc))
1534                 return PTR_ERR(edesc);
1535
1536         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1537
1538         return common_nonsnoop(edesc, areq, ablkcipher_done);
1539 }
1540
/*
 * Undo the DMA mappings created by common_nonsnoop_hash() for one
 * ahash request: result/context out (ptr[5]), data-in scatterlist
 * (ptr[3]), optional hash-context in (ptr[1]), optional HMAC key
 * (ptr[2]), and the link table buffer if one was allocated.
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* hash/HMAC result or intermediate hw context written by the SEC */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	/* data in; hashing has no separate destination scatterlist */
	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1]))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* HMAC key pointer is only populated when ctx->keylen != 0 */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2]))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	/* link table, present only when scatter/gather chaining was needed */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1565
/*
 * Completion callback for hash descriptors.  Stages any leftover
 * partial block for the next update/final/finup, releases the DMA
 * mappings and the extended descriptor, then completes the request
 * towards the crypto API caller with the hardware status.
 */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1586
/*
 * Fill in the seven hardware descriptor pointers for a hash operation
 * and submit the descriptor to the SEC channel.  On any submit result
 * other than -EINPROGRESS the mappings and edesc are torn down here;
 * otherwise the callback (ahash_done) owns the cleanup.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/*
	 * hash context in: only loaded when continuing a previous
	 * operation, or when software pre-seeded the context (sha224
	 * on h/w without native support)
	 */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1652
1653 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1654                                                unsigned int nbytes)
1655 {
1656         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1657         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1658         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1659
1660         return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1661                                    nbytes, 0, 0, 0, areq->base.flags, false);
1662 }
1663
1664 static int ahash_init(struct ahash_request *areq)
1665 {
1666         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1667         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1668
1669         /* Initialize the context */
1670         req_ctx->nbuf = 0;
1671         req_ctx->first = 1; /* first indicates h/w must init its context */
1672         req_ctx->swinit = 0; /* assume h/w init of context */
1673         req_ctx->hw_context_size =
1674                 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1675                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1676                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1677
1678         return 0;
1679 }
1680
1681 /*
1682  * on h/w without explicit sha224 support, we initialize h/w context
1683  * manually with sha224 constants, and tell it to run sha256.
1684  */
1685 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1686 {
1687         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1688
1689         ahash_init(areq);
1690         req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1691
1692         req_ctx->hw_context[0] = SHA224_H0;
1693         req_ctx->hw_context[1] = SHA224_H1;
1694         req_ctx->hw_context[2] = SHA224_H2;
1695         req_ctx->hw_context[3] = SHA224_H3;
1696         req_ctx->hw_context[4] = SHA224_H4;
1697         req_ctx->hw_context[5] = SHA224_H5;
1698         req_ctx->hw_context[6] = SHA224_H6;
1699         req_ctx->hw_context[7] = SHA224_H7;
1700
1701         /* init 64-bit count */
1702         req_ctx->hw_context[8] = 0;
1703         req_ctx->hw_context[9] = 0;
1704
1705         return 0;
1706 }
1707
/*
 * Core of update/final/finup/digest: buffer sub-blocksize tails,
 * chain previously buffered bytes ahead of the new data, stash any
 * partial trailing block for the next call, then build and submit a
 * descriptor covering the block-aligned portion.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;	/* final pass consumes everything */
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* copy the tail aside; ahash_done() moves it into buf afterwards */
	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1791
1792 static int ahash_update(struct ahash_request *areq)
1793 {
1794         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1795
1796         req_ctx->last = 0;
1797
1798         return ahash_process_req(areq, areq->nbytes);
1799 }
1800
1801 static int ahash_final(struct ahash_request *areq)
1802 {
1803         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1804
1805         req_ctx->last = 1;
1806
1807         return ahash_process_req(areq, 0);
1808 }
1809
1810 static int ahash_finup(struct ahash_request *areq)
1811 {
1812         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1813
1814         req_ctx->last = 1;
1815
1816         return ahash_process_req(areq, areq->nbytes);
1817 }
1818
1819 static int ahash_digest(struct ahash_request *areq)
1820 {
1821         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1822         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1823
1824         ahash->init(areq);
1825         req_ctx->last = 1;
1826
1827         return ahash_process_req(areq, areq->nbytes);
1828 }
1829
/* Rendezvous for the synchronous one-shot hash of an over-long HMAC key. */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the hash request */
};
1834
1835 static void keyhash_complete(struct crypto_async_request *req, int err)
1836 {
1837         struct keyhash_result *res = req->data;
1838
1839         if (err == -EINPROGRESS)
1840                 return;
1841
1842         res->err = err;
1843         complete(&res->completion);
1844 }
1845
1846 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1847                    u8 *hash)
1848 {
1849         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1850
1851         struct scatterlist sg[1];
1852         struct ahash_request *req;
1853         struct keyhash_result hresult;
1854         int ret;
1855
1856         init_completion(&hresult.completion);
1857
1858         req = ahash_request_alloc(tfm, GFP_KERNEL);
1859         if (!req)
1860                 return -ENOMEM;
1861
1862         /* Keep tfm keylen == 0 during hash of the long key */
1863         ctx->keylen = 0;
1864         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1865                                    keyhash_complete, &hresult);
1866
1867         sg_init_one(&sg[0], key, keylen);
1868
1869         ahash_request_set_crypt(req, sg, hash, keylen);
1870         ret = crypto_ahash_digest(req);
1871         switch (ret) {
1872         case 0:
1873                 break;
1874         case -EINPROGRESS:
1875         case -EBUSY:
1876                 ret = wait_for_completion_interruptible(
1877                         &hresult.completion);
1878                 if (!ret)
1879                         ret = hresult.err;
1880                 break;
1881         default:
1882                 break;
1883         }
1884         ahash_request_free(req);
1885
1886         return ret;
1887 }
1888
1889 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1890                         unsigned int keylen)
1891 {
1892         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1893         unsigned int blocksize =
1894                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1895         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1896         unsigned int keysize = keylen;
1897         u8 hash[SHA512_DIGEST_SIZE];
1898         int ret;
1899
1900         if (keylen <= blocksize)
1901                 memcpy(ctx->key, key, keysize);
1902         else {
1903                 /* Must get the hash of the long key */
1904                 ret = keyhash(tfm, key, keylen, hash);
1905
1906                 if (ret) {
1907                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1908                         return -EINVAL;
1909                 }
1910
1911                 keysize = digestsize;
1912                 memcpy(ctx->key, hash, digestsize);
1913         }
1914
1915         ctx->keylen = keysize;
1916
1917         return 0;
1918 }
1919
1920
/*
 * One entry of the driver_algs[] registration table: the crypto API
 * algorithm definition (aead/ablkcipher share .crypto, hashes use
 * .hash, discriminated by .type) plus the SEC descriptor header
 * template selecting execution units and modes for that algorithm.
 */
struct talitos_alg_template {
	u32 type;			/* CRYPTO_ALG_TYPE_* discriminator */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;	/* preset SEC descriptor header */
};
1929
1930 static struct talitos_alg_template driver_algs[] = {
1931         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1932         {       .type = CRYPTO_ALG_TYPE_AEAD,
1933                 .alg.crypto = {
1934                         .cra_name = "authenc(hmac(sha1),cbc(aes))",
1935                         .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1936                         .cra_blocksize = AES_BLOCK_SIZE,
1937                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1938                         .cra_aead = {
1939                                 .ivsize = AES_BLOCK_SIZE,
1940                                 .maxauthsize = SHA1_DIGEST_SIZE,
1941                         }
1942                 },
1943                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1944                                      DESC_HDR_SEL0_AESU |
1945                                      DESC_HDR_MODE0_AESU_CBC |
1946                                      DESC_HDR_SEL1_MDEUA |
1947                                      DESC_HDR_MODE1_MDEU_INIT |
1948                                      DESC_HDR_MODE1_MDEU_PAD |
1949                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1950         },
1951         {       .type = CRYPTO_ALG_TYPE_AEAD,
1952                 .alg.crypto = {
1953                         .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1954                         .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1955                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1956                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1957                         .cra_aead = {
1958                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1959                                 .maxauthsize = SHA1_DIGEST_SIZE,
1960                         }
1961                 },
1962                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1963                                      DESC_HDR_SEL0_DEU |
1964                                      DESC_HDR_MODE0_DEU_CBC |
1965                                      DESC_HDR_MODE0_DEU_3DES |
1966                                      DESC_HDR_SEL1_MDEUA |
1967                                      DESC_HDR_MODE1_MDEU_INIT |
1968                                      DESC_HDR_MODE1_MDEU_PAD |
1969                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1970         },
1971         {       .type = CRYPTO_ALG_TYPE_AEAD,
1972                 .alg.crypto = {
1973                         .cra_name = "authenc(hmac(sha224),cbc(aes))",
1974                         .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1975                         .cra_blocksize = AES_BLOCK_SIZE,
1976                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1977                         .cra_aead = {
1978                                 .ivsize = AES_BLOCK_SIZE,
1979                                 .maxauthsize = SHA224_DIGEST_SIZE,
1980                         }
1981                 },
1982                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1983                                      DESC_HDR_SEL0_AESU |
1984                                      DESC_HDR_MODE0_AESU_CBC |
1985                                      DESC_HDR_SEL1_MDEUA |
1986                                      DESC_HDR_MODE1_MDEU_INIT |
1987                                      DESC_HDR_MODE1_MDEU_PAD |
1988                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1989         },
1990         {       .type = CRYPTO_ALG_TYPE_AEAD,
1991                 .alg.crypto = {
1992                         .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1993                         .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1994                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1995                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1996                         .cra_aead = {
1997                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1998                                 .maxauthsize = SHA224_DIGEST_SIZE,
1999                         }
2000                 },
2001                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2002                                      DESC_HDR_SEL0_DEU |
2003                                      DESC_HDR_MODE0_DEU_CBC |
2004                                      DESC_HDR_MODE0_DEU_3DES |
2005                                      DESC_HDR_SEL1_MDEUA |
2006                                      DESC_HDR_MODE1_MDEU_INIT |
2007                                      DESC_HDR_MODE1_MDEU_PAD |
2008                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2009         },
2010         {       .type = CRYPTO_ALG_TYPE_AEAD,
2011                 .alg.crypto = {
2012                         .cra_name = "authenc(hmac(sha256),cbc(aes))",
2013                         .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2014                         .cra_blocksize = AES_BLOCK_SIZE,
2015                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2016                         .cra_aead = {
2017                                 .ivsize = AES_BLOCK_SIZE,
2018                                 .maxauthsize = SHA256_DIGEST_SIZE,
2019                         }
2020                 },
2021                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2022                                      DESC_HDR_SEL0_AESU |
2023                                      DESC_HDR_MODE0_AESU_CBC |
2024                                      DESC_HDR_SEL1_MDEUA |
2025                                      DESC_HDR_MODE1_MDEU_INIT |
2026                                      DESC_HDR_MODE1_MDEU_PAD |
2027                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2028         },
2029         {       .type = CRYPTO_ALG_TYPE_AEAD,
2030                 .alg.crypto = {
2031                         .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2032                         .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2033                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2034                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2035                         .cra_aead = {
2036                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2037                                 .maxauthsize = SHA256_DIGEST_SIZE,
2038                         }
2039                 },
2040                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2041                                      DESC_HDR_SEL0_DEU |
2042                                      DESC_HDR_MODE0_DEU_CBC |
2043                                      DESC_HDR_MODE0_DEU_3DES |
2044                                      DESC_HDR_SEL1_MDEUA |
2045                                      DESC_HDR_MODE1_MDEU_INIT |
2046                                      DESC_HDR_MODE1_MDEU_PAD |
2047                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2048         },
2049         {       .type = CRYPTO_ALG_TYPE_AEAD,
2050                 .alg.crypto = {
2051                         .cra_name = "authenc(hmac(sha384),cbc(aes))",
2052                         .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2053                         .cra_blocksize = AES_BLOCK_SIZE,
2054                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2055                         .cra_aead = {
2056                                 .ivsize = AES_BLOCK_SIZE,
2057                                 .maxauthsize = SHA384_DIGEST_SIZE,
2058                         }
2059                 },
2060                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2061                                      DESC_HDR_SEL0_AESU |
2062                                      DESC_HDR_MODE0_AESU_CBC |
2063                                      DESC_HDR_SEL1_MDEUB |
2064                                      DESC_HDR_MODE1_MDEU_INIT |
2065                                      DESC_HDR_MODE1_MDEU_PAD |
2066                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2067         },
2068         {       .type = CRYPTO_ALG_TYPE_AEAD,
2069                 .alg.crypto = {
2070                         .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2071                         .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2072                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2073                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2074                         .cra_aead = {
2075                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2076                                 .maxauthsize = SHA384_DIGEST_SIZE,
2077                         }
2078                 },
2079                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2080                                      DESC_HDR_SEL0_DEU |
2081                                      DESC_HDR_MODE0_DEU_CBC |
2082                                      DESC_HDR_MODE0_DEU_3DES |
2083                                      DESC_HDR_SEL1_MDEUB |
2084                                      DESC_HDR_MODE1_MDEU_INIT |
2085                                      DESC_HDR_MODE1_MDEU_PAD |
2086                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2087         },
2088         {       .type = CRYPTO_ALG_TYPE_AEAD,
2089                 .alg.crypto = {
2090                         .cra_name = "authenc(hmac(sha512),cbc(aes))",
2091                         .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2092                         .cra_blocksize = AES_BLOCK_SIZE,
2093                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2094                         .cra_aead = {
2095                                 .ivsize = AES_BLOCK_SIZE,
2096                                 .maxauthsize = SHA512_DIGEST_SIZE,
2097                         }
2098                 },
2099                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2100                                      DESC_HDR_SEL0_AESU |
2101                                      DESC_HDR_MODE0_AESU_CBC |
2102                                      DESC_HDR_SEL1_MDEUB |
2103                                      DESC_HDR_MODE1_MDEU_INIT |
2104                                      DESC_HDR_MODE1_MDEU_PAD |
2105                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2106         },
2107         {       .type = CRYPTO_ALG_TYPE_AEAD,
2108                 .alg.crypto = {
2109                         .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2110                         .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2111                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2112                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2113                         .cra_aead = {
2114                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2115                                 .maxauthsize = SHA512_DIGEST_SIZE,
2116                         }
2117                 },
2118                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2119                                      DESC_HDR_SEL0_DEU |
2120                                      DESC_HDR_MODE0_DEU_CBC |
2121                                      DESC_HDR_MODE0_DEU_3DES |
2122                                      DESC_HDR_SEL1_MDEUB |
2123                                      DESC_HDR_MODE1_MDEU_INIT |
2124                                      DESC_HDR_MODE1_MDEU_PAD |
2125                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2126         },
2127         {       .type = CRYPTO_ALG_TYPE_AEAD,
2128                 .alg.crypto = {
2129                         .cra_name = "authenc(hmac(md5),cbc(aes))",
2130                         .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2131                         .cra_blocksize = AES_BLOCK_SIZE,
2132                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2133                         .cra_aead = {
2134                                 .ivsize = AES_BLOCK_SIZE,
2135                                 .maxauthsize = MD5_DIGEST_SIZE,
2136                         }
2137                 },
2138                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2139                                      DESC_HDR_SEL0_AESU |
2140                                      DESC_HDR_MODE0_AESU_CBC |
2141                                      DESC_HDR_SEL1_MDEUA |
2142                                      DESC_HDR_MODE1_MDEU_INIT |
2143                                      DESC_HDR_MODE1_MDEU_PAD |
2144                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2145         },
2146         {       .type = CRYPTO_ALG_TYPE_AEAD,
2147                 .alg.crypto = {
2148                         .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2149                         .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2150                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2151                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2152                         .cra_aead = {
2153                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2154                                 .maxauthsize = MD5_DIGEST_SIZE,
2155                         }
2156                 },
2157                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2158                                      DESC_HDR_SEL0_DEU |
2159                                      DESC_HDR_MODE0_DEU_CBC |
2160                                      DESC_HDR_MODE0_DEU_3DES |
2161                                      DESC_HDR_SEL1_MDEUA |
2162                                      DESC_HDR_MODE1_MDEU_INIT |
2163                                      DESC_HDR_MODE1_MDEU_PAD |
2164                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2165         },
2166         /* ABLKCIPHER algorithms. */
2167         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2168                 .alg.crypto = {
2169                         .cra_name = "cbc(aes)",
2170                         .cra_driver_name = "cbc-aes-talitos",
2171                         .cra_blocksize = AES_BLOCK_SIZE,
2172                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2173                                      CRYPTO_ALG_ASYNC,
2174                         .cra_ablkcipher = {
2175                                 .min_keysize = AES_MIN_KEY_SIZE,
2176                                 .max_keysize = AES_MAX_KEY_SIZE,
2177                                 .ivsize = AES_BLOCK_SIZE,
2178                         }
2179                 },
2180                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2181                                      DESC_HDR_SEL0_AESU |
2182                                      DESC_HDR_MODE0_AESU_CBC,
2183         },
2184         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2185                 .alg.crypto = {
2186                         .cra_name = "cbc(des3_ede)",
2187                         .cra_driver_name = "cbc-3des-talitos",
2188                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2189                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2190                                      CRYPTO_ALG_ASYNC,
2191                         .cra_ablkcipher = {
2192                                 .min_keysize = DES3_EDE_KEY_SIZE,
2193                                 .max_keysize = DES3_EDE_KEY_SIZE,
2194                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2195                         }
2196                 },
2197                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2198                                      DESC_HDR_SEL0_DEU |
2199                                      DESC_HDR_MODE0_DEU_CBC |
2200                                      DESC_HDR_MODE0_DEU_3DES,
2201         },
2202         /* AHASH algorithms. */
2203         {       .type = CRYPTO_ALG_TYPE_AHASH,
2204                 .alg.hash = {
2205                         .halg.digestsize = MD5_DIGEST_SIZE,
2206                         .halg.base = {
2207                                 .cra_name = "md5",
2208                                 .cra_driver_name = "md5-talitos",
2209                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2210                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2211                                              CRYPTO_ALG_ASYNC,
2212                         }
2213                 },
2214                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2215                                      DESC_HDR_SEL0_MDEUA |
2216                                      DESC_HDR_MODE0_MDEU_MD5,
2217         },
2218         {       .type = CRYPTO_ALG_TYPE_AHASH,
2219                 .alg.hash = {
2220                         .halg.digestsize = SHA1_DIGEST_SIZE,
2221                         .halg.base = {
2222                                 .cra_name = "sha1",
2223                                 .cra_driver_name = "sha1-talitos",
2224                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2225                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2226                                              CRYPTO_ALG_ASYNC,
2227                         }
2228                 },
2229                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2230                                      DESC_HDR_SEL0_MDEUA |
2231                                      DESC_HDR_MODE0_MDEU_SHA1,
2232         },
2233         {       .type = CRYPTO_ALG_TYPE_AHASH,
2234                 .alg.hash = {
2235                         .halg.digestsize = SHA224_DIGEST_SIZE,
2236                         .halg.base = {
2237                                 .cra_name = "sha224",
2238                                 .cra_driver_name = "sha224-talitos",
2239                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2240                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2241                                              CRYPTO_ALG_ASYNC,
2242                         }
2243                 },
2244                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2245                                      DESC_HDR_SEL0_MDEUA |
2246                                      DESC_HDR_MODE0_MDEU_SHA224,
2247         },
2248         {       .type = CRYPTO_ALG_TYPE_AHASH,
2249                 .alg.hash = {
2250                         .halg.digestsize = SHA256_DIGEST_SIZE,
2251                         .halg.base = {
2252                                 .cra_name = "sha256",
2253                                 .cra_driver_name = "sha256-talitos",
2254                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2255                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2256                                              CRYPTO_ALG_ASYNC,
2257                         }
2258                 },
2259                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2260                                      DESC_HDR_SEL0_MDEUA |
2261                                      DESC_HDR_MODE0_MDEU_SHA256,
2262         },
2263         {       .type = CRYPTO_ALG_TYPE_AHASH,
2264                 .alg.hash = {
2265                         .halg.digestsize = SHA384_DIGEST_SIZE,
2266                         .halg.base = {
2267                                 .cra_name = "sha384",
2268                                 .cra_driver_name = "sha384-talitos",
2269                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2270                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2271                                              CRYPTO_ALG_ASYNC,
2272                         }
2273                 },
2274                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2275                                      DESC_HDR_SEL0_MDEUB |
2276                                      DESC_HDR_MODE0_MDEUB_SHA384,
2277         },
2278         {       .type = CRYPTO_ALG_TYPE_AHASH,
2279                 .alg.hash = {
2280                         .halg.digestsize = SHA512_DIGEST_SIZE,
2281                         .halg.base = {
2282                                 .cra_name = "sha512",
2283                                 .cra_driver_name = "sha512-talitos",
2284                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2285                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2286                                              CRYPTO_ALG_ASYNC,
2287                         }
2288                 },
2289                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2290                                      DESC_HDR_SEL0_MDEUB |
2291                                      DESC_HDR_MODE0_MDEUB_SHA512,
2292         },
2293         {       .type = CRYPTO_ALG_TYPE_AHASH,
2294                 .alg.hash = {
2295                         .halg.digestsize = MD5_DIGEST_SIZE,
2296                         .halg.base = {
2297                                 .cra_name = "hmac(md5)",
2298                                 .cra_driver_name = "hmac-md5-talitos",
2299                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2300                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2301                                              CRYPTO_ALG_ASYNC,
2302                         }
2303                 },
2304                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2305                                      DESC_HDR_SEL0_MDEUA |
2306                                      DESC_HDR_MODE0_MDEU_MD5,
2307         },
2308         {       .type = CRYPTO_ALG_TYPE_AHASH,
2309                 .alg.hash = {
2310                         .halg.digestsize = SHA1_DIGEST_SIZE,
2311                         .halg.base = {
2312                                 .cra_name = "hmac(sha1)",
2313                                 .cra_driver_name = "hmac-sha1-talitos",
2314                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2315                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2316                                              CRYPTO_ALG_ASYNC,
2317                         }
2318                 },
2319                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2320                                      DESC_HDR_SEL0_MDEUA |
2321                                      DESC_HDR_MODE0_MDEU_SHA1,
2322         },
2323         {       .type = CRYPTO_ALG_TYPE_AHASH,
2324                 .alg.hash = {
2325                         .halg.digestsize = SHA224_DIGEST_SIZE,
2326                         .halg.base = {
2327                                 .cra_name = "hmac(sha224)",
2328                                 .cra_driver_name = "hmac-sha224-talitos",
2329                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2330                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2331                                              CRYPTO_ALG_ASYNC,
2332                         }
2333                 },
2334                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2335                                      DESC_HDR_SEL0_MDEUA |
2336                                      DESC_HDR_MODE0_MDEU_SHA224,
2337         },
2338         {       .type = CRYPTO_ALG_TYPE_AHASH,
2339                 .alg.hash = {
2340                         .halg.digestsize = SHA256_DIGEST_SIZE,
2341                         .halg.base = {
2342                                 .cra_name = "hmac(sha256)",
2343                                 .cra_driver_name = "hmac-sha256-talitos",
2344                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2345                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2346                                              CRYPTO_ALG_ASYNC,
2347                         }
2348                 },
2349                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2350                                      DESC_HDR_SEL0_MDEUA |
2351                                      DESC_HDR_MODE0_MDEU_SHA256,
2352         },
2353         {       .type = CRYPTO_ALG_TYPE_AHASH,
2354                 .alg.hash = {
2355                         .halg.digestsize = SHA384_DIGEST_SIZE,
2356                         .halg.base = {
2357                                 .cra_name = "hmac(sha384)",
2358                                 .cra_driver_name = "hmac-sha384-talitos",
2359                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2360                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2361                                              CRYPTO_ALG_ASYNC,
2362                         }
2363                 },
2364                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2365                                      DESC_HDR_SEL0_MDEUB |
2366                                      DESC_HDR_MODE0_MDEUB_SHA384,
2367         },
2368         {       .type = CRYPTO_ALG_TYPE_AHASH,
2369                 .alg.hash = {
2370                         .halg.digestsize = SHA512_DIGEST_SIZE,
2371                         .halg.base = {
2372                                 .cra_name = "hmac(sha512)",
2373                                 .cra_driver_name = "hmac-sha512-talitos",
2374                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2375                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2376                                              CRYPTO_ALG_ASYNC,
2377                         }
2378                 },
2379                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2380                                      DESC_HDR_SEL0_MDEUB |
2381                                      DESC_HDR_MODE0_MDEUB_SHA512,
2382         }
2383 };
2384
/*
 * Per-device instance of one algorithm template: binds a registered
 * crypto algorithm back to the talitos device that services it.
 */
struct talitos_crypto_alg {
	struct list_head entry;	/* node in talitos_private->alg_list */
	struct device *dev;	/* SEC device backing this algorithm */
	struct talitos_alg_template algt;	/* copy of the driver_algs template */
};
2390
2391 static int talitos_cra_init(struct crypto_tfm *tfm)
2392 {
2393         struct crypto_alg *alg = tfm->__crt_alg;
2394         struct talitos_crypto_alg *talitos_alg;
2395         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2396         struct talitos_private *priv;
2397
2398         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2399                 talitos_alg = container_of(__crypto_ahash_alg(alg),
2400                                            struct talitos_crypto_alg,
2401                                            algt.alg.hash);
2402         else
2403                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2404                                            algt.alg.crypto);
2405
2406         /* update context with ptr to dev */
2407         ctx->dev = talitos_alg->dev;
2408
2409         /* assign SEC channel to tfm in round-robin fashion */
2410         priv = dev_get_drvdata(ctx->dev);
2411         ctx->ch = atomic_inc_return(&priv->last_chan) &
2412                   (priv->num_channels - 1);
2413
2414         /* copy descriptor header template value */
2415         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2416
2417         /* select done notification */
2418         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2419
2420         return 0;
2421 }
2422
2423 static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2424 {
2425         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2426
2427         talitos_cra_init(tfm);
2428
2429         /* random first IV */
2430         get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2431
2432         return 0;
2433 }
2434
2435 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2436 {
2437         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2438
2439         talitos_cra_init(tfm);
2440
2441         ctx->keylen = 0;
2442         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2443                                  sizeof(struct talitos_ahash_req_ctx));
2444
2445         return 0;
2446 }
2447
2448 /*
2449  * given the alg's descriptor header template, determine whether descriptor
2450  * type and primary/secondary execution units required match the hw
2451  * capabilities description provided in the device tree node.
2452  */
2453 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2454 {
2455         struct talitos_private *priv = dev_get_drvdata(dev);
2456         int ret;
2457
2458         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2459               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2460
2461         if (SECONDARY_EU(desc_hdr_template))
2462                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2463                               & priv->exec_units);
2464
2465         return ret;
2466 }
2467
/*
 * Tear down a talitos device. Also used as the error-unwind path from
 * talitos_probe() (its err_out label calls here), so every step must
 * tolerate partially-initialized state (NULL fifos, unmapped irqs, ...).
 */
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	/* unregister every algorithm we managed to register, then free it */
	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	/* kfree(NULL) is a no-op, so partially-allocated fifos are fine */
	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	/* release both possible irq lines; slot may be 0 if never mapped */
	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	/* irqs are gone, so the tasklets can no longer be (re)scheduled */
	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}
2513
2514 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2515                                                     struct talitos_alg_template
2516                                                            *template)
2517 {
2518         struct talitos_private *priv = dev_get_drvdata(dev);
2519         struct talitos_crypto_alg *t_alg;
2520         struct crypto_alg *alg;
2521
2522         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2523         if (!t_alg)
2524                 return ERR_PTR(-ENOMEM);
2525
2526         t_alg->algt = *template;
2527
2528         switch (t_alg->algt.type) {
2529         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2530                 alg = &t_alg->algt.alg.crypto;
2531                 alg->cra_init = talitos_cra_init;
2532                 alg->cra_type = &crypto_ablkcipher_type;
2533                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2534                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2535                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2536                 alg->cra_ablkcipher.geniv = "eseqiv";
2537                 break;
2538         case CRYPTO_ALG_TYPE_AEAD:
2539                 alg = &t_alg->algt.alg.crypto;
2540                 alg->cra_init = talitos_cra_init_aead;
2541                 alg->cra_type = &crypto_aead_type;
2542                 alg->cra_aead.setkey = aead_setkey;
2543                 alg->cra_aead.setauthsize = aead_setauthsize;
2544                 alg->cra_aead.encrypt = aead_encrypt;
2545                 alg->cra_aead.decrypt = aead_decrypt;
2546                 alg->cra_aead.givencrypt = aead_givencrypt;
2547                 alg->cra_aead.geniv = "<built-in>";
2548                 break;
2549         case CRYPTO_ALG_TYPE_AHASH:
2550                 alg = &t_alg->algt.alg.hash.halg.base;
2551                 alg->cra_init = talitos_cra_init_ahash;
2552                 alg->cra_type = &crypto_ahash_type;
2553                 t_alg->algt.alg.hash.init = ahash_init;
2554                 t_alg->algt.alg.hash.update = ahash_update;
2555                 t_alg->algt.alg.hash.final = ahash_final;
2556                 t_alg->algt.alg.hash.finup = ahash_finup;
2557                 t_alg->algt.alg.hash.digest = ahash_digest;
2558                 t_alg->algt.alg.hash.setkey = ahash_setkey;
2559
2560                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2561                     !strncmp(alg->cra_name, "hmac", 4)) {
2562                         kfree(t_alg);
2563                         return ERR_PTR(-ENOTSUPP);
2564                 }
2565                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2566                     (!strcmp(alg->cra_name, "sha224") ||
2567                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
2568                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2569                         t_alg->algt.desc_hdr_template =
2570                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2571                                         DESC_HDR_SEL0_MDEUA |
2572                                         DESC_HDR_MODE0_MDEU_SHA256;
2573                 }
2574                 break;
2575         default:
2576                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2577                 return ERR_PTR(-EINVAL);
2578         }
2579
2580         alg->cra_module = THIS_MODULE;
2581         alg->cra_priority = TALITOS_CRA_PRIORITY;
2582         alg->cra_alignmask = 0;
2583         alg->cra_ctxsize = sizeof(struct talitos_ctx);
2584         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2585
2586         t_alg->dev = dev;
2587
2588         return t_alg;
2589 }
2590
2591 static int talitos_probe_irq(struct platform_device *ofdev)
2592 {
2593         struct device *dev = &ofdev->dev;
2594         struct device_node *np = ofdev->dev.of_node;
2595         struct talitos_private *priv = dev_get_drvdata(dev);
2596         int err;
2597
2598         priv->irq[0] = irq_of_parse_and_map(np, 0);
2599         if (!priv->irq[0]) {
2600                 dev_err(dev, "failed to map irq\n");
2601                 return -EINVAL;
2602         }
2603
2604         priv->irq[1] = irq_of_parse_and_map(np, 1);
2605
2606         /* get the primary irq line */
2607         if (!priv->irq[1]) {
2608                 err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2609                                   dev_driver_string(dev), dev);
2610                 goto primary_out;
2611         }
2612
2613         err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2614                           dev_driver_string(dev), dev);
2615         if (err)
2616                 goto primary_out;
2617
2618         /* get the secondary irq line */
2619         err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2620                           dev_driver_string(dev), dev);
2621         if (err) {
2622                 dev_err(dev, "failed to request secondary irq\n");
2623                 irq_dispose_mapping(priv->irq[1]);
2624                 priv->irq[1] = 0;
2625         }
2626
2627         return err;
2628
2629 primary_out:
2630         if (err) {
2631                 dev_err(dev, "failed to request primary irq\n");
2632                 irq_dispose_mapping(priv->irq[0]);
2633                 priv->irq[0] = 0;
2634         }
2635
2636         return err;
2637 }
2638
2639 static int talitos_probe(struct platform_device *ofdev)
2640 {
2641         struct device *dev = &ofdev->dev;
2642         struct device_node *np = ofdev->dev.of_node;
2643         struct talitos_private *priv;
2644         const unsigned int *prop;
2645         int i, err;
2646
2647         priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2648         if (!priv)
2649                 return -ENOMEM;
2650
2651         INIT_LIST_HEAD(&priv->alg_list);
2652
2653         dev_set_drvdata(dev, priv);
2654
2655         priv->ofdev = ofdev;
2656
2657         spin_lock_init(&priv->reg_lock);
2658
2659         err = talitos_probe_irq(ofdev);
2660         if (err)
2661                 goto err_out;
2662
2663         if (!priv->irq[1]) {
2664                 tasklet_init(&priv->done_task[0], talitos_done_4ch,
2665                              (unsigned long)dev);
2666         } else {
2667                 tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2668                              (unsigned long)dev);
2669                 tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2670                              (unsigned long)dev);
2671         }
2672
2673         priv->reg = of_iomap(np, 0);
2674         if (!priv->reg) {
2675                 dev_err(dev, "failed to of_iomap\n");
2676                 err = -ENOMEM;
2677                 goto err_out;
2678         }
2679
2680         /* get SEC version capabilities from device tree */
2681         prop = of_get_property(np, "fsl,num-channels", NULL);
2682         if (prop)
2683                 priv->num_channels = *prop;
2684
2685         prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2686         if (prop)
2687                 priv->chfifo_len = *prop;
2688
2689         prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2690         if (prop)
2691                 priv->exec_units = *prop;
2692
2693         prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2694         if (prop)
2695                 priv->desc_types = *prop;
2696
2697         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2698             !priv->exec_units || !priv->desc_types) {
2699                 dev_err(dev, "invalid property data in device tree node\n");
2700                 err = -EINVAL;
2701                 goto err_out;
2702         }
2703
2704         if (of_device_is_compatible(np, "fsl,sec3.0"))
2705                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2706
2707         if (of_device_is_compatible(np, "fsl,sec2.1"))
2708                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2709                                   TALITOS_FTR_SHA224_HWINIT |
2710                                   TALITOS_FTR_HMAC_OK;
2711
2712         if (of_device_is_compatible(np, "fsl,sec1.0"))
2713                 priv->features |= TALITOS_FTR_SEC1;
2714
2715         priv->chan = kzalloc(sizeof(struct talitos_channel) *
2716                              priv->num_channels, GFP_KERNEL);
2717         if (!priv->chan) {
2718                 dev_err(dev, "failed to allocate channel management space\n");
2719                 err = -ENOMEM;
2720                 goto err_out;
2721         }
2722
2723         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2724
2725         for (i = 0; i < priv->num_channels; i++) {
2726                 priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2727                 if (!priv->irq[1] || !(i & 1))
2728                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2729
2730                 spin_lock_init(&priv->chan[i].head_lock);
2731                 spin_lock_init(&priv->chan[i].tail_lock);
2732
2733                 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2734                                              priv->fifo_len, GFP_KERNEL);
2735                 if (!priv->chan[i].fifo) {
2736                         dev_err(dev, "failed to allocate request fifo %d\n", i);
2737                         err = -ENOMEM;
2738                         goto err_out;
2739                 }
2740
2741                 atomic_set(&priv->chan[i].submit_count,
2742                            -(priv->chfifo_len - 1));
2743         }
2744
2745         dma_set_mask(dev, DMA_BIT_MASK(36));
2746
2747         /* reset and initialize the h/w */
2748         err = init_device(dev);
2749         if (err) {
2750                 dev_err(dev, "failed to initialize device\n");
2751                 goto err_out;
2752         }
2753
2754         /* register the RNG, if available */
2755         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2756                 err = talitos_register_rng(dev);
2757                 if (err) {
2758                         dev_err(dev, "failed to register hwrng: %d\n", err);
2759                         goto err_out;
2760                 } else
2761                         dev_info(dev, "hwrng\n");
2762         }
2763
2764         /* register crypto algorithms the device supports */
2765         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2766                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2767                         struct talitos_crypto_alg *t_alg;
2768                         char *name = NULL;
2769
2770                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2771                         if (IS_ERR(t_alg)) {
2772                                 err = PTR_ERR(t_alg);
2773                                 if (err == -ENOTSUPP)
2774                                         continue;
2775                                 goto err_out;
2776                         }
2777
2778                         switch (t_alg->algt.type) {
2779                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2780                         case CRYPTO_ALG_TYPE_AEAD:
2781                                 err = crypto_register_alg(
2782                                                 &t_alg->algt.alg.crypto);
2783                                 name = t_alg->algt.alg.crypto.cra_driver_name;
2784                                 break;
2785                         case CRYPTO_ALG_TYPE_AHASH:
2786                                 err = crypto_register_ahash(
2787                                                 &t_alg->algt.alg.hash);
2788                                 name =
2789                                  t_alg->algt.alg.hash.halg.base.cra_driver_name;
2790                                 break;
2791                         }
2792                         if (err) {
2793                                 dev_err(dev, "%s alg registration failed\n",
2794                                         name);
2795                                 kfree(t_alg);
2796                         } else
2797                                 list_add_tail(&t_alg->entry, &priv->alg_list);
2798                 }
2799         }
2800         if (!list_empty(&priv->alg_list))
2801                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2802                          (char *)of_get_property(np, "compatible", NULL));
2803
2804         return 0;
2805
2806 err_out:
2807         talitos_remove(ofdev);
2808
2809         return err;
2810 }
2811
/*
 * Device-tree match table. Only "fsl,sec2.0" is listed here; finer
 * capability distinctions (sec2.1/sec3.0/sec1.0) are detected in
 * talitos_probe() via of_device_is_compatible().
 */
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
2819
/* Platform driver glue: probe/remove bound via the OF match table above. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");