crypto: img-hash - Add Imagination Technologies hw hash accelerator
drivers/crypto/img-hash.c
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors:  Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 *      Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET                        0
#define CR_RESET_SET                    1
#define CR_RESET_UNSET                  0

#define CR_MESSAGE_LENGTH_H             0x4
#define CR_MESSAGE_LENGTH_L             0x8

#define CR_CONTROL                      0xc
#define CR_CONTROL_BYTE_ORDER_3210      0
#define CR_CONTROL_BYTE_ORDER_0123      1
#define CR_CONTROL_BYTE_ORDER_2310      2
#define CR_CONTROL_BYTE_ORDER_1032      3
#define CR_CONTROL_BYTE_ORDER_SHIFT     8
#define CR_CONTROL_ALGO_MD5     0
#define CR_CONTROL_ALGO_SHA1    1
#define CR_CONTROL_ALGO_SHA224  2
#define CR_CONTROL_ALGO_SHA256  3

#define CR_INTSTAT                      0x10
#define CR_INTENAB                      0x14
#define CR_INTCLEAR                     0x18
#define CR_INT_RESULTS_AVAILABLE        BIT(0)
#define CR_INT_NEW_RESULTS_SET          BIT(1)
#define CR_INT_RESULT_READ_ERR          BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR      BIT(3)
#define CR_INT_STATUS                   BIT(8)

#define CR_RESULT_QUEUE         0x1c
#define CR_RSD0                         0x40
#define CR_CORE_REV                     0x50
#define CR_CORE_DES1            0x60
#define CR_CORE_DES2            0x70

#define DRIVER_FLAGS_BUSY               BIT(0)
#define DRIVER_FLAGS_FINAL              BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE         BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY       BIT(3)
#define DRIVER_FLAGS_INIT               BIT(4)
#define DRIVER_FLAGS_CPU                BIT(5)
#define DRIVER_FLAGS_DMA_READY          BIT(6)
#define DRIVER_FLAGS_ERROR              BIT(7)
#define DRIVER_FLAGS_SG                 BIT(8)
#define DRIVER_FLAGS_SHA1               BIT(18)
#define DRIVER_FLAGS_SHA224             BIT(19)
#define DRIVER_FLAGS_SHA256             BIT(20)
#define DRIVER_FLAGS_MD5                BIT(21)

#define IMG_HASH_QUEUE_LENGTH           20
#define IMG_HASH_DMA_THRESHOLD          64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER             CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
        struct img_hash_dev     *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long           flags;
        size_t                  digsize;

        dma_addr_t              dma_addr;
        size_t                  dma_ct;

        /* sg root */
        struct scatterlist      *sgfirst;
        /* walk state */
        struct scatterlist      *sg;
        size_t                  nents;
        size_t                  offset;
        unsigned int            total;
        size_t                  sent;

        unsigned long           op;

        size_t                  bufcnt;
        struct ahash_request    fallback_req;

        /* Zero length array - must remain last member of struct */
        u8 buffer[0] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev     *hdev;
        unsigned long           flags;
        struct crypto_ahash     *fallback;
};

struct img_hash_dev {
        struct list_head        list;
        struct device           *dev;
        struct clk              *hash_clk;
        struct clk              *sys_clk;
        void __iomem            *io_base;

        phys_addr_t             bus_addr;
        void __iomem            *cpu_addr;

        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
        struct tasklet_struct   dma_task;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;

        struct dma_chan         *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

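/*
 * Select the byte order and hash algorithm for the current request and
 * write them to the control register to start the hardware.
 */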
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;
        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non-DMA mode. To
         * ensure the first data write is not grouped in a burst with the
         * control register write, a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}

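/* PIO path: push the data to the write port one 32-bit word at a time. */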
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}

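/* Map a single sg entry and queue it on the "tx" slave DMA channel. */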
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}

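/*
 * Small requests (below IMG_HASH_DMA_THRESHOLD) are copied into the
 * context buffer and written out entirely by the CPU.
 */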
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %u\n", ctx->total);

        if (!ctx->total)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err = -EINVAL;

        hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
        if (!hdev->dma_lch) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return -EBUSY;
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = 16;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}

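/*
 * Walk the scatterlist, sending each entry rounded down to a whole number
 * of 32-bit words by DMA; the remainder is staged in ctx->buffer and
 * written out by the CPU from the DMA completion callback.
 */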
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each DMA (i.e. per page) is not a multiple of 4 bytes, the
         * padding bytes in the last word written by that DMA would erroneously
         * be included in the hash. To avoid this we round down the transfer,
         * and add the excess to the start of the next DMA. It does not matter
         * that the final DMA may not be a multiple of 4 bytes as the hashing
         * block is programmed to accept the correct number of bytes.
         */

        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

        return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request (%u bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request (%u bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }
        return err;
}

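/*
 * Reset the block, unmask the result interrupt and program the total
 * message length in bits; the hardware requires the length before any
 * data is written.
 */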
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

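/*
 * Enqueue the request and, if the hardware is idle, dequeue the next one
 * and start processing it. Also called from the done tasklet with
 * req == NULL to pick up any backlog.
 */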
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_dbg(hdev->dev, "processing req, op: %lu, bytes: %u\n",
                ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }
        return res;
}

static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

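/*
 * Only one-shot digest() requests go to the hardware; init/update/final
 * are delegated to the software fallback, since the block must be
 * programmed with the total message length up front.
 */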
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;
        int err;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;
        } else {
                hdev = tctx->hdev;
        }
        spin_unlock(&img_hash.lock);

        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        err = img_hash_handle_queue(tctx->hdev, req);

        return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        const char *alg_name = crypto_tfm_alg_name(tfm);

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                return PTR_ERR(ctx->fallback);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}

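/* Acknowledge the interrupt and defer completion to the done tasklet. */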
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
        return 0;
}

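/*
 * Completion tasklet: collect the result once the hardware signals it,
 * finish the current request and restart the queue when idle.
 */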
static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                        DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

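/*
 * Probe: map the register bank and the write port, wire up the IRQ,
 * clocks and DMA channel, then register the four hash algorithms.
 */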
static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        hdev->io_base = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                dev_err(dev, "can't ioremap, returned %d\n", err);
                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = clk_prepare_enable(hdev->hash_clk);
        if (err)
                goto res_err;

        err = clk_prepare_enable(hdev->sys_clk);
        if (err)
                goto clk_err;

        err = img_hash_dma_init(hdev);
        if (err)
                goto dma_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
dma_err:
        clk_disable_unprepare(hdev->sys_clk);
clk_err:
        clk_disable_unprepare(hdev->hash_clk);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;

        hdev = platform_get_drvdata(pdev);
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static struct platform_driver img_hash_driver = {
        .probe          = img_hash_probe,
        .remove         = img_hash_remove,
        .driver         = {
                .name   = "img-hash-accelerator",
                .of_match_table = of_match_ptr(img_hash_match),
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");