/*
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8
/* AES flags */
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)
#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	16
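
/*
 * Requests larger than ATMEL_AES_DMA_THRESHOLD bytes are fed to the
 * engine through the two DMA channels; smaller requests are written to
 * IDATAR directly by the CPU and completed from the DATARDY interrupt
 * (see atmel_aes_handle_queue()).
 */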
struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];

	u16		block_size;
};

struct atmel_aes_reqctx {
	unsigned long mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};
struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t			bufcnt;
	size_t			buflen;
	size_t			dma_size;

	void			*buf_in;
	dma_addr_t		dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void			*buf_out;
	dma_addr_t		dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};
struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	unsigned int len;
	int sg_nb = 0;
	struct scatterlist *sg_list = sg;

	/* Count the scatterlist entries needed to cover req->nbytes. */
	while (total) {
		len = min(sg_list->length, total);
		sg_nb++;
		total -= len;
		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}
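
/*
 * The key, IV and data registers are banks of consecutive 32-bit words,
 * which is why atmel_aes_read_n()/atmel_aes_write_n() advance the
 * register offset by 4 per word transferred.
 */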
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
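
/*
 * AES_FLAGS_INIT is never cleared once set, so the software reset and
 * mode-register setup above run only on the first request after probe;
 * later calls just re-enable the peripheral clock.
 */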
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}
static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}
static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;

	dd->dma_size = length;

	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
				   DMA_FROM_DEVICE);

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
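
/*
 * Note the submission order above: the device-to-memory descriptor is
 * issued before the memory-to-device one, so the output channel is
 * already armed when the engine starts producing data, and completion
 * is signalled solely from the output channel's callback.
 */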
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
				dd->dma_size, DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);
	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;
	} else {
		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
					dd->dma_size, DMA_TO_DEVICE);

		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
	}

	return err;
}
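
/*
 * Two paths feed the DMA engine: if both scatterlists are word aligned
 * and a whole number of blocks long, the "fast" path maps them
 * directly; otherwise the data is bounced through buf_in/buf_out one
 * chunk at a time via atmel_aes_sg_copy().
 */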
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
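
/*
 * atmel_aes_handle_queue() is the single entry point for starting
 * work: callers enqueue a request (or pass NULL to only kick the
 * queue), and whichever context finds the device idle dequeues the
 * next request and programs the hardware.
 */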
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not exact amount of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CTR);
}
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};
static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
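
/*
 * For reference, consumers reach these algorithms through the generic
 * crypto API rather than by calling this driver directly. A minimal
 * sketch (request allocation and error handling elided):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 */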
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
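
/*
 * The hard-IRQ handler above only acknowledges and masks the
 * interrupt; the actual data handling runs later in done_task, keeping
 * interrupt context as short as possible.
 */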
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	return 0;
}
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");