/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF
#define MV_CESA_EXPIRE		500 /* msec */

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * The sg helpers are used to iterate over the scatterlist. Since the size
 * of the SRAM may be less than the scatter size, this struct is used to
 * keep track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct clk *clk;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct timer_list completion_timer;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};

static void mv_completion_timer_callback(unsigned long unused)
{
	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;

	printk(KERN_ERR MV_CESA
	       "completion timer expired (CESA %sactive), cleaning up.\n",
	       active ? "" : "in");

	del_timer(&cpg->completion_timer);
	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
	while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
		printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n",
		       __func__);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
}

static void mv_setup_timer(void)
{
	setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
	mod_timer(&cpg->completion_timer,
		  jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
}

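/*
 * The engine has no key-schedule unit for AES decryption, so the driver
 * derives the decryption key on the CPU: it expands the encryption key
 * and copies the final round keys from the tail of the schedule. This is
 * done lazily (need_calc_aes_dkey) on the first decrypt after a setkey.
 */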
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

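/*
 * Copy up to @len bytes from the request's source scatterlist into the
 * linear buffer @dbuf, advancing the sg_miter cursor to the next
 * scatterlist entry whenever the current one is exhausted.
 */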
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}

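/*
 * Stage the next chunk of input in the engine's SRAM: fill the DATA_IN
 * region up to max_req_size bytes, continuing behind whatever is already
 * buffered there (p->crypt_len).
 */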
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

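/*
 * Program one hash fragment. The engine distinguishes first/middle/last
 * fragments; for anything but the first fragment of a request the
 * previously saved digest state is reloaded into the DIGEST_INITIAL_VAL
 * registers before the operation is started.
 */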
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
		       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p =
		MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest =
		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		if (first_block) {
			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
		}
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now...
		 */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
{
	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else {
			mv_save_digest_state(ctx);
			mv_hash_final_fallback(req);
		}
	} else {
		mv_save_digest_state(ctx);
	}
}

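/*
 * Runs from the queue thread once the interrupt has signalled completion
 * of the current SRAM-sized chunk: copy the output back into the
 * destination scatterlist (crypt only), then either start the next chunk
 * or complete the request and return the engine to idle.
 */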
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

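/* Count how many scatterlist entries are needed to cover total_bytes. */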
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

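/*
 * Kick off a new ablkcipher request on the engine: reset the progress
 * state, start sg iterators over src and dst, and program the first
 * SRAM-sized chunk.
 */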
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;
	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

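/*
 * The queue thread serialises access to the single accelerator: it
 * dequeues completed chunks, pulls the next request off the crypto queue
 * when the engine is idle, and sleeps otherwise.
 */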
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

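/* Enqueue a request for the queue thread and wake it up. */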
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	ahash_request_set_crypt(req, NULL, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;
	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned char ipad[ss];
		unsigned char opad[ss];
		int i;

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(&desc.shash, key, keylen,
						  ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

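		/*
		 * Standard HMAC key preparation (RFC 2104): XOR the padded
		 * key with the ipad (0x36) and opad (0x5c) constants.
		 */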
		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

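/*
 * Accelerator interrupt: acknowledge SEC_INT_ACCEL0_DONE, cancel the
 * completion watchdog and hand the engine over to the queue thread for
 * dequeueing.
 */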
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	if (!del_timer(&cpg->completion_timer)) {
		printk(KERN_WARNING MV_CESA
		       "got an interrupt but no pending timer?\n");
	}
	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= 16,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

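/*
 * A minimal usage sketch (not part of this driver), assuming the module
 * has probed successfully: a kernel consumer reaches the engine through
 * the regular async crypto API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * The crypto core then picks "mv-cbc-aes" over the software
 * implementation based on its higher cra_priority.
 */
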
static struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			    CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			    CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

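/*
 * Probe: map the register and SRAM resources, start the queue thread,
 * hook up the interrupt, optionally enable the clock, and register the
 * cipher and hash algorithms with the crypto core.
 */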
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	/* Not all platforms can gate the clock, so it is not
	   an error if the clock does not exist. */
	cp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(cp->clk))
		clk_prepare_enable(cp->clk);

	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);

	if (!IS_ERR(cp->clk)) {
		clk_disable_unprepare(cp->clk);
		clk_put(cp->clk);
	}

	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");