3 * sep_crypto.c - Crypto interface structures
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
35 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/miscdevice.h>
39 #include <linux/cdev.h>
40 #include <linux/kdev_t.h>
41 #include <linux/mutex.h>
42 #include <linux/sched.h>
44 #include <linux/poll.h>
45 #include <linux/wait.h>
46 #include <linux/pci.h>
47 #include <linux/pci.h>
48 #include <linux/pm_runtime.h>
49 #include <linux/err.h>
50 #include <linux/device.h>
51 #include <linux/errno.h>
52 #include <linux/interrupt.h>
53 #include <linux/kernel.h>
54 #include <linux/clk.h>
55 #include <linux/irq.h>
57 #include <linux/platform_device.h>
58 #include <linux/list.h>
59 #include <linux/dma-mapping.h>
60 #include <linux/delay.h>
61 #include <linux/jiffies.h>
62 #include <linux/workqueue.h>
63 #include <linux/crypto.h>
64 #include <crypto/internal/hash.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/sha.h>
67 #include <crypto/md5.h>
68 #include <crypto/aes.h>
69 #include <crypto/des.h>
70 #include <crypto/hash.h>
71 #include "sep_driver_hw_defs.h"
72 #include "sep_driver_config.h"
73 #include "sep_driver_api.h"
75 #include "sep_crypto.h"
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)

/* Globals for queuing crypto requests to the SEP device */
static spinlock_t queue_lock;		/* protects sep_queue; init site not visible in this chunk - TODO confirm */
static struct crypto_queue sep_queue;

/* Forward declaration of the dequeuer work callback */
static void sep_dequeuer(void *data);
88 * crypto_sep_dump_message - dump the message that is pending
90 * This will only print dump if DEBUG is set; it does
91 * follow kernel debug print enabling
static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
	/* Dump the first 10 words of the pending SEP message via dev_dbg
	 * (emits only when DEBUG / dynamic debug is enabled).
	 * NOTE(review): declarations of p, i and count are not visible in
	 * this chunk; p walks the shared area, i presumably walks msg - confirm. */
	p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x (local)%x\n",
			current->pid, count/4, *p++, *i++);
111 * @work: pointer to work_struct
112 * This is what is called by the queue; it is generic so that it
113 * can be used by any type of operation as each different callback
114 * function can use the data parameter in its own way
static void sep_do_callback(struct work_struct *work)
	/* Generic workqueue trampoline: recover the sep_work_struct wrapper
	 * and invoke the stored callback with its opaque data pointer. */
	struct sep_work_struct *sep_work = container_of(work,
		struct sep_work_struct, work);
	if (sep_work != NULL) {
		(sep_work->callback)(sep_work->data);
	/* container_of result was NULL (should not happen in practice) */
	pr_debug("sep crypto: do callback - NULL container\n");
130 * @work_queue: pointer to struct_workqueue
131 * @funct: pointer to function to execute
132 * @data: pointer to data; function will know
134 * This is a generic API to submit something to
135 * the queue. The callback function will depend
136 * on what operation is to be done
static int sep_submit_work(struct workqueue_struct *work_queue,
	void(*funct)(void *),
	/* Allocate a wrapper work item and queue it; GFP_ATOMIC because this
	 * may run from non-sleepable context. Ownership of sep_work passes to
	 * the workqueue/callback side - presumably freed there; TODO confirm. */
	struct sep_work_struct *sep_work;

	sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);

	if (sep_work == NULL) {
		pr_debug("sep crypto: cant allocate work structure\n");

	sep_work->callback = funct;
	sep_work->data = data;
	INIT_WORK(&sep_work->work, sep_do_callback);
	result = queue_work(work_queue, &sep_work->work);

	/* queue_work returns false if the work was already pending */
		pr_debug("sep_crypto: queue_work failed\n");
165 * @sep: pointer to struct sep_device
166 * @size: total size of area
167 * @block_size: minimum size of chunks
168 * each page is minimum or modulo this size
169 * @returns: pointer to struct scatterlist for new
static struct scatterlist *sep_alloc_sg_buf(
	struct sep_device *sep,
	/* Allocate a scatterlist of zeroed pages, each chunk trimmed so its
	 * length is a multiple of block_size; last entry holds the remainder. */
	size_t real_page_size;
	struct scatterlist *sg, *sg_temp;

	dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");

	/* largest multiple of block_size that fits in one page */
	real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
	/*
	 * The size of each page must be a multiple of the operation
	 * block size; increment by the modified page size until
	 * the total size is reached, then you have the number of
	 * pages needed.
	 */
	while (current_size < size) {
		current_size += real_page_size;

	sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
		dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");

	sg_init_table(sg, nbr_pages);

	for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
		buf = (void *)get_zeroed_page(GFP_ATOMIC);
			dev_warn(&sep->pdev->dev,
				"Cannot allocate page for new buffer\n");

		sg_set_buf(sg_temp, buf, real_page_size);
		if ((size - current_size) > real_page_size) {
			sg_temp->length = real_page_size;
			current_size += real_page_size;
			/* last chunk: only the remaining bytes */
			sg_temp->length = (size - current_size);

		/* NOTE(review): advances from the list head `sg`, not from
		 * sg_temp - looks wrong for lists of more than two pages;
		 * confirm against upstream before relying on >2 entries. */
		sg_temp = sg_next(sg);
238 * @sg: pointer to struct scatterlist; points to area to free
static void sep_free_sg_buf(struct scatterlist *sg)
	/* Free every page previously obtained via get_zeroed_page() in
	 * sep_alloc_sg_buf, walking the list entry by entry. */
	struct scatterlist *sg_temp = sg;
		free_page((unsigned long)sg_virt(sg_temp));
		sg_temp = sg_next(sg_temp);
252 * @sep: pointer to struct sep_device
253 * @sg_src: pointer to struct scatterlist for source
254 * @sg_dst: pointer to struct scatterlist for destination
255 * @size: size (in bytes) of data to copy
257 * Copy data from one scatterlist to another; both must
260 static void sep_copy_sg(
261 struct sep_device *sep,
262 struct scatterlist *sg_src,
263 struct scatterlist *sg_dst,
267 u32 in_offset, out_offset;
270 struct scatterlist *sg_src_tmp = sg_src;
271 struct scatterlist *sg_dst_tmp = sg_dst;
275 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
277 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
280 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
282 while (count < size) {
283 if ((sg_src_tmp->length - in_offset) >
284 (sg_dst_tmp->length - out_offset))
285 seg_size = sg_dst_tmp->length - out_offset;
287 seg_size = sg_src_tmp->length - in_offset;
289 if (seg_size > (size - count))
290 seg_size = (size = count);
292 memcpy(sg_virt(sg_dst_tmp) + out_offset,
293 sg_virt(sg_src_tmp) + in_offset,
296 in_offset += seg_size;
297 out_offset += seg_size;
300 if (in_offset >= sg_src_tmp->length) {
301 sg_src_tmp = sg_next(sg_src_tmp);
305 if (out_offset >= sg_dst_tmp->length) {
306 sg_dst_tmp = sg_next(sg_dst_tmp);
313 * sep_oddball_pages -
314 * @sep: pointer to struct sep_device
315 * @sg: pointer to struct scatterlist - buffer to check
316 * @size: total data size
317 * @blocksize: minimum block size; must be multiples of this size
318 * @to_copy: 1 means do copy, 0 means do not copy
319 * @new_sg: pointer to location to put pointer to new sg area
320 * @returns: 1 if new scatterlist is needed; 0 if not needed;
321 * error value if operation failed
323 * The SEP device requires all pages to be multiples of the
324 * minimum block size appropriate for the operation
325 * This function check all pages; if any are oddball sizes
326 * (not multiple of block sizes), it creates a new scatterlist.
327 * If the to_copy parameter is set to 1, then a scatter list
328 * copy is performed. The pointer to the new scatterlist is
329 * put into the address supplied by the new_sg parameter; if
330 * no new scatterlist is needed, then a NULL is put into
331 * the location at new_sg.
static int sep_oddball_pages(
	struct sep_device *sep,
	struct scatterlist *sg,
	/* Scan the scatterlist for any entry whose length is not a multiple
	 * of block_size; if found (other than the last entry), build a
	 * properly-aligned replacement list via sep_alloc_sg_buf and
	 * optionally copy the data over. Returns 1 when *new_sg was created,
	 * 0 when the original list is fine, <0 on error. */
	struct scatterlist **new_sg,

	struct scatterlist *sg_temp;

	u32 nbr_pages, page_count;

	dev_dbg(&sep->pdev->dev, "sep oddball\n");
	if ((sg == NULL) || (data_size == 0) || (data_size < block_size))

	dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
		sg_temp = sg_next(sg_temp);

	while ((sg_temp) && (flag == 0)) {

		if (sg_temp->length % block_size)

		sg_temp = sg_next(sg_temp);

	/* Do not process if last (or only) page is oddball */
	if (nbr_pages == page_count)

	dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
	*new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
	if (*new_sg == NULL) {
		dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");

	/* to_copy was requested: mirror the data into the new list */
		sep_copy_sg(sep, sg, *new_sg, data_size);
392 * sep_copy_offset_sg -
393 * @sep: pointer to struct sep_device;
394 * @sg: pointer to struct scatterlist
395 * @offset: offset into scatterlist memory
396 * @dst: place to put data
397 * @len: length of data
398 * @returns: number of bytes copies
400 * This copies data from scatterlist buffer
401 * offset from beginning - it is needed for
402 * handling tail data in hash
static size_t sep_copy_offset_sg(
	struct sep_device *sep,
	struct scatterlist *sg,
	/* Copy @len bytes out of the scatterlist starting @offset bytes from
	 * its beginning into the flat buffer @dst; used for hash tail data.
	 * Returns the number of bytes copied. */
	size_t offset_within_page;
	size_t length_within_page;
	size_t length_remaining;
	size_t current_offset;

	/* Find which page is beginning of segment */
	page_end = sg->length;
	while ((sg) && (offset > page_end)) {
		page_start += sg->length;
		page_end += sg->length;

	offset_within_page = offset - page_start;
	if ((sg->length - offset_within_page) >= len) {
		/* All within this page */
		memcpy(dst, sg_virt(sg) + offset_within_page, len);
		/* Scattered multiple pages */

		length_remaining = len;
		while ((sg) && (current_offset < len)) {
			length_within_page = sg->length - offset_within_page;
			if (length_within_page >= length_remaining) {
				/* remainder fits in this entry - final copy */
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
				length_remaining = 0;
				current_offset = len;
				/* partial: take the rest of this entry and advance */
				memcpy(dst+current_offset,
					sg_virt(sg) + offset_within_page,
				length_remaining -= length_within_page;
				current_offset += length_within_page;
				offset_within_page = 0;
467 * @src_ptr: source pointer
468 * @dst_ptr: destination pointer
469 * @nbytes: number of bytes
470 * @returns: 0 for success; -1 for failure
471 * We cannot have any partial overlap. Total overlap
472 * where src is the same as dst is okay
static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
	/* Reject partially-overlapping src/dst ranges; exact aliasing
	 * (src == dst, in-place operation) is allowed. Returns 0 when safe.
	 * Note: arithmetic on void* is a GCC extension (fine in the kernel). */
	/* Check for partial overlap */
	if (src_ptr != dst_ptr) {
		if (src_ptr < dst_ptr) {
			if ((src_ptr + nbytes) > dst_ptr)
			if ((dst_ptr + nbytes) > src_ptr)
490 /* Debug - prints only if DEBUG is defined; follows kernel debug model */
/* Debug - prints only if DEBUG is defined; follows kernel debug model */
static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
	/* Hex-dump @len bytes starting at @start, labelled @stg, via dev_dbg. */
	dev_dbg(&sep->pdev->dev,
		"Dump of %s starting at %08lx for %08x bytes\n",
		stg, (unsigned long)start, len);
	for (ct1 = 0; ct1 < len; ct1 += 1) {
		ptt = (u8 *)(start + ct1);
		dev_dbg(&sep->pdev->dev, "%02x ", *ptt);

		dev_dbg(&sep->pdev->dev, "\n");

	dev_dbg(&sep->pdev->dev, "\n");
510 /* Debug - prints only if DEBUG is defined; follows kernel debug model */
static void sep_dump_sg(struct sep_device *sep, char *stg,
	struct scatterlist *sg)
	/* Hex-dump every byte of every entry of the scatterlist, labelled
	 * @stg; debug-build only (dev_dbg). */
	dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);

		dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1,
		dev_dbg(&sep->pdev->dev, "phys addr is %lx",
			(unsigned long)sg_phys(sg));

		for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
			dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
				ct2, (unsigned char)*(ptt + ct2));

	dev_dbg(&sep->pdev->dev, "\n");
539 /* Debug - prints only if DEBUG is defined */
static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
	/* Debug dump of both IV copies for CBC requests: the vendor
	 * (SEP firmware) context IV and the kernel-crypto walk IV.
	 * Only DES-CBC and AES-CBC have IVs worth printing. */
	struct sep_aes_internal_context *aes_internal;
	struct sep_des_internal_context *des_internal;

	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		/* IV as held inside the vendor DES context */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for DES\n");
		cptr = (unsigned char *)des_internal->iv_context;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* IV as tracked by the kernel crypto ablkcipher walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for DES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		/* IV as held inside the vendor AES context */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - vendor iv for AES\n");
		cptr = (unsigned char *)aes_internal->aes_ctx_iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));

		/* IV as tracked by the kernel crypto ablkcipher walk */
		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"sep - walk from kernel crypto iv for AES\n");
		cptr = (unsigned char *)ta_ctx->walk.iv;
		for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
			dev_dbg(&ta_ctx->sep_used->pdev->dev,
				"%02x\n", *(cptr + ct1));
601 * RFC2451: Weak key check
602 * Returns: 1 (weak), 0 (not weak)
static int sep_weak_key(const u8 *key, unsigned int keylen)
	/* RFC 2451 DES weak-key check: fold the per-byte parity classes of
	 * the first 8 key bytes into a 32-bit signature `n`, then compare it
	 * against the known weak/semi-weak key signatures via a binary-search
	 * style cascade of range tests. Returns 1 (weak) / 0 (not weak).
	 * NOTE(review): parts of the parity table and of the signature
	 * computation are not visible in this chunk. */
	static const u8 parity[] = {
	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8,

	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0,

	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0,

	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8,

	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 0, 0, 8, 0, 8, 8, 0, 0,

	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8,

	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
	0, 8, 8, 0, 8, 0, 0, 8, 8,

	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
	8, 5, 0, 8, 0, 8, 8, 0, 0,

	/* build the 32-bit parity signature, one nibble per key byte */
	n = parity[key[0]]; n <<= 4;
	n |= parity[key[1]]; n <<= 4;
	n |= parity[key[2]]; n <<= 4;
	n |= parity[key[3]]; n <<= 4;
	n |= parity[key[4]]; n <<= 4;
	n |= parity[key[5]]; n <<= 4;
	n |= parity[key[6]]; n <<= 4;

	/* 1 in 10^10 keys passes this test */
	if (!((n - (w >> 3)) & w)) {
		if (n < 0x41415151) {
			if (n < 0x31312121) {
				if (n < 0x14141515) {
					/* 01 01 01 01 01 01 01 01 */
					/* 01 1F 01 1F 01 0E 01 0E */
					/* 01 E0 01 E0 01 F1 01 F1 */
					/* 01 FE 01 FE 01 FE 01 FE */
				if (n < 0x34342525) {
					/* 1F 01 1F 01 0E 01 0E 01 */
					/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
					/* 1F E0 1F E0 0E F1 0E F1 */
					/* 1F FE 1F FE 0E FE 0E FE */
			if (n < 0x61616161) {
				if (n < 0x44445555) {
					/* E0 01 E0 01 F1 01 F1 01 */
					/* E0 1F E0 1F F1 0E F1 0E */
					/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
					/* E0 FE E0 FE F1 FE F1 FE */
				if (n < 0x64646565) {
					/* FE 01 FE 01 FE 01 FE 01 */
					/* FE 1F FE 1F FE 0E FE 0E */
					/* FE E0 FE E0 FE F1 FE F1 */
					/* FE FE FE FE FE FE FE FE */
/* Count the entries of a scatterlist (body not visible in this chunk). */
static u32 sep_sg_nents(struct scatterlist *sg)
737 * @ta_ctx: pointer to struct this_task_ctx
738 * @returns: offset to place for the next word in the message
739 * Set up pointer in message pool for new message
static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
	/* Initialize the outbound message area: zero it, reserve two header
	 * words, and write the start token. Returns the byte offset where
	 * the caller should write the next word (i.e. past the header). */
	ta_ctx->msg_len_words = 2;
	ta_ctx->msgptr = ta_ctx->msg;
	memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
	ta_ctx->msgptr += sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	*word_ptr = SEP_START_MSG_TOKEN;
	return sizeof(u32) * 2;
755 * @ta_ctx: pointer to struct this_task_ctx
756 * @messages_offset: current message offset
757 * Returns: 0 for success; <0 otherwise
758 * End message; set length and CRC; and
759 * send interrupt to the SEP
static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
	/* Finalize the outbound message: store the total length in words
	 * right after the start token, and a (currently zero) CRC at the
	 * end of the message. */
	/* Msg size goes into msg after token */
	ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
	word_ptr = (u32 *)ta_ctx->msgptr;
	*word_ptr = ta_ctx->msg_len_words;

	/* CRC (currently 0) goes at end of msg */
	word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
776 * sep_start_inbound_msg -
777 * @ta_ctx: pointer to struct this_task_ctx
778 * @msg_offset: offset to place for the next word in the message
779 * @returns: 0 for success; error value for failure
780 * Set up pointer in message pool for inbound message
static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
	/* Validate the reply header: read the start token and message length
	 * from the first two words; *msg_offset is set past the header.
	 * Returns 0 on success, SEP_INVALID_START on a bad token. */
	*msg_offset = sizeof(u32) * 2;
	word_ptr = (u32 *)ta_ctx->msgptr;
	ta_ctx->msg_len_words = *(word_ptr + 1);

	if (token != SEP_START_MSG_TOKEN) {
		error = SEP_INVALID_START;
805 * @ta_ctx: pointer to struct this_task_ctx
806 * @in_addr: pointer to start of parameter
807 * @size: size of parameter to copy (in bytes)
808 * @max_size: size to move up offset; SEP mesg is in word sizes
809 * @msg_offset: pointer to current offset (is updated)
810 * @byte_array: flag ti indicate wheter endian must be changed
811 * Copies data into the message area from caller
static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
	/* Copy @size bytes from the caller into the message area at
	 * *msg_offset, then advance *msg_offset by @max_size (the SEP
	 * message is word-granular, so padding may be skipped). When
	 * @byte_array is set, each word is byte-swapped for the device. */
	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;
	memcpy(void_ptr, in_addr, size);
	*msg_offset += max_size;

	/* Do we need to manipulate endian? */
		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
833 * @ta_ctx: pointer to struct this_task_ctx
834 * @msg_offset: pointer to current offset (is updated)
835 * @op_code: op code to put into message
836 * Puts op code into message and updates offset
static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	/* Start a new message (sep_start_msg) and write the opcode word;
	 * *msg_offset ends up pointing just past the opcode. */
	*msg_offset = sep_start_msg(ta_ctx);
	word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
	*msg_offset += sizeof(u32);
853 * @ta_ctx: pointer to struct this_task_ctx
854 * @in_addr: pointer to start of parameter
855 * @size: size of parameter to copy (in bytes)
856 * @max_size: size to move up offset; SEP mesg is in word sizes
857 * @msg_offset: pointer to current offset (is updated)
858 * @byte_array: flag ti indicate wheter endian must be changed
859 * Copies data out of the message area to caller
static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
	u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
	/* Mirror of sep_write_msg: optionally byte-swap the words in place,
	 * copy @size bytes out to the caller, then advance *msg_offset by
	 * @max_size. Note the swap mutates the message buffer itself. */
	void_ptr = ta_ctx->msgptr + *msg_offset;
	word_ptr = (u32 *)void_ptr;

	/* Do we need to manipulate endian? */
		for (i = 0; i < ((size + 3) / 4); i += 1)
			*(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));

	memcpy(in_addr, void_ptr, size);
	*msg_offset += max_size;
882 * @ta_ctx: pointer to struct this_task_ctx
883 * @op_code: expected op_code
884 * @msg_offset: pointer to current offset (is updated)
885 * @returns: 0 for success; error for failure
static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
	/* Parse the SEP reply: check the start token, then read the echoed
	 * opcode and status words. Returns 0 on success, SEP_WRONG_OPCODE
	 * when the reply is for a different operation, or the device error. */
	struct sep_device *sep = ta_ctx->sep_used;

	dev_dbg(&sep->pdev->dev, "dumping return message\n");
	error = sep_start_inbound_msg(ta_ctx, msg_offset);
		dev_warn(&sep->pdev->dev,
			"sep_start_inbound_msg error\n");

	/* in_ary[0] = echoed opcode, in_ary[1] = device status */
	sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,

	if (in_ary[0] != op_code) {
		dev_warn(&sep->pdev->dev,
			"sep got back wrong opcode\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
		return SEP_WRONG_OPCODE;

	if (in_ary[1] != SEP_OK) {
		dev_warn(&sep->pdev->dev,
			"sep execution error\n");
		dev_warn(&sep->pdev->dev,
			"got back %x; expected %x\n",
929 * @ta_ctx: pointer to struct this_task_ctx
930 * @msg_offset: point to current place in SEP msg; is updated
931 * @dst: pointer to place to put the context
932 * @len: size of the context structure (differs for crypro/hash)
933 * This function reads the context from the msg area
934 * There is a special way the vendor needs to have the maximum
935 * length calculated so that the msg_offset is updated properly;
936 * it skips over some words in the msg area depending on the size
static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	/* Read a vendor context blob from the message; max_length rounds len
	 * up to a whole number of u32 words so msg_offset stays word-aligned. */
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
	sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
947 * sep_write_context -
948 * @ta_ctx: pointer to struct this_task_ctx
949 * @msg_offset: point to current place in SEP msg; is updated
950 * @src: pointer to the current context
951 * @len: size of the context structure (differs for crypro/hash)
952 * This function writes the context to the msg area
953 * There is a special way the vendor needs to have the maximum
954 * length calculated so that the msg_offset is updated properly;
955 * it skips over some words in the msg area depending on the size
static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
	/* Write a vendor context blob into the message; max_length rounds len
	 * up to a whole number of u32 words so msg_offset stays word-aligned. */
	u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
	sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
967 * @ta_ctx: pointer to struct this_task_ctx
968 * Clear out crypto related values in sep device structure
969 * to enable device to be used by anyone; either kernel
970 * crypto or userspace app via middleware
static void sep_clear_out(struct this_task_ctx *ta_ctx)
	/* Tear down per-request state and, if we own the device, hand the
	 * SEP back: scrub the shared area, drop queue/status bookkeeping,
	 * release runtime-PM and wake the next waiting transaction. */
	if (ta_ctx->src_sg_hold) {
		sep_free_sg_buf(ta_ctx->src_sg_hold);
		ta_ctx->src_sg_hold = NULL;

	if (ta_ctx->dst_sg_hold) {
		sep_free_sg_buf(ta_ctx->dst_sg_hold);
		ta_ctx->dst_sg_hold = NULL;

	ta_ctx->src_sg = NULL;
	ta_ctx->dst_sg = NULL;

	sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);

	if (ta_ctx->i_own_sep) {
		/*
		 * The following unlocks the sep and makes it available
		 * to any other application
		 * First, null out crypto entries in sep before releasing it
		 */
		ta_ctx->sep_used->current_hash_req = NULL;
		ta_ctx->sep_used->current_cypher_req = NULL;
		ta_ctx->sep_used->current_request = 0;
		ta_ctx->sep_used->current_hash_stage = 0;
		ta_ctx->sep_used->ta_ctx = NULL;
		ta_ctx->sep_used->in_kernel = 0;

		ta_ctx->call_status.status = 0;

		/* Remove anything confidential from the shared message area */
		memset(ta_ctx->sep_used->shared_addr, 0,
			SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

		sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);

#ifdef SEP_ENABLE_RUNTIME_PM
		ta_ctx->sep_used->in_use = 0;
		pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
		pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);

		clear_bit(SEP_WORKING_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		ta_ctx->sep_used->pid_doing_transaction = 0;

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"[PID%d] waking up next transaction\n",

		/* releases the transaction lock and wakes any waiter */
		clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
			&ta_ctx->sep_used->in_use_flags);
		wake_up(&ta_ctx->sep_used->event_transactions);

		ta_ctx->i_own_sep = 0;
1033 * Release crypto infrastructure from EINPROGRESS and
1034 * clear sep_dev so that SEP is available to anyone
static void sep_crypto_release(struct sep_system_ctx *sctx,
	struct this_task_ctx *ta_ctx, u32 error)
	/* Complete a crypto request toward the kernel crypto API: clear out
	 * device state, flag the waiter, and invoke base.complete() with
	 * @error for whichever request type (cipher or hash) is active. */
	struct ahash_request *hash_req = ta_ctx->current_hash_req;
	struct ablkcipher_request *cypher_req =
		ta_ctx->current_cypher_req;
	struct sep_device *sep = ta_ctx->sep_used;

	sep_clear_out(ta_ctx);

	/*
	 * This may not yet exist depending when we
	 * chose to bail out. If it does exist, set
	 * the done flag for the waiter.
	 */
	if (ta_ctx->are_we_done_yet != NULL)
		*ta_ctx->are_we_done_yet = 1;

	if (cypher_req != NULL) {
		/* only complete once the key stage is done, or on real error */
		if ((sctx->key_sent == 1) ||
			((error != 0) && (error != -EINPROGRESS))) {
			if (cypher_req->base.complete == NULL) {
				dev_dbg(&sep->pdev->dev,
					"release is null for cypher!");
				cypher_req->base.complete(
					&cypher_req->base, error);

	if (hash_req != NULL) {
		if (hash_req->base.complete == NULL) {
			dev_dbg(&sep->pdev->dev,
				"release is null for hash!");
			hash_req->base.complete(
				&hash_req->base, error);
1079 * This is where we grab the sep itself and tell it to do something.
1080 * It will sleep if the sep is currently busy
1081 * and it will return 0 if sep is now ours; error value if there
static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
	/* Acquire the SEP device (may sleep in sep_wait_transaction), copy
	 * the prepared message and DCB/DMA tables into the shared area,
	 * record which request owns the device, then fire the command.
	 * Returns 0 once the command has been sent. */
	struct sep_device *sep = ta_ctx->sep_used;

	struct sep_msgarea_hdr *my_msg_header;

	my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;

	/* add to status queue */
	ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
		ta_ctx->nbytes, current->pid,
		current->comm, sizeof(current->comm));

	if (!ta_ctx->queue_elem) {
		dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
			" status error\n", current->pid);

	/* get the device; this can sleep */
	result = sep_wait_transaction(sep);

	/* bump the PM refcount while we hold the device */
	if (sep_dev->power_save_setup == 1)
		pm_runtime_get_sync(&sep_dev->pdev->dev);

	/* Copy in the message */
	memcpy(sep->shared_addr, ta_ctx->msg,
		SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Copy in the dcb information if there is any */
	if (ta_ctx->dcb_region) {
		result = sep_activate_dcb_dmatables_context(sep,
			&ta_ctx->dcb_region, &ta_ctx->dmatables_region,

	/* Mark the device so we know how to finish the job in the tasklet */
	if (ta_ctx->current_hash_req)
		sep->current_hash_req = ta_ctx->current_hash_req;
		sep->current_cypher_req = ta_ctx->current_cypher_req;

	sep->current_request = ta_ctx->current_request;
	sep->current_hash_stage = ta_ctx->current_hash_stage;
	sep->ta_ctx = ta_ctx;

	ta_ctx->i_own_sep = 1;

	/* need to set bit first to avoid race condition with interrupt */
	set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);

	result = sep_send_command_handler(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",

	dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",

	dev_dbg(&sep->pdev->dev, "[PID%d]: cant send command\n",

	/* sending failed: undo the "message sent" bit set above */
	clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
		&ta_ctx->call_status.status);
1158 * This function sets things up for a crypto data block process
1159 * This does all preparation, but does not try to grab the
1161 * @req: pointer to struct ablkcipher_request
1162 * returns: 0 if all went well, non zero if error
static int sep_crypto_block_data(struct ablkcipher_request *req)
	/* Prepare (but do not submit) a cipher data operation: normalize the
	 * src/dst scatterlists to block-aligned pages, validate sizes and
	 * overlap, build the SEP message (inline data for a single DES block,
	 * DMA tables otherwise), sync the CBC IV into the vendor context, and
	 * close the message. Returns 0 on success. */
	static char small_buf[100];
	ssize_t copy_result;

	struct scatterlist *new_sg;
	struct this_task_ctx *ta_ctx;
	struct crypto_ablkcipher *tfm;
	struct sep_system_ctx *sctx;

	struct sep_des_internal_context *des_internal;
	struct sep_aes_internal_context *aes_internal;

	ta_ctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	sctx = crypto_ablkcipher_ctx(tfm);

	/* start the walk on scatterlists */
	ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",

	int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",

	dev_dbg(&ta_ctx->sep_used->pdev->dev,
		"crypto block: src is %lx dst is %lx\n",
		(unsigned long)req->src, (unsigned long)req->dst);

	/* Make sure all pages are even block */
	int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page eerror\n");
	} else if (int_error == 1) {
		/* a realigned copy was built; remember it for freeing later */
		ta_ctx->src_sg = new_sg;
		ta_ctx->src_sg_hold = new_sg;
		ta_ctx->src_sg = req->src;
		ta_ctx->src_sg_hold = NULL;

	int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
		req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);

	if (int_error < 0) {
		dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
	} else if (int_error == 1) {
		ta_ctx->dst_sg = new_sg;
		ta_ctx->dst_sg_hold = new_sg;
		ta_ctx->dst_sg = req->dst;
		ta_ctx->dst_sg_hold = NULL;

	/* set nbytes for queue status */
	ta_ctx->nbytes = req->nbytes;

	/* Key already done; this is for data */
	dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");

	sep_dump_sg(ta_ctx->sep_used,
		"block sg in", ta_ctx->src_sg);

	/* check for valid data and proper spacing */
	src_ptr = sg_virt(ta_ctx->src_sg);
	dst_ptr = sg_virt(ta_ctx->dst_sg);

	if (!src_ptr || !dst_ptr ||
		(ta_ctx->current_cypher_req->nbytes %
		crypto_ablkcipher_blocksize(tfm))) {

		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size odd\n");
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher block size is %x\n",
			crypto_ablkcipher_blocksize(tfm));
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"cipher data size is %x\n",
			ta_ctx->current_cypher_req->nbytes);

	if (partial_overlap(src_ptr, dst_ptr,
		ta_ctx->current_cypher_req->nbytes)) {
		dev_warn(&ta_ctx->sep_used->pdev->dev,
			"block partial overlap\n");

	/* Put together the message */
	sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);

	/* If des, and size is 1 block, put directly in msg */
	if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
		(req->nbytes == crypto_ablkcipher_blocksize(tfm))) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"writing out one block des\n");

		copy_result = sg_copy_to_buffer(
			ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
			small_buf, crypto_ablkcipher_blocksize(tfm));

		if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"des block copy faild\n");

		/* Put data into message */
		sep_write_msg(ta_ctx, small_buf,
			crypto_ablkcipher_blocksize(tfm),
			crypto_ablkcipher_blocksize(tfm) * 2,

		/* Put size into message */
		sep_write_msg(ta_ctx, &req->nbytes,
			sizeof(u32), sizeof(u32), &msg_offset, 0);

	/* Otherwise, fill out dma tables */
		ta_ctx->dcb_input_data.app_in_address = src_ptr;
		ta_ctx->dcb_input_data.data_in_size = req->nbytes;
		ta_ctx->dcb_input_data.app_out_address = dst_ptr;
		ta_ctx->dcb_input_data.block_size =
			crypto_ablkcipher_blocksize(tfm);
		ta_ctx->dcb_input_data.tail_block_size = 0;
		ta_ctx->dcb_input_data.is_applet = 0;
		ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
		ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;

		result = sep_create_dcb_dmatables_context_kernel(
			&ta_ctx->dcb_region,
			&ta_ctx->dmatables_region,
			&ta_ctx->dcb_input_data,
			dev_warn(&ta_ctx->sep_used->pdev->dev,
				"crypto dma table create failed\n");

		/* Portion of msg is nulled (no data) */
		sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
			sizeof(u32) * 5, &msg_offset, 0);

	/*
	 * Before we write the message, we need to overwrite the
	 * vendor's IV with the one from our own ablkcipher walk
	 * iv because this is needed for dm-crypt
	 */
	sep_dump_ivs(req, "sending data block to sep\n");
	if ((ta_ctx->current_request == DES_CBC) &&
		(ta_ctx->des_opmode == SEP_DES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on DES\n");
		des_internal = (struct sep_des_internal_context *)
			sctx->des_private_ctx.ctx_buf;
		memcpy((void *)des_internal->iv_context,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
	} else if ((ta_ctx->current_request == AES_CBC) &&
		(ta_ctx->aes_opmode == SEP_AES_CBC)) {

		dev_dbg(&ta_ctx->sep_used->pdev->dev,
			"overwrite vendor iv on AES\n");
		aes_internal = (struct sep_aes_internal_context *)
			sctx->aes_private_ctx.cbuff;
		memcpy((void *)aes_internal->aes_ctx_iv,
			ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));

	/* Write context into message */
	if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->des_private_ctx,
			sizeof(struct sep_des_private_context));
		sep_dump(ta_ctx->sep_used, "ctx to block des",
			&sctx->des_private_ctx, 40);
		sep_write_context(ta_ctx, &msg_offset,
			&sctx->aes_private_ctx,
			sizeof(struct sep_aes_private_context));
		sep_dump(ta_ctx->sep_used, "ctx to block aes",
			&sctx->aes_private_ctx, 20);

	/* conclude message */
	sep_end_msg(ta_ctx, msg_offset);

	/* Parent (caller) is now ready to tell the sep to go ahead */
1384 * This function sets things up for a crypto key submit process
1385 * This does all preparation, but does not try to grab the
1387 * @req: pointer to struct ablkcipher_request
1388  * returns: 0 if all went well, non-zero if error
/*
 * sep_crypto_send_key - build the "init / send key" message for the SEP.
 * Starts an ablkcipher walk over the request, captures the IV from the
 * walk for CBC modes (needed because dm-crypt supplies the IV there),
 * then writes opcode, IV and key material into the shared message area
 * and closes the message.  The caller submits the message to the SEP.
 * Returns 0 on success, non-zero on error.
 * NOTE(review): this file is a line-numbered extract; lines are missing
 * between the numbered statements (declarations, braces, error gotos).
 */
1390 static int sep_crypto_send_key(struct ablkcipher_request *req)
1398 struct this_task_ctx *ta_ctx;
1399 struct crypto_ablkcipher *tfm;
1400 struct sep_system_ctx *sctx;
1402 ta_ctx = ablkcipher_request_ctx(req);
1403 tfm = crypto_ablkcipher_reqtfm(req);
1404 sctx = crypto_ablkcipher_ctx(tfm);
1406 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1408 /* start the walk on scatterlists */
1409 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1410 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1411 "sep crypto block data size of %x\n", req->nbytes);
1413 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1415 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
/* For CBC modes stash the IV out of the walk into ta_ctx->iv */
1421 if ((ta_ctx->current_request == DES_CBC) &&
1422 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1423 if (!ta_ctx->walk.iv) {
1424 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1428 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1429 sep_dump(ta_ctx->sep_used, "iv",
1430 ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
1433 if ((ta_ctx->current_request == AES_CBC) &&
1434 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1435 if (!ta_ctx->walk.iv) {
1436 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1440 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1441 sep_dump(ta_ctx->sep_used, "iv",
1442 ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
1445 /* put together message to SEP */
1446 /* Start with op code */
1447 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1449 /* now deal with IV */
1450 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1451 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1452 sep_write_msg(ta_ctx, ta_ctx->iv,
1453 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1455 sep_dump(ta_ctx->sep_used, "initial IV",
1456 ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
/* no IV for ECB; just advance past the reserved IV slot */
1459 msg_offset += 4 * sizeof(u32);
/* round the AES IV size up to a whole number of 32-bit words */
1462 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1463 sizeof(u32)) * sizeof(u32);
1464 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1465 sep_write_msg(ta_ctx, ta_ctx->iv,
1466 SEP_AES_IV_SIZE_BYTES, max_length,
1468 sep_dump(ta_ctx->sep_used, "initial IV",
1469 ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1472 msg_offset += max_length;
/* Write the key itself plus the mode parameters (enc/dec, opmode) */
1477 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1478 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1479 sizeof(u32) * 8, sizeof(u32) * 8,
1482 msg[0] = (u32)sctx->des_nbr_keys;
1483 msg[1] = (u32)ta_ctx->des_encmode;
1484 msg[2] = (u32)ta_ctx->des_opmode;
1486 sep_write_msg(ta_ctx, (void *)msg,
1487 sizeof(u32) * 3, sizeof(u32) * 3,
1490 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1492 SEP_AES_MAX_KEY_SIZE_BYTES,
1495 msg[0] = (u32)sctx->aes_key_size;
1496 msg[1] = (u32)ta_ctx->aes_encmode;
1497 msg[2] = (u32)ta_ctx->aes_opmode;
1498 msg[3] = (u32)0; /* Secret key is not used */
1499 sep_write_msg(ta_ctx, (void *)msg,
1500 sizeof(u32) * 4, sizeof(u32) * 4,
1504 /* conclude message */
1505 sep_end_msg(ta_ctx, msg_offset);
1507 /* Parent (caller) is now ready to tell the sep to go ahead */
1512 /* This needs to be run as a work queue as it can be put asleep */
/*
 * sep_crypto_block - workqueue handler that drives one cipher request.
 * If the key has not yet been sent for this tfm, it first builds and
 * submits the "send key" message and busy-waits (bounded by WAIT_TIME)
 * for the interrupt-side post-op to flag completion via
 * ta_ctx->are_we_done_yet; it then does the same for the data block.
 * All failures are reported through sep_crypto_release().
 * NOTE(review): line-numbered extract; error-branch braces/returns are
 * in the elided lines.
 */
1513 static void sep_crypto_block(void *data)
1515 unsigned long end_time;
1519 struct ablkcipher_request *req;
1520 struct this_task_ctx *ta_ctx;
1521 struct crypto_ablkcipher *tfm;
1522 struct sep_system_ctx *sctx;
1523 int are_we_done_yet;
1525 req = (struct ablkcipher_request *)data;
1526 ta_ctx = ablkcipher_request_ctx(req);
1527 tfm = crypto_ablkcipher_reqtfm(req);
1528 sctx = crypto_ablkcipher_ctx(tfm);
/* completion flag lives on this stack frame; post-op sets it */
1530 ta_ctx->are_we_done_yet = &are_we_done_yet;
1532 pr_debug("sep_crypto_block\n");
1533 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1535 pr_debug("key_sent is %d\n", sctx->key_sent);
1537 /* do we need to send the key */
1538 if (sctx->key_sent == 0) {
1539 are_we_done_yet = 0;
1540 result = sep_crypto_send_key(req); /* prep to send key */
1542 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1543 "could not prep key %x\n", result);
1544 sep_crypto_release(sctx, ta_ctx, result);
1548 result = sep_crypto_take_sep(ta_ctx);
1550 dev_warn(&ta_ctx->sep_used->pdev->dev,
1551 "sep_crypto_take_sep for key send failed\n");
1552 sep_crypto_release(sctx, ta_ctx, result);
1556 /* now we sit and wait up to a fixed time for completion */
1557 end_time = jiffies + (WAIT_TIME * HZ);
1558 while ((time_before(jiffies, end_time)) &&
1559 (are_we_done_yet == 0))
1562 /* Done waiting; still not done yet? */
1563 if (are_we_done_yet == 0) {
1564 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1565 "Send key job never got done\n");
1566 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1570 /* Set the key sent variable so this can be skipped later */
1574 /* Key sent (or maybe not if we did not have to), now send block */
1575 are_we_done_yet = 0;
1577 result = sep_crypto_block_data(req);
1580 dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* fixed garbled log text ("could prep not send block") */
1581 "could not prep block send %x\n", result);
1582 sep_crypto_release(sctx, ta_ctx, result);
1586 result = sep_crypto_take_sep(ta_ctx);
1588 dev_warn(&ta_ctx->sep_used->pdev->dev,
1589 "sep_crypto_take_sep for block send failed\n");
1590 sep_crypto_release(sctx, ta_ctx, result);
1594 /* now we sit and wait up to a fixed time for completion */
1595 end_time = jiffies + (WAIT_TIME * HZ);
1596 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1599 /* Done waiting; still not done yet? */
1600 if (are_we_done_yet == 0) {
1601 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1602 "Send block job never got done\n");
1603 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1607 /* That's it; entire thing done, get out of queue */
1609 pr_debug("crypto_block leaving\n");
1610 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1614 * Post operation (after interrupt) for crypto block
/*
 * crypto_post_op - interrupt-side completion handler for cipher requests.
 * Copies the SEP reply out of the shared area, verifies the opcode, then
 * either (a) for a key-init reply, reads the DES/AES private context back
 * and releases with -EINPROGRESS so the data block gets sent, or (b) for
 * a block reply, copies result data / context back, restores the IV into
 * walk.iv (required by dm-crypt), and releases with 0.
 * NOTE(review): line-numbered extract; elided lines carry braces, gotos
 * and some arguments.
 */
1616 static u32 crypto_post_op(struct sep_device *sep)
1622 ssize_t copy_result;
1623 static char small_buf[100];
1625 struct ablkcipher_request *req;
1626 struct this_task_ctx *ta_ctx;
1627 struct sep_system_ctx *sctx;
1628 struct crypto_ablkcipher *tfm;
1630 struct sep_des_internal_context *des_internal;
1631 struct sep_aes_internal_context *aes_internal;
1633 if (!sep->current_cypher_req)
1636 /* hold req since we need to submit work after clearing sep */
1637 req = sep->current_cypher_req;
1639 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1640 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1641 sctx = crypto_ablkcipher_ctx(tfm);
1643 pr_debug("crypto_post op\n");
1644 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1645 sctx->key_sent, tfm, sctx, ta_ctx);
1647 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1648 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1649 crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
1651 /* first bring msg from shared area to local area */
1652 memcpy(ta_ctx->msg, sep->shared_addr,
1653 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1655 /* Is this the result of performing init (key to SEP)? */
1656 if (sctx->key_sent == 0) {
1658 /* Did SEP do it okay */
1659 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1662 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* was "aes init error" — this path is taken for DES init too */
1663 "cipher init error %x\n", u32_error);
1664 sep_crypto_release(sctx, ta_ctx, u32_error);
/* Read back the firmware's private context for the follow-up block op */
1669 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1670 sep_read_context(ta_ctx, &msg_offset,
1671 &sctx->des_private_ctx,
1672 sizeof(struct sep_des_private_context));
1674 sep_dump(ta_ctx->sep_used, "ctx init des",
1675 &sctx->des_private_ctx, 40);
1677 sep_read_context(ta_ctx, &msg_offset,
1678 &sctx->aes_private_ctx,
1679 sizeof(struct sep_aes_private_context));
1681 sep_dump(ta_ctx->sep_used, "ctx init aes",
1682 &sctx->aes_private_ctx, 20);
1685 sep_dump_ivs(req, "after sending key to sep\n");
1687 /* key sent went okay; release sep, and set are_we_done_yet */
1689 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1694 * This is the result of a block request
1696 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1697 "crypto_post_op block response\n");
1699 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1703 dev_warn(&ta_ctx->sep_used->pdev->dev,
1704 "sep block error %x\n", u32_error);
1705 sep_crypto_release(sctx, ta_ctx, u32_error);
1709 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1711 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1712 "post op for DES\n");
1714 /* special case for 1 block des */
1715 if (sep->current_cypher_req->nbytes ==
1716 crypto_ablkcipher_blocksize(tfm)) {
/* single-block result comes back inline in the message area */
1718 sep_read_msg(ta_ctx, small_buf,
1719 crypto_ablkcipher_blocksize(tfm),
1720 crypto_ablkcipher_blocksize(tfm) * 2,
1723 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1724 "reading in block des\n");
1726 copy_result = sg_copy_from_buffer(
1728 sep_sg_nents(ta_ctx->dst_sg),
1730 crypto_ablkcipher_blocksize(tfm));
1733 crypto_ablkcipher_blocksize(tfm)) {
1735 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* fixed typo "faild" */
1736 "des block copy failed\n");
1737 sep_crypto_release(sctx, ta_ctx,
1744 sep_read_context(ta_ctx, &msg_offset,
1745 &sctx->des_private_ctx,
1746 sizeof(struct sep_des_private_context));
1749 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1750 "post op for AES\n");
1752 /* Skip the MAC Output */
1753 msg_offset += (sizeof(u32) * 4);
1756 sep_read_context(ta_ctx, &msg_offset,
1757 &sctx->aes_private_ctx,
1758 sizeof(struct sep_aes_private_context));
1761 sep_dump_sg(ta_ctx->sep_used,
1762 "block sg out", ta_ctx->dst_sg);
1764 /* Copy to correct sg if this block had oddball pages */
1765 if (ta_ctx->dst_sg_hold)
1766 sep_copy_sg(ta_ctx->sep_used,
1768 ta_ctx->current_cypher_req->dst,
1769 ta_ctx->current_cypher_req->nbytes);
1772 * Copy the iv's back to the walk.iv
1773 * This is required for dm_crypt
1775 sep_dump_ivs(req, "got data block from sep\n");
1776 if ((ta_ctx->current_request == DES_CBC) &&
1777 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1779 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1780 "returning result iv to walk on DES\n");
1781 des_internal = (struct sep_des_internal_context *)
1782 sctx->des_private_ctx.ctx_buf;
1783 memcpy(ta_ctx->walk.iv,
1784 (void *)des_internal->iv_context,
1785 crypto_ablkcipher_ivsize(tfm));
1786 } else if ((ta_ctx->current_request == AES_CBC) &&
1787 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1789 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1790 "returning result iv to walk on AES\n");
1791 aes_internal = (struct sep_aes_internal_context *)
1792 sctx->aes_private_ctx.cbuff;
1793 memcpy(ta_ctx->walk.iv,
1794 (void *)aes_internal->aes_ctx_iv,
1795 crypto_ablkcipher_ivsize(tfm));
1798 /* finished, release everything */
1799 sep_crypto_release(sctx, ta_ctx, 0);
1801 pr_debug("crypto_post_op done\n");
1802 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1803 sctx->key_sent, tfm, sctx, ta_ctx);
/*
 * hash_init_post_op - completion handler for a hash-init reply.
 * Verifies the SEP_HASH_INIT_OPCODE reply, reads the firmware's hash
 * private context back into sctx, and signals completion via
 * sep_crypto_release().
 * NOTE(review): line-numbered extract; return statements are elided.
 */
1808 static u32 hash_init_post_op(struct sep_device *sep)
1812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1813 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1814 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1815 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1816 "hash init post op\n");
1818 /* first bring msg from shared area to local area */
1819 memcpy(ta_ctx->msg, sep->shared_addr,
1820 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1822 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1826 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1828 sep_crypto_release(sctx, ta_ctx, u32_error);
/* Save the hash state so update/finish can send it back to the SEP */
1833 sep_read_context(ta_ctx, &msg_offset,
1834 &sctx->hash_private_ctx,
1835 sizeof(struct sep_hash_private_context));
1837 /* Signal to crypto infrastructure and clear out */
1838 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1839 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * hash_update_post_op - completion handler for a hash-update reply.
 * Verifies the SEP_HASH_UPDATE_OPCODE reply and reads the hash private
 * context back.  If this update was the data half of a finup, it
 * re-queues the request as HASH_FINUP_FINISH and kicks the dequeuer.
 * NOTE(review): line-numbered extract; returns/braces are elided.
 */
1843 static u32 hash_update_post_op(struct sep_device *sep)
1847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1848 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1849 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1850 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1851 "hash update post op\n");
1853 /* first bring msg from shared area to local area */
1854 memcpy(ta_ctx->msg, sep->shared_addr,
1855 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1857 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
/* was "hash init error" — copy-paste from hash_init_post_op */
1861 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1863 sep_crypto_release(sctx, ta_ctx, u32_error);
1868 sep_read_context(ta_ctx, &msg_offset,
1869 &sctx->hash_private_ctx,
1870 sizeof(struct sep_hash_private_context));
1873 * Following is only for finup; if we just completd the
1874 * data portion of finup, we now need to kick off the
1875 * finish portion of finup.
1878 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1880 /* first reset stage to HASH_FINUP_FINISH */
1881 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1883 /* now enqueue the finish operation */
1884 spin_lock_irq(&queue_lock);
1885 u32_error = crypto_enqueue_request(&sep_queue,
1886 &ta_ctx->sep_used->current_hash_req->base);
1887 spin_unlock_irq(&queue_lock);
1889 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1890 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* was "spe cypher post op cant queue" */
1891 "sep cipher post op cant queue\n");
1892 sep_crypto_release(sctx, ta_ctx, u32_error);
1896 /* schedule the data send */
1897 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1898 sep_dequeuer, (void *)&sep_queue);
1901 dev_warn(&ta_ctx->sep_used->pdev->dev,
1902 "cant submit work sep_crypto_block\n");
1903 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1908 /* Signal to crypto infrastructure and clear out */
1909 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1910 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * hash_final_post_op - completion handler for a hash-finish reply.
 * Verifies the SEP_HASH_FINISH_OPCODE reply, then copies the digest out
 * of the message area into the ahash request's result buffer.
 * NOTE(review): line-numbered extract; returns/braces are elided.
 */
1914 static u32 hash_final_post_op(struct sep_device *sep)
1919 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1920 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1921 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1922 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1923 "hash final post op\n");
1925 /* first bring msg from shared area to local area */
1926 memcpy(ta_ctx->msg, sep->shared_addr,
1927 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1929 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1933 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1935 sep_crypto_release(sctx, ta_ctx, u32_error);
1939 /* Grab the result */
1940 if (ta_ctx->current_hash_req->result == NULL) {
1941 /* Oops, null buffer; error out here */
1942 dev_warn(&ta_ctx->sep_used->pdev->dev,
1943 "hash finish null buffer\n");
1944 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
/* result slot size rounded up to a whole number of 32-bit words */
1948 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1949 sizeof(u32)) * sizeof(u32);
1951 sep_read_msg(ta_ctx,
1952 ta_ctx->current_hash_req->result,
1953 crypto_ahash_digestsize(tfm), max_length,
1956 /* Signal to crypto infrastructure and clear out */
1957 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1958 sep_crypto_release(sctx, ta_ctx, 0);
/*
 * hash_digest_post_op - completion handler for a one-shot digest reply.
 * Mirrors hash_final_post_op but for SEP_HASH_SINGLE_OPCODE: verifies
 * the reply and copies the digest into the request's result buffer.
 * NOTE(review): line-numbered extract; returns/braces are elided.
 */
1962 static u32 hash_digest_post_op(struct sep_device *sep)
1967 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1968 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1969 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1970 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1971 "hash digest post op\n");
1973 /* first bring msg from shared area to local area */
1974 memcpy(ta_ctx->msg, sep->shared_addr,
1975 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1977 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1981 dev_warn(&ta_ctx->sep_used->pdev->dev,
1982 "hash digest finish error %x\n", u32_error);
1984 sep_crypto_release(sctx, ta_ctx, u32_error);
1988 /* Grab the result */
1989 if (ta_ctx->current_hash_req->result == NULL) {
1990 /* Oops, null buffer; error out here */
1991 dev_warn(&ta_ctx->sep_used->pdev->dev,
1992 "hash digest finish null buffer\n");
1993 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
/* result slot size rounded up to a whole number of 32-bit words */
1997 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1998 sizeof(u32)) * sizeof(u32);
2000 sep_read_msg(ta_ctx,
2001 ta_ctx->current_hash_req->result,
2002 crypto_ahash_digestsize(tfm), max_length,
2005 /* Signal to crypto infrastructure and clear out */
2006 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2007 "hash digest finish post op done\n");
2009 sep_crypto_release(sctx, ta_ctx, 0);
2014  * The sep_finish function is the function that is scheduled (via tasklet)
2015  * by the interrupt service routine when the SEP sends an interrupt.
2016  * This is only called by the interrupt handler as a tasklet.
/*
 * sep_finish - tasklet run after the SEP raises its completion interrupt.
 * Sanity-checks device state (in-kernel mode, a sendmsg actually
 * happened, reply counter advanced), decodes GPR2 to classify the
 * interrupt (printf request, daemon request, or command reply), then
 * dispatches to the matching *_post_op handler based on the current
 * request type and hash stage.
 * NOTE(review): line-numbered extract; case labels, returns and some
 * statements live in the elided lines — do not infer control flow from
 * adjacency here.
 */
2018 static void sep_finish(unsigned long data)
2020 struct sep_device *sep_dev;
2026 pr_debug("sep_finish called with null data\n");
2030 sep_dev = (struct sep_device *)data;
2031 if (sep_dev == NULL) {
2032 pr_debug("sep_finish; sep_dev is NULL\n");
2036 if (sep_dev->in_kernel == (u32)0) {
2037 dev_warn(&sep_dev->pdev->dev,
2038 "sep_finish; not in kernel operation\n");
2042 /* Did we really do a sep command prior to this? */
2043 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
2044 &sep_dev->ta_ctx->call_status.status)) {
2046 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
2051 if (sep_dev->send_ct != sep_dev->reply_ct) {
2052 dev_warn(&sep_dev->pdev->dev,
2053 "[PID%d] poll; no message came back\n",
2058 /* Check for error (In case time ran out) */
2059 if ((res != 0x0) && (res != 0x8)) {
2060 dev_warn(&sep_dev->pdev->dev,
2061 "[PID%d] poll; poll error GPR3 is %x\n",
2066 /* What kind of interrupt from sep was this? */
2067 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2069 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
2072 /* Print request? */
2073 if ((res >> 30) & 0x1) {
2074 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
2076 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
2078 (char *)(sep_dev->shared_addr +
2079 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
2083 /* Request for daemon (not currently in POR)? */
2085 dev_dbg(&sep_dev->pdev->dev,
2086 "[PID%d] sep request; ignoring\n",
2091 /* If we got here, then we have a replay to a sep command */
2093 dev_dbg(&sep_dev->pdev->dev,
2094 "[PID%d] sep reply to command; processing request: %x\n",
2095 current->pid, sep_dev->current_request);
2097 switch (sep_dev->current_request) {
2102 res = crypto_post_op(sep_dev);
2108 switch (sep_dev->current_hash_stage) {
2110 res = hash_init_post_op(sep_dev);
2113 case HASH_FINUP_DATA:
2114 res = hash_update_post_op(sep_dev);
2116 case HASH_FINUP_FINISH:
2118 res = hash_final_post_op(sep_dev);
2121 res = hash_digest_post_op(sep_dev);
2124 pr_debug("sep - invalid stage for hash finish\n");
2128 pr_debug("sep - invalid request for finish\n");
2132 pr_debug("sep - finish returned error %x\n", res);
/*
 * sep_hash_cra_init - ahash tfm init; reserves per-request context space
 * (struct this_task_ctx) so each ahash_request carries its own task state.
 */
2135 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2137 const char *alg_name = crypto_tfm_alg_name(tfm);
2139 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2141 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2142 sizeof(struct this_task_ctx));
/* sep_hash_cra_exit - ahash tfm teardown; nothing to free, debug trace only */
2146 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2148 pr_debug("sep_hash_cra_exit\n");
/*
 * sep_hash_init - workqueue handler for the hash INIT stage.
 * Builds a SEP_HASH_INIT_OPCODE message carrying the hash opmode,
 * submits it to the SEP, and busy-waits (bounded by WAIT_TIME) for the
 * interrupt-side post-op to set are_we_done_yet.
 * NOTE(review): line-numbered extract; braces/returns are elided.
 */
2151 static void sep_hash_init(void *data)
2155 struct ahash_request *req;
2156 struct crypto_ahash *tfm;
2157 struct this_task_ctx *ta_ctx;
2158 struct sep_system_ctx *sctx;
2159 unsigned long end_time;
2160 int are_we_done_yet;
2162 req = (struct ahash_request *)data;
2163 tfm = crypto_ahash_reqtfm(req);
2164 sctx = crypto_ahash_ctx(tfm);
2165 ta_ctx = ahash_request_ctx(req);
2166 ta_ctx->sep_used = sep_dev;
/* completion flag lives on this stack frame; post-op sets it */
2168 ta_ctx->are_we_done_yet = &are_we_done_yet;
2170 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2172 ta_ctx->current_hash_stage = HASH_INIT;
2173 /* opcode and mode */
2174 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2175 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2176 sizeof(u32), sizeof(u32), &msg_offset, 0);
2177 sep_end_msg(ta_ctx, msg_offset);
2179 are_we_done_yet = 0;
2180 result = sep_crypto_take_sep(ta_ctx);
2182 dev_warn(&ta_ctx->sep_used->pdev->dev,
2183 "sep_hash_init take sep failed\n");
2184 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2187 /* now we sit and wait up to a fixed time for completion */
2188 end_time = jiffies + (WAIT_TIME * HZ);
2189 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2192 /* Done waiting; still not done yet? */
2193 if (are_we_done_yet == 0) {
2194 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2195 "hash init never got done\n");
2196 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_update - workqueue handler for the hash UPDATE stage.
 * Splits the request into head (completes the previous partial block),
 * DMA-able middle (whole blocks, sent via dma tables), and tail
 * (leftover partial block); head/tail bytes are copied inline into the
 * message.  Sends SEP_HASH_UPDATE_OPCODE plus the saved hash private
 * context and busy-waits (bounded by WAIT_TIME) for completion.
 * NOTE(review): line-numbered extract; braces, gotos and some arguments
 * are elided.  tail_len at line 2241 is recomputed at 2250 once
 * head_len is known; the dev_dbg at 2242 prints "len", declared in an
 * elided line.
 */
2202 static void sep_hash_update(void *data)
2207 struct sep_hash_internal_context *int_ctx;
2211 int are_we_done_yet;
2214 static char small_buf[100];
2216 struct scatterlist *new_sg;
2217 ssize_t copy_result;
2218 struct ahash_request *req;
2219 struct crypto_ahash *tfm;
2220 struct this_task_ctx *ta_ctx;
2221 struct sep_system_ctx *sctx;
2222 unsigned long end_time;
2224 req = (struct ahash_request *)data;
2225 tfm = crypto_ahash_reqtfm(req);
2226 sctx = crypto_ahash_ctx(tfm);
2227 ta_ctx = ahash_request_ctx(req);
2228 ta_ctx->sep_used = sep_dev;
2230 ta_ctx->are_we_done_yet = &are_we_done_yet;
2232 /* length for queue status */
2233 ta_ctx->nbytes = req->nbytes;
2235 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2236 "sep_hash_update\n");
2237 ta_ctx->current_hash_stage = HASH_UPDATE;
2240 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2241 tail_len = req->nbytes % block_size;
2242 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2243 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2244 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2246 /* Compute header/tail sizes */
2247 int_ctx = (struct sep_hash_internal_context *)&sctx->
2248 hash_private_ctx.internal_context;
2249 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2250 tail_len = (req->nbytes - head_len) % block_size;
2252 /* Make sure all pages are even block */
2253 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2255 block_size, &new_sg, 1);
2257 if (int_error < 0) {
2258 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* was "crash update" — typo for hash update */
2259 "oddball pages error in hash update\n");
2260 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2262 } else if (int_error == 1) {
2263 ta_ctx->src_sg = new_sg;
2264 ta_ctx->src_sg_hold = new_sg;
2266 ta_ctx->src_sg = req->src;
2267 ta_ctx->src_sg_hold = NULL;
2270 src_ptr = sg_virt(ta_ctx->src_sg);
2272 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2277 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
/* only the whole-block middle section goes through the DMA tables */
2279 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2280 ta_ctx->dcb_input_data.data_in_size =
2281 req->nbytes - (head_len + tail_len);
2282 ta_ctx->dcb_input_data.app_out_address = NULL;
2283 ta_ctx->dcb_input_data.block_size = block_size;
2284 ta_ctx->dcb_input_data.tail_block_size = 0;
2285 ta_ctx->dcb_input_data.is_applet = 0;
2286 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2287 ta_ctx->dcb_input_data.dst_sg = NULL;
2289 int_error = sep_create_dcb_dmatables_context_kernel(
2291 &ta_ctx->dcb_region,
2292 &ta_ctx->dmatables_region,
2294 &ta_ctx->dcb_input_data,
2297 dev_warn(&ta_ctx->sep_used->pdev->dev,
2298 "hash update dma table create failed\n");
2299 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2303 /* Construct message to SEP */
2304 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2310 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2313 /* Handle remainders */
/* head bytes are sent inline in a fixed 32-word message slot */
2316 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2317 sizeof(u32), &msg_offset, 0);
2320 copy_result = sg_copy_to_buffer(
2322 sep_sg_nents(ta_ctx->src_sg),
2323 small_buf, head_len);
2325 if (copy_result != head_len) {
2326 dev_warn(&ta_ctx->sep_used->pdev->dev,
2327 "sg head copy failure in hash block\n");
2328 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2332 sep_write_msg(ta_ctx, small_buf, head_len,
2333 sizeof(u32) * 32, &msg_offset, 1);
2335 msg_offset += sizeof(u32) * 32;
/* same inline treatment for the tail remainder */
2339 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2340 sizeof(u32), &msg_offset, 0);
2343 copy_result = sep_copy_offset_sg(
2346 req->nbytes - tail_len,
2347 small_buf, tail_len);
2349 if (copy_result != tail_len) {
2350 dev_warn(&ta_ctx->sep_used->pdev->dev,
2351 "sg tail copy failure in hash block\n");
2352 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2356 sep_write_msg(ta_ctx, small_buf, tail_len,
2357 sizeof(u32) * 32, &msg_offset, 1);
2359 msg_offset += sizeof(u32) * 32;
2363 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2364 sizeof(struct sep_hash_private_context));
2366 sep_end_msg(ta_ctx, msg_offset);
2367 are_we_done_yet = 0;
2368 int_error = sep_crypto_take_sep(ta_ctx);
2370 dev_warn(&ta_ctx->sep_used->pdev->dev,
2371 "sep_hash_update take sep failed\n");
2372 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2375 /* now we sit and wait up to a fixed time for completion */
2376 end_time = jiffies + (WAIT_TIME * HZ);
2377 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2380 /* Done waiting; still not done yet? */
2381 if (are_we_done_yet == 0) {
2382 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2383 "hash update never got done\n");
2384 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_final - workqueue handler for the hash FINISH stage.
 * Builds a SEP_HASH_FINISH_OPCODE message carrying the saved hash
 * private context, submits it, and busy-waits (bounded by WAIT_TIME)
 * for the interrupt-side post-op to flag completion.
 * NOTE(review): line-numbered extract; braces/returns are elided.
 */
2390 static void sep_hash_final(void *data)
2393 struct ahash_request *req;
2394 struct crypto_ahash *tfm;
2395 struct this_task_ctx *ta_ctx;
2396 struct sep_system_ctx *sctx;
2398 unsigned long end_time;
2399 int are_we_done_yet;
2401 req = (struct ahash_request *)data;
2402 tfm = crypto_ahash_reqtfm(req);
2403 sctx = crypto_ahash_ctx(tfm);
2404 ta_ctx = ahash_request_ctx(req);
2405 ta_ctx->sep_used = sep_dev;
2407 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2408 "sep_hash_final\n");
2409 ta_ctx->current_hash_stage = HASH_FINISH;
/* completion flag lives on this stack frame; post-op sets it */
2411 ta_ctx->are_we_done_yet = &are_we_done_yet;
2413 /* opcode and mode */
2414 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2417 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2418 sizeof(struct sep_hash_private_context));
2420 sep_end_msg(ta_ctx, msg_offset);
2421 are_we_done_yet = 0;
2422 result = sep_crypto_take_sep(ta_ctx);
2424 dev_warn(&ta_ctx->sep_used->pdev->dev,
2425 "sep_hash_final take sep failed\n");
2426 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2429 /* now we sit and wait up to a fixed time for completion */
2430 end_time = jiffies + (WAIT_TIME * HZ);
2431 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2434 /* Done waiting; still not done yet? */
2435 if (are_we_done_yet == 0) {
2436 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2437 "hash final job never got done\n");
2438 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/*
 * sep_hash_digest - workqueue handler for a one-shot digest.
 * Like sep_hash_update but uses SEP_HASH_SINGLE_OPCODE and has no head
 * remainder: whole blocks go via DMA tables, the tail partial block is
 * copied inline into the message.  Busy-waits (bounded by WAIT_TIME)
 * for the interrupt-side post-op to flag completion.
 * NOTE(review): line-numbered extract; braces, gotos and some
 * arguments are elided.
 */
2444 static void sep_hash_digest(void *data)
2452 int are_we_done_yet;
2454 static char small_buf[100];
2455 struct scatterlist *new_sg;
2458 struct ahash_request *req;
2459 struct crypto_ahash *tfm;
2460 struct this_task_ctx *ta_ctx;
2461 struct sep_system_ctx *sctx;
2462 unsigned long end_time;
2464 req = (struct ahash_request *)data;
2465 tfm = crypto_ahash_reqtfm(req);
2466 sctx = crypto_ahash_ctx(tfm);
2467 ta_ctx = ahash_request_ctx(req);
2468 ta_ctx->sep_used = sep_dev;
2470 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2471 "sep_hash_digest\n");
2472 ta_ctx->current_hash_stage = HASH_DIGEST;
/* completion flag lives on this stack frame; post-op sets it */
2474 ta_ctx->are_we_done_yet = &are_we_done_yet;
2476 /* length for queue status */
2477 ta_ctx->nbytes = req->nbytes;
2479 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2480 tail_len = req->nbytes % block_size;
2481 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2483 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2485 /* Make sure all pages are even block */
2486 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2488 block_size, &new_sg, 1);
2490 if (int_error < 0) {
2491 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* was "crash update" — copy-paste from sep_hash_update's typo */
2492 "oddball pages error in hash digest\n");
2493 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2495 } else if (int_error == 1) {
2496 ta_ctx->src_sg = new_sg;
2497 ta_ctx->src_sg_hold = new_sg;
2499 ta_ctx->src_sg = req->src;
2500 ta_ctx->src_sg_hold = NULL;
2503 src_ptr = sg_virt(ta_ctx->src_sg);
2505 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2510 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
/* whole blocks only; the tail remainder goes inline in the message */
2512 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2513 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2514 ta_ctx->dcb_input_data.app_out_address = NULL;
2515 ta_ctx->dcb_input_data.block_size = block_size;
2516 ta_ctx->dcb_input_data.tail_block_size = 0;
2517 ta_ctx->dcb_input_data.is_applet = 0;
2518 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2519 ta_ctx->dcb_input_data.dst_sg = NULL;
2521 int_error = sep_create_dcb_dmatables_context_kernel(
2523 &ta_ctx->dcb_region,
2524 &ta_ctx->dmatables_region,
2526 &ta_ctx->dcb_input_data,
2529 dev_warn(&ta_ctx->sep_used->pdev->dev,
/* was "hash update ..." — this is the digest path */
2530 "hash digest dma table create failed\n");
2531 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2535 /* Construct message to SEP */
2536 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2537 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2538 sizeof(u32), sizeof(u32), &msg_offset, 0);
2544 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2548 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2549 sizeof(u32), &msg_offset, 0);
2552 copy_result = sep_copy_offset_sg(
2555 req->nbytes - tail_len,
2556 small_buf, tail_len);
2558 if (copy_result != tail_len) {
2559 dev_warn(&ta_ctx->sep_used->pdev->dev,
2560 "sg tail copy failure in hash block\n");
2561 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2565 sep_write_msg(ta_ctx, small_buf, tail_len,
2566 sizeof(u32) * 32, &msg_offset, 1);
2568 msg_offset += sizeof(u32) * 32;
2571 sep_end_msg(ta_ctx, msg_offset);
2573 are_we_done_yet = 0;
2574 result = sep_crypto_take_sep(ta_ctx);
2576 dev_warn(&ta_ctx->sep_used->pdev->dev,
2577 "sep_hash_digest take sep failed\n");
2578 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2581 /* now we sit and wait up to a fixed time for completion */
2582 end_time = jiffies + (WAIT_TIME * HZ);
2583 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2586 /* Done waiting; still not done yet? */
2587 if (are_we_done_yet == 0) {
2588 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2589 "hash digest job never got done\n");
2590 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2597 * This is what is called by each of the API's provided
2598 * in the kernel crypto descriptors. It is run in a process
2599 * context using the kernel workqueues. Therefore it can
/*
 * sep_dequeuer - workqueue callback that drains the sep crypto queue.
 * @data: pointer to the shared struct crypto_queue (cast back below)
 *
 * Pops one request off the queue under queue_lock, completes any
 * backlogged request with -EINPROGRESS, validates the tfm/alg fields,
 * then dispatches: non-ahash requests go to sep_crypto_block(), ahash
 * requests are routed by ta_ctx->current_hash_stage to the matching
 * sep_hash_* handler.
 * NOTE(review): some statements (braces, returns) are elided in this
 * view; comments describe only the visible flow.
 */
2602 static void sep_dequeuer(void *data)
2604 struct crypto_queue *this_queue;
2605 struct crypto_async_request *async_req;
2606 struct crypto_async_request *backlog;
2607 struct ablkcipher_request *cypher_req;
2608 struct ahash_request *hash_req;
2609 struct sep_system_ctx *sctx;
2610 struct crypto_ahash *hash_tfm;
2611 struct this_task_ctx *ta_ctx;
2614 this_queue = (struct crypto_queue *)data;
/* lock protects both the backlog peek and the dequeue */
2616 spin_lock_irq(&queue_lock);
2617 backlog = crypto_get_backlog(this_queue);
2618 async_req = crypto_dequeue_request(this_queue);
2619 spin_unlock_irq(&queue_lock);
2622 pr_debug("sep crypto queue is empty\n");
/* a backlogged request is notified that it is now in progress */
2627 pr_debug("sep crypto backlog set\n");
2628 if (backlog->complete)
2629 backlog->complete(backlog, -EINPROGRESS);
/* sanity-check the request before dereferencing deeper fields */
2633 if (!async_req->tfm) {
2634 pr_debug("sep crypto queue null tfm\n");
2638 if (!async_req->tfm->__crt_alg) {
2639 pr_debug("sep crypto queue null __crt_alg\n");
2643 if (!async_req->tfm->__crt_alg->cra_type) {
2644 pr_debug("sep crypto queue null cra_type\n");
2648 /* we have stuff in the queue */
/* anything that is not an ahash is treated as a block cipher job */
2649 if (async_req->tfm->__crt_alg->cra_type !=
2650 &crypto_ahash_type) {
2651 /* This is for a cypher */
2652 pr_debug("sep crypto queue doing cipher\n");
2653 cypher_req = container_of(async_req,
2654 struct ablkcipher_request,
2657 pr_debug("sep crypto queue null cypher_req\n");
2661 sep_crypto_block((void *)cypher_req);
2664 /* This is a hash */
2665 pr_debug("sep crypto queue doing hash\n");
2667 * This is a bit more complex than cipher; we
2668 * need to figure out what type of operation
2670 hash_req = ahash_request_cast(async_req);
2672 pr_debug("sep crypto queue null hash_req\n");
2676 hash_tfm = crypto_ahash_reqtfm(hash_req);
2678 pr_debug("sep crypto queue null hash_tfm\n");
2683 sctx = crypto_ahash_ctx(hash_tfm);
2685 pr_debug("sep crypto queue null sctx\n");
2689 ta_ctx = ahash_request_ctx(hash_req);
/* dispatch on the stage recorded by the sep_*_init/update/... entry points */
2691 if (ta_ctx->current_hash_stage == HASH_INIT) {
2692 pr_debug("sep crypto queue hash init\n");
2693 sep_hash_init((void *)hash_req);
2695 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2696 pr_debug("sep crypto queue hash update\n");
2697 sep_hash_update((void *)hash_req);
2699 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2700 pr_debug("sep crypto queue hash final\n");
2701 sep_hash_final((void *)hash_req);
2703 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2704 pr_debug("sep crypto queue hash digest\n");
2705 sep_hash_digest((void *)hash_req);
/* finup is split: data phase reuses update, finish phase reuses final */
2707 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2708 pr_debug("sep crypto queue hash digest\n");
2709 sep_hash_update((void *)hash_req);
2711 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2712 pr_debug("sep crypto queue hash digest\n");
2713 sep_hash_final((void *)hash_req);
2716 pr_debug("sep crypto queue hash oops nothing\n");
/*
 * sep_sha1_init - crypto API .init entry for the "sha1" ahash.
 * @req: ahash request from the kernel crypto API
 *
 * Zeroes the per-request task context, tags it as a SHA1 HASH_INIT
 * job, enqueues it on sep_queue and schedules sep_dequeuer() on the
 * driver workqueue. The crypto_enqueue_request() status is what the
 * caller ultimately receives.
 */
2722 static int sep_sha1_init(struct ahash_request *req)
2726 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2728 pr_debug("sep - doing sha1 init\n");
2730 /* Clear out task context */
2731 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2733 ta_ctx->sep_used = sep_dev;
2734 ta_ctx->current_request = SHA1;
2735 ta_ctx->current_hash_req = req;
2736 ta_ctx->current_cypher_req = NULL;
2737 ta_ctx->hash_opmode = SEP_HASH_SHA1;
/* stage selects the handler inside sep_dequeuer() */
2738 ta_ctx->current_hash_stage = HASH_INIT;
2740 /* lock necessary so that only one entity touches the queues */
2741 spin_lock_irq(&queue_lock);
2742 error = crypto_enqueue_request(&sep_queue, &req->base);
2744 if ((error != 0) && (error != -EINPROGRESS))
2745 pr_debug(" sep - crypto enqueue failed: %x\n",
2747 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2748 sep_dequeuer, (void *)&sep_queue);
2750 pr_debug(" sep - workqueue submit failed: %x\n",
2752 spin_unlock_irq(&queue_lock);
2753 /* We return result of crypto enqueue */
/*
 * sep_sha1_update - crypto API .update entry for the "sha1" ahash.
 * @req: ahash request carrying the next chunk of data
 *
 * Tags the task context as a SHA1 HASH_UPDATE job (context is not
 * cleared here in the visible lines) and queues it for sep_dequeuer().
 */
2757 static int sep_sha1_update(struct ahash_request *req)
2761 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2763 pr_debug("sep - doing sha1 update\n");
2765 ta_ctx->sep_used = sep_dev;
2766 ta_ctx->current_request = SHA1;
2767 ta_ctx->current_hash_req = req;
2768 ta_ctx->current_cypher_req = NULL;
2769 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2770 ta_ctx->current_hash_stage = HASH_UPDATE;
2772 /* lock necessary so that only one entity touches the queues */
2773 spin_lock_irq(&queue_lock);
2774 error = crypto_enqueue_request(&sep_queue, &req->base);
2776 if ((error != 0) && (error != -EINPROGRESS))
2777 pr_debug(" sep - crypto enqueue failed: %x\n",
2779 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2780 sep_dequeuer, (void *)&sep_queue);
2782 pr_debug(" sep - workqueue submit failed: %x\n",
2784 spin_unlock_irq(&queue_lock);
2785 /* We return result of crypto enqueue */
/*
 * sep_sha1_final - crypto API .final entry for the "sha1" ahash.
 * @req: ahash request whose result buffer receives the digest
 *
 * Tags the task context as a SHA1 HASH_FINISH job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
2789 static int sep_sha1_final(struct ahash_request *req)
2793 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2794 pr_debug("sep - doing sha1 final\n");
2796 ta_ctx->sep_used = sep_dev;
2797 ta_ctx->current_request = SHA1;
2798 ta_ctx->current_hash_req = req;
2799 ta_ctx->current_cypher_req = NULL;
2800 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2801 ta_ctx->current_hash_stage = HASH_FINISH;
2803 /* lock necessary so that only one entity touches the queues */
2804 spin_lock_irq(&queue_lock);
2805 error = crypto_enqueue_request(&sep_queue, &req->base);
2807 if ((error != 0) && (error != -EINPROGRESS))
2808 pr_debug(" sep - crypto enqueue failed: %x\n",
2810 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2811 sep_dequeuer, (void *)&sep_queue);
2813 pr_debug(" sep - workqueue submit failed: %x\n",
2815 spin_unlock_irq(&queue_lock);
2816 /* We return result of crypto enqueue */
/*
 * sep_sha1_digest - crypto API .digest entry for the "sha1" ahash.
 * @req: ahash request (one-shot init+update+final)
 *
 * Zeroes the task context, tags it as a SHA1 HASH_DIGEST job and
 * queues it for sep_dequeuer(); returns the crypto enqueue status.
 */
2820 static int sep_sha1_digest(struct ahash_request *req)
2824 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2825 pr_debug("sep - doing sha1 digest\n");
2827 /* Clear out task context */
2828 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2830 ta_ctx->sep_used = sep_dev;
2831 ta_ctx->current_request = SHA1;
2832 ta_ctx->current_hash_req = req;
2833 ta_ctx->current_cypher_req = NULL;
2834 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2835 ta_ctx->current_hash_stage = HASH_DIGEST;
2837 /* lock necessary so that only one entity touches the queues */
2838 spin_lock_irq(&queue_lock);
2839 error = crypto_enqueue_request(&sep_queue, &req->base);
2841 if ((error != 0) && (error != -EINPROGRESS))
2842 pr_debug(" sep - crypto enqueue failed: %x\n",
2844 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2845 sep_dequeuer, (void *)&sep_queue);
2847 pr_debug(" sep - workqueue submit failed: %x\n",
2849 spin_unlock_irq(&queue_lock);
2850 /* We return result of crypto enqueue */
/*
 * sep_sha1_finup - crypto API .finup entry for the "sha1" ahash.
 * @req: ahash request with the final data chunk
 *
 * Tags the task context as HASH_FINUP_DATA; sep_dequeuer() runs the
 * data phase via sep_hash_update() for this stage.
 */
2854 static int sep_sha1_finup(struct ahash_request *req)
2858 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2859 pr_debug("sep - doing sha1 finup\n");
2861 ta_ctx->sep_used = sep_dev;
2862 ta_ctx->current_request = SHA1;
2863 ta_ctx->current_hash_req = req;
2864 ta_ctx->current_cypher_req = NULL;
2865 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2866 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2868 /* lock necessary so that only one entity touches the queues */
2869 spin_lock_irq(&queue_lock);
2870 error = crypto_enqueue_request(&sep_queue, &req->base);
2872 if ((error != 0) && (error != -EINPROGRESS))
2873 pr_debug(" sep - crypto enqueue failed: %x\n",
2875 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2876 sep_dequeuer, (void *)&sep_queue);
2878 pr_debug(" sep - workqueue submit failed: %x\n",
2880 spin_unlock_irq(&queue_lock);
2881 /* We return result of crypto enqueue */
/*
 * sep_md5_init - crypto API .init entry for the "md5" ahash.
 * @req: ahash request from the kernel crypto API
 *
 * Zeroes the task context, tags it as an MD5 HASH_INIT job, and
 * queues it for sep_dequeuer() on the driver workqueue.
 */
2885 static int sep_md5_init(struct ahash_request *req)
2889 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2890 pr_debug("sep - doing md5 init\n");
2892 /* Clear out task context */
2893 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2895 ta_ctx->sep_used = sep_dev;
2896 ta_ctx->current_request = MD5;
2897 ta_ctx->current_hash_req = req;
2898 ta_ctx->current_cypher_req = NULL;
2899 ta_ctx->hash_opmode = SEP_HASH_MD5;
2900 ta_ctx->current_hash_stage = HASH_INIT;
2902 /* lock necessary so that only one entity touches the queues */
2903 spin_lock_irq(&queue_lock);
2904 error = crypto_enqueue_request(&sep_queue, &req->base);
2906 if ((error != 0) && (error != -EINPROGRESS))
2907 pr_debug(" sep - crypto enqueue failed: %x\n",
2909 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2910 sep_dequeuer, (void *)&sep_queue);
2912 pr_debug(" sep - workqueue submit failed: %x\n",
2914 spin_unlock_irq(&queue_lock);
2915 /* We return result of crypto enqueue */
/*
 * sep_md5_update - crypto API .update entry for the "md5" ahash.
 * @req: ahash request carrying the next chunk of data
 *
 * Tags the task context as an MD5 HASH_UPDATE job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
2919 static int sep_md5_update(struct ahash_request *req)
2923 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2924 pr_debug("sep - doing md5 update\n");
2926 ta_ctx->sep_used = sep_dev;
2927 ta_ctx->current_request = MD5;
2928 ta_ctx->current_hash_req = req;
2929 ta_ctx->current_cypher_req = NULL;
2930 ta_ctx->hash_opmode = SEP_HASH_MD5;
2931 ta_ctx->current_hash_stage = HASH_UPDATE;
2933 /* lock necessary so that only one entity touches the queues */
2934 spin_lock_irq(&queue_lock);
2935 error = crypto_enqueue_request(&sep_queue, &req->base);
2937 if ((error != 0) && (error != -EINPROGRESS))
2938 pr_debug(" sep - crypto enqueue failed: %x\n",
2940 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2941 sep_dequeuer, (void *)&sep_queue);
2943 pr_debug(" sep - workqueue submit failed: %x\n",
2945 spin_unlock_irq(&queue_lock);
2946 /* We return result of crypto enqueue */
/*
 * sep_md5_final - crypto API .final entry for the "md5" ahash.
 * @req: ahash request whose result buffer receives the digest
 *
 * Tags the task context as an MD5 HASH_FINISH job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
2950 static int sep_md5_final(struct ahash_request *req)
2954 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2955 pr_debug("sep - doing md5 final\n");
2957 ta_ctx->sep_used = sep_dev;
2958 ta_ctx->current_request = MD5;
2959 ta_ctx->current_hash_req = req;
2960 ta_ctx->current_cypher_req = NULL;
2961 ta_ctx->hash_opmode = SEP_HASH_MD5;
2962 ta_ctx->current_hash_stage = HASH_FINISH;
2964 /* lock necessary so that only one entity touches the queues */
2965 spin_lock_irq(&queue_lock);
2966 error = crypto_enqueue_request(&sep_queue, &req->base);
2968 if ((error != 0) && (error != -EINPROGRESS))
2969 pr_debug(" sep - crypto enqueue failed: %x\n",
2971 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2972 sep_dequeuer, (void *)&sep_queue);
2974 pr_debug(" sep - workqueue submit failed: %x\n",
2976 spin_unlock_irq(&queue_lock);
2977 /* We return result of crypto enqueue */
/*
 * sep_md5_digest - crypto API .digest entry for the "md5" ahash.
 * @req: ahash request (one-shot init+update+final)
 *
 * Zeroes the task context, tags it as an MD5 HASH_DIGEST job and
 * queues it for sep_dequeuer(); returns the crypto enqueue status.
 */
2981 static int sep_md5_digest(struct ahash_request *req)
2985 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2987 pr_debug("sep - doing md5 digest\n");
2989 /* Clear out task context */
2990 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2992 ta_ctx->sep_used = sep_dev;
2993 ta_ctx->current_request = MD5;
2994 ta_ctx->current_hash_req = req;
2995 ta_ctx->current_cypher_req = NULL;
2996 ta_ctx->hash_opmode = SEP_HASH_MD5;
2997 ta_ctx->current_hash_stage = HASH_DIGEST;
2999 /* lock necessary so that only one entity touches the queues */
3000 spin_lock_irq(&queue_lock);
3001 error = crypto_enqueue_request(&sep_queue, &req->base);
3003 if ((error != 0) && (error != -EINPROGRESS))
3004 pr_debug(" sep - crypto enqueue failed: %x\n",
3006 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3007 sep_dequeuer, (void *)&sep_queue);
3009 pr_debug(" sep - workqueue submit failed: %x\n",
3011 spin_unlock_irq(&queue_lock);
3012 /* We return result of crypto enqueue */
/*
 * sep_md5_finup - crypto API .finup entry for the "md5" ahash.
 * @req: ahash request with the final data chunk
 *
 * Tags the task context as HASH_FINUP_DATA; sep_dequeuer() runs the
 * data phase via sep_hash_update() for this stage.
 */
3016 static int sep_md5_finup(struct ahash_request *req)
3020 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3022 pr_debug("sep - doing md5 finup\n");
3024 ta_ctx->sep_used = sep_dev;
3025 ta_ctx->current_request = MD5;
3026 ta_ctx->current_hash_req = req;
3027 ta_ctx->current_cypher_req = NULL;
3028 ta_ctx->hash_opmode = SEP_HASH_MD5;
3029 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3031 /* lock necessary so that only one entity touches the queues */
3032 spin_lock_irq(&queue_lock);
3033 error = crypto_enqueue_request(&sep_queue, &req->base);
3035 if ((error != 0) && (error != -EINPROGRESS))
3036 pr_debug(" sep - crypto enqueue failed: %x\n",
3038 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3039 sep_dequeuer, (void *)&sep_queue);
3041 pr_debug(" sep - workqueue submit failed: %x\n",
3043 spin_unlock_irq(&queue_lock);
3044 /* We return result of crypto enqueue */
/*
 * sep_sha224_init - crypto API .init entry for the "sha224" ahash.
 * @req: ahash request from the kernel crypto API
 *
 * Zeroes the task context, tags it as a SHA224 HASH_INIT job, and
 * queues it for sep_dequeuer() on the driver workqueue.
 */
3048 static int sep_sha224_init(struct ahash_request *req)
3052 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3053 pr_debug("sep - doing sha224 init\n");
3055 /* Clear out task context */
3056 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3058 ta_ctx->sep_used = sep_dev;
3059 ta_ctx->current_request = SHA224;
3060 ta_ctx->current_hash_req = req;
3061 ta_ctx->current_cypher_req = NULL;
3062 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3063 ta_ctx->current_hash_stage = HASH_INIT;
3065 /* lock necessary so that only one entity touches the queues */
3066 spin_lock_irq(&queue_lock);
3067 error = crypto_enqueue_request(&sep_queue, &req->base);
3069 if ((error != 0) && (error != -EINPROGRESS))
3070 pr_debug(" sep - crypto enqueue failed: %x\n",
3072 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3073 sep_dequeuer, (void *)&sep_queue);
3075 pr_debug(" sep - workqueue submit failed: %x\n",
3077 spin_unlock_irq(&queue_lock);
3078 /* We return result of crypto enqueue */
/*
 * sep_sha224_update - crypto API .update entry for the "sha224" ahash.
 * @req: ahash request carrying the next chunk of data
 *
 * Tags the task context as a SHA224 HASH_UPDATE job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
3082 static int sep_sha224_update(struct ahash_request *req)
3086 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3087 pr_debug("sep - doing sha224 update\n");
3089 ta_ctx->sep_used = sep_dev;
3090 ta_ctx->current_request = SHA224;
3091 ta_ctx->current_hash_req = req;
3092 ta_ctx->current_cypher_req = NULL;
3093 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3094 ta_ctx->current_hash_stage = HASH_UPDATE;
3096 /* lock necessary so that only one entity touches the queues */
3097 spin_lock_irq(&queue_lock);
3098 error = crypto_enqueue_request(&sep_queue, &req->base);
3100 if ((error != 0) && (error != -EINPROGRESS))
3101 pr_debug(" sep - crypto enqueue failed: %x\n",
3103 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3104 sep_dequeuer, (void *)&sep_queue);
3106 pr_debug(" sep - workqueue submit failed: %x\n",
3108 spin_unlock_irq(&queue_lock);
3109 /* We return result of crypto enqueue */
/*
 * sep_sha224_final - crypto API .final entry for the "sha224" ahash.
 * @req: ahash request whose result buffer receives the digest
 *
 * Tags the task context as a SHA224 HASH_FINISH job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
3113 static int sep_sha224_final(struct ahash_request *req)
3117 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3118 pr_debug("sep - doing sha224 final\n");
3120 ta_ctx->sep_used = sep_dev;
3121 ta_ctx->current_request = SHA224;
3122 ta_ctx->current_hash_req = req;
3123 ta_ctx->current_cypher_req = NULL;
3124 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3125 ta_ctx->current_hash_stage = HASH_FINISH;
3127 /* lock necessary so that only one entity touches the queues */
3128 spin_lock_irq(&queue_lock);
3129 error = crypto_enqueue_request(&sep_queue, &req->base);
3131 if ((error != 0) && (error != -EINPROGRESS))
3132 pr_debug(" sep - crypto enqueue failed: %x\n",
3134 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3135 sep_dequeuer, (void *)&sep_queue);
3137 pr_debug(" sep - workqueue submit failed: %x\n",
3139 spin_unlock_irq(&queue_lock);
3140 /* We return result of crypto enqueue */
/*
 * sep_sha224_digest - crypto API .digest entry for the "sha224" ahash.
 * @req: ahash request (one-shot init+update+final)
 *
 * Zeroes the task context, tags it as a SHA224 HASH_DIGEST job and
 * queues it for sep_dequeuer(); returns the crypto enqueue status.
 */
3144 static int sep_sha224_digest(struct ahash_request *req)
3148 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3150 pr_debug("sep - doing sha224 digest\n");
3152 /* Clear out task context */
3153 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3155 ta_ctx->sep_used = sep_dev;
3156 ta_ctx->current_request = SHA224;
3157 ta_ctx->current_hash_req = req;
3158 ta_ctx->current_cypher_req = NULL;
3159 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3160 ta_ctx->current_hash_stage = HASH_DIGEST;
3162 /* lock necessary so that only one entity touches the queues */
3163 spin_lock_irq(&queue_lock);
3164 error = crypto_enqueue_request(&sep_queue, &req->base);
3166 if ((error != 0) && (error != -EINPROGRESS))
3167 pr_debug(" sep - crypto enqueue failed: %x\n",
3169 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3170 sep_dequeuer, (void *)&sep_queue);
3172 pr_debug(" sep - workqueue submit failed: %x\n",
3174 spin_unlock_irq(&queue_lock);
3175 /* We return result of crypto enqueue */
/*
 * sep_sha224_finup - crypto API .finup entry for the "sha224" ahash.
 * @req: ahash request with the final data chunk
 *
 * Tags the task context as HASH_FINUP_DATA; sep_dequeuer() runs the
 * data phase via sep_hash_update() for this stage.
 */
3179 static int sep_sha224_finup(struct ahash_request *req)
3183 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3185 pr_debug("sep - doing sha224 finup\n");
3187 ta_ctx->sep_used = sep_dev;
3188 ta_ctx->current_request = SHA224;
3189 ta_ctx->current_hash_req = req;
3190 ta_ctx->current_cypher_req = NULL;
3191 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3192 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3194 /* lock necessary so that only one entity touches the queues */
3195 spin_lock_irq(&queue_lock);
3196 error = crypto_enqueue_request(&sep_queue, &req->base);
3198 if ((error != 0) && (error != -EINPROGRESS))
3199 pr_debug(" sep - crypto enqueue failed: %x\n",
3201 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3202 sep_dequeuer, (void *)&sep_queue);
3204 pr_debug(" sep - workqueue submit failed: %x\n",
3206 spin_unlock_irq(&queue_lock);
3207 /* We return result of crypto enqueue */
/*
 * sep_sha256_init - crypto API .init entry for the "sha256" ahash.
 * @req: ahash request from the kernel crypto API
 *
 * Zeroes the task context, tags it as a SHA256 HASH_INIT job, and
 * queues it for sep_dequeuer() on the driver workqueue.
 */
3211 static int sep_sha256_init(struct ahash_request *req)
3215 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3216 pr_debug("sep - doing sha256 init\n");
3218 /* Clear out task context */
3219 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3221 ta_ctx->sep_used = sep_dev;
3222 ta_ctx->current_request = SHA256;
3223 ta_ctx->current_hash_req = req;
3224 ta_ctx->current_cypher_req = NULL;
3225 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3226 ta_ctx->current_hash_stage = HASH_INIT;
3228 /* lock necessary so that only one entity touches the queues */
3229 spin_lock_irq(&queue_lock);
3230 error = crypto_enqueue_request(&sep_queue, &req->base);
3232 if ((error != 0) && (error != -EINPROGRESS))
3233 pr_debug(" sep - crypto enqueue failed: %x\n",
3235 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3236 sep_dequeuer, (void *)&sep_queue);
3238 pr_debug(" sep - workqueue submit failed: %x\n",
3240 spin_unlock_irq(&queue_lock);
3241 /* We return result of crypto enqueue */
/*
 * sep_sha256_update - crypto API .update entry for the "sha256" ahash.
 * @req: ahash request carrying the next chunk of data
 *
 * Tags the task context as a SHA256 HASH_UPDATE job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
3245 static int sep_sha256_update(struct ahash_request *req)
3249 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3250 pr_debug("sep - doing sha256 update\n");
3252 ta_ctx->sep_used = sep_dev;
3253 ta_ctx->current_request = SHA256;
3254 ta_ctx->current_hash_req = req;
3255 ta_ctx->current_cypher_req = NULL;
3256 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3257 ta_ctx->current_hash_stage = HASH_UPDATE;
3259 /* lock necessary so that only one entity touches the queues */
3260 spin_lock_irq(&queue_lock);
3261 error = crypto_enqueue_request(&sep_queue, &req->base);
3263 if ((error != 0) && (error != -EINPROGRESS))
3264 pr_debug(" sep - crypto enqueue failed: %x\n",
3266 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3267 sep_dequeuer, (void *)&sep_queue);
3269 pr_debug(" sep - workqueue submit failed: %x\n",
3271 spin_unlock_irq(&queue_lock);
3272 /* We return result of crypto enqueue */
/*
 * sep_sha256_final - crypto API .final entry for the "sha256" ahash.
 * @req: ahash request whose result buffer receives the digest
 *
 * Tags the task context as a SHA256 HASH_FINISH job and queues it for
 * sep_dequeuer(); returns the crypto enqueue status.
 */
3276 static int sep_sha256_final(struct ahash_request *req)
3280 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3281 pr_debug("sep - doing sha256 final\n");
3283 ta_ctx->sep_used = sep_dev;
3284 ta_ctx->current_request = SHA256;
3285 ta_ctx->current_hash_req = req;
3286 ta_ctx->current_cypher_req = NULL;
3287 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3288 ta_ctx->current_hash_stage = HASH_FINISH;
3290 /* lock necessary so that only one entity touches the queues */
3291 spin_lock_irq(&queue_lock);
3292 error = crypto_enqueue_request(&sep_queue, &req->base);
3294 if ((error != 0) && (error != -EINPROGRESS))
3295 pr_debug(" sep - crypto enqueue failed: %x\n",
3297 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3298 sep_dequeuer, (void *)&sep_queue);
3300 pr_debug(" sep - workqueue submit failed: %x\n",
3302 spin_unlock_irq(&queue_lock);
3303 /* We return result of crypto enqueue */
/*
 * sep_sha256_digest - crypto API .digest entry for the "sha256" ahash.
 * @req: ahash request (one-shot init+update+final)
 *
 * Zeroes the task context, tags it as a SHA256 HASH_DIGEST job and
 * queues it for sep_dequeuer(); returns the crypto enqueue status.
 */
3307 static int sep_sha256_digest(struct ahash_request *req)
3311 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3313 pr_debug("sep - doing sha256 digest\n");
3315 /* Clear out task context */
3316 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3318 ta_ctx->sep_used = sep_dev;
3319 ta_ctx->current_request = SHA256;
3320 ta_ctx->current_hash_req = req;
3321 ta_ctx->current_cypher_req = NULL;
3322 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3323 ta_ctx->current_hash_stage = HASH_DIGEST;
3325 /* lock necessary so that only one entity touches the queues */
3326 spin_lock_irq(&queue_lock);
3327 error = crypto_enqueue_request(&sep_queue, &req->base);
3329 if ((error != 0) && (error != -EINPROGRESS))
3330 pr_debug(" sep - crypto enqueue failed: %x\n",
3332 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3333 sep_dequeuer, (void *)&sep_queue);
3335 pr_debug(" sep - workqueue submit failed: %x\n",
3337 spin_unlock_irq(&queue_lock);
3338 /* We return result of crypto enqueue */
/*
 * sep_sha256_finup - crypto API .finup entry for the "sha256" ahash.
 * @req: ahash request with the final data chunk
 *
 * Tags the task context as HASH_FINUP_DATA; sep_dequeuer() runs the
 * data phase via sep_hash_update() for this stage.
 */
3342 static int sep_sha256_finup(struct ahash_request *req)
3346 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3348 pr_debug("sep - doing sha256 finup\n");
3350 ta_ctx->sep_used = sep_dev;
3351 ta_ctx->current_request = SHA256;
3352 ta_ctx->current_hash_req = req;
3353 ta_ctx->current_cypher_req = NULL;
3354 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3355 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3357 /* lock necessary so that only one entity touches the queues */
3358 spin_lock_irq(&queue_lock);
3359 error = crypto_enqueue_request(&sep_queue, &req->base);
3361 if ((error != 0) && (error != -EINPROGRESS))
3362 pr_debug(" sep - crypto enqueue failed: %x\n",
3364 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3365 sep_dequeuer, (void *)&sep_queue);
3367 pr_debug(" sep - workqueue submit failed: %x\n",
3369 spin_unlock_irq(&queue_lock);
3370 /* We return result of crypto enqueue */
/*
 * sep_crypto_init - .cra_init hook for the ablkcipher algorithms.
 * @tfm: transform being instantiated
 *
 * Logs the algorithm name (or its absence) and reserves per-request
 * context space for struct this_task_ctx.
 */
3374 static int sep_crypto_init(struct crypto_tfm *tfm)
3376 const char *alg_name = crypto_tfm_alg_name(tfm);
3378 if (alg_name == NULL)
3379 pr_debug("sep_crypto_init alg is NULL\n");
3381 pr_debug("sep_crypto_init alg is %s\n", alg_name);
/* every request gets room for a driver task context */
3383 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
/*
 * sep_crypto_exit - .cra_exit hook; nothing to release here beyond a
 * debug trace (no per-tfm resources are allocated in sep_crypto_init
 * as visible in this file).
 */
3387 static void sep_crypto_exit(struct crypto_tfm *tfm)
3389 pr_debug("sep_crypto_exit\n");
/*
 * sep_aes_setkey - record an AES key in the per-tfm system context.
 * @tfm:    ablkcipher transform
 * @key:    raw key bytes supplied by the caller
 * @keylen: key length in bytes
 *
 * Maps the key length to an AES key-size enum (invalid lengths hit
 * the debug message below), then stores the key bytes, zero-padded to
 * SEP_AES_MAX_KEY_SIZE_WORDS, in sctx->key.aes. The encrypt/decrypt
 * paths later forward the key to the SEP firmware.
 */
3392 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3393 unsigned int keylen)
3395 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3397 pr_debug("sep aes setkey\n");
3399 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
/* case labels select sctx->aes_key_size from the supplied length */
3401 case SEP_AES_KEY_128_SIZE:
3402 sctx->aes_key_size = AES_128;
3404 case SEP_AES_KEY_192_SIZE:
3405 sctx->aes_key_size = AES_192;
3407 case SEP_AES_KEY_256_SIZE:
3408 sctx->aes_key_size = AES_256;
3410 case SEP_AES_KEY_512_SIZE:
3411 sctx->aes_key_size = AES_512;
3414 pr_debug("invalid sep aes key size %x\n",
/* zero the whole slot first so shorter keys are zero-padded */
3419 memset(&sctx->key.aes, 0, sizeof(u32) *
3420 SEP_AES_MAX_KEY_SIZE_WORDS);
3421 memcpy(&sctx->key.aes, key, keylen);
3422 sctx->keylen = keylen;
3423 /* Indicate to encrypt/decrypt function to send key to SEP */
/*
 * sep_aes_ecb_encrypt - queue an AES-ECB encrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Zeroes the task context, fills in the AES-ECB encrypt opcodes and
 * modes, enqueues the request on sep_queue and schedules
 * sep_dequeuer(); returns the crypto enqueue status.
 */
3429 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3433 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3435 pr_debug("sep - doing aes ecb encrypt\n");
3437 /* Clear out task context */
3438 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3440 ta_ctx->sep_used = sep_dev;
3441 ta_ctx->current_request = AES_ECB;
3442 ta_ctx->current_hash_req = NULL;
3443 ta_ctx->current_cypher_req = req;
3444 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3445 ta_ctx->aes_opmode = SEP_AES_ECB;
3446 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3447 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3449 /* lock necessary so that only one entity touches the queues */
3450 spin_lock_irq(&queue_lock);
3451 error = crypto_enqueue_request(&sep_queue, &req->base);
3453 if ((error != 0) && (error != -EINPROGRESS))
3454 pr_debug(" sep - crypto enqueue failed: %x\n",
3456 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3457 sep_dequeuer, (void *)&sep_queue);
3459 pr_debug(" sep - workqueue submit failed: %x\n",
3461 spin_unlock_irq(&queue_lock);
3462 /* We return result of crypto enqueue */
/*
 * sep_aes_ecb_decrypt - queue an AES-ECB decrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Same flow as sep_aes_ecb_encrypt() with aes_encmode set to
 * SEP_AES_DECRYPT; returns the crypto enqueue status.
 */
3466 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3470 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3472 pr_debug("sep - doing aes ecb decrypt\n");
3474 /* Clear out task context */
3475 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3477 ta_ctx->sep_used = sep_dev;
3478 ta_ctx->current_request = AES_ECB;
3479 ta_ctx->current_hash_req = NULL;
3480 ta_ctx->current_cypher_req = req;
3481 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3482 ta_ctx->aes_opmode = SEP_AES_ECB;
3483 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3484 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3486 /* lock necessary so that only one entity touches the queues */
3487 spin_lock_irq(&queue_lock);
3488 error = crypto_enqueue_request(&sep_queue, &req->base);
3490 if ((error != 0) && (error != -EINPROGRESS))
3491 pr_debug(" sep - crypto enqueue failed: %x\n",
3493 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3494 sep_dequeuer, (void *)&sep_queue);
3496 pr_debug(" sep - workqueue submit failed: %x\n",
3498 spin_unlock_irq(&queue_lock);
3499 /* We return result of crypto enqueue */
/*
 * sep_aes_cbc_encrypt - queue an AES-CBC encrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Zeroes the task context, fills in the AES-CBC encrypt opcodes and
 * modes, then enqueues the request for sep_dequeuer(); returns the
 * crypto enqueue status. sctx is looked up only for the debug trace.
 */
3503 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3507 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3508 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3509 crypto_ablkcipher_reqtfm(req));
3511 pr_debug("sep - doing aes cbc encrypt\n");
3513 /* Clear out task context */
3514 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3516 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3517 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3519 ta_ctx->sep_used = sep_dev;
3520 ta_ctx->current_request = AES_CBC;
3521 ta_ctx->current_hash_req = NULL;
3522 ta_ctx->current_cypher_req = req;
3523 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3524 ta_ctx->aes_opmode = SEP_AES_CBC;
3525 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3526 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3528 /* lock necessary so that only one entity touches the queues */
3529 spin_lock_irq(&queue_lock);
3530 error = crypto_enqueue_request(&sep_queue, &req->base);
3532 if ((error != 0) && (error != -EINPROGRESS))
3533 pr_debug(" sep - crypto enqueue failed: %x\n",
3535 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3536 sep_dequeuer, (void *)&sep_queue);
3538 pr_debug(" sep - workqueue submit failed: %x\n",
3540 spin_unlock_irq(&queue_lock);
3541 /* We return result of crypto enqueue */
/*
 * sep_aes_cbc_decrypt - queue an AES-CBC decrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Same flow as sep_aes_cbc_encrypt() with aes_encmode set to
 * SEP_AES_DECRYPT. Note the debug trace here runs before the memset,
 * the reverse of the encrypt path — harmless, but inconsistent.
 */
3545 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3549 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3550 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3551 crypto_ablkcipher_reqtfm(req));
3553 pr_debug("sep - doing aes cbc decrypt\n");
3555 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3556 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3558 /* Clear out task context */
3559 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3561 ta_ctx->sep_used = sep_dev;
3562 ta_ctx->current_request = AES_CBC;
3563 ta_ctx->current_hash_req = NULL;
3564 ta_ctx->current_cypher_req = req;
3565 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3566 ta_ctx->aes_opmode = SEP_AES_CBC;
3567 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3568 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3570 /* lock necessary so that only one entity touches the queues */
3571 spin_lock_irq(&queue_lock);
3572 error = crypto_enqueue_request(&sep_queue, &req->base);
3574 if ((error != 0) && (error != -EINPROGRESS))
3575 pr_debug(" sep - crypto enqueue failed: %x\n",
3577 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3578 sep_dequeuer, (void *)&sep_queue);
3580 pr_debug(" sep - workqueue submit failed: %x\n",
3582 spin_unlock_irq(&queue_lock);
3583 /* We return result of crypto enqueue */
/*
 * sep_des_setkey - record a DES / 3DES key in the per-tfm context.
 * @tfm:    ablkcipher transform
 * @key:    raw key bytes supplied by the caller
 * @keylen: key length in bytes (1x, 2x or 3x DES_KEY_SIZE)
 *
 * Sets des_nbr_keys from the key length, rejects weak keys when the
 * caller asked for weak-key checking (CRYPTO_TFM_REQ_WEAK_KEY), then
 * stores the key bytes in sctx->key.des for later delivery to SEP.
 */
3587 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3588 unsigned int keylen)
3590 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3591 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3592 u32 *flags = &ctfm->crt_flags;
3594 pr_debug("sep des setkey\n");
/* case labels translate key length into the number of DES keys */
3598 sctx->des_nbr_keys = DES_KEY_1;
3600 case DES_KEY_SIZE * 2:
3601 sctx->des_nbr_keys = DES_KEY_2;
3603 case DES_KEY_SIZE * 3:
3604 sctx->des_nbr_keys = DES_KEY_3;
3607 pr_debug("invalid key size %x\n",
/* weak-key rejection is opt-in via the request flag */
3612 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3613 (sep_weak_key(key, keylen))) {
3615 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3616 pr_debug("weak key\n");
3620 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3621 memcpy(&sctx->key.des.key1, key, keylen);
3622 sctx->keylen = keylen;
3623 /* Indicate to encrypt/decrypt function to send key to SEP */
/*
 * sep_des_ebc_encrypt - queue a DES-ECB encrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 * (function name spells "ebc"; the operation is ECB, per the opmode)
 *
 * Zeroes the task context, fills in the DES-ECB encrypt opcodes, and
 * queues the request for sep_dequeuer(); returns the enqueue status.
 */
3629 static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
3633 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3635 pr_debug("sep - doing des ecb encrypt\n");
3637 /* Clear out task context */
3638 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3640 ta_ctx->sep_used = sep_dev;
3641 ta_ctx->current_request = DES_ECB;
3642 ta_ctx->current_hash_req = NULL;
3643 ta_ctx->current_cypher_req = req;
3644 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3645 ta_ctx->des_opmode = SEP_DES_ECB;
3646 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3647 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3649 /* lock necessary so that only one entity touches the queues */
3650 spin_lock_irq(&queue_lock);
3651 error = crypto_enqueue_request(&sep_queue, &req->base);
3653 if ((error != 0) && (error != -EINPROGRESS))
3654 pr_debug(" sep - crypto enqueue failed: %x\n",
3656 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3657 sep_dequeuer, (void *)&sep_queue);
3659 pr_debug(" sep - workqueue submit failed: %x\n",
3661 spin_unlock_irq(&queue_lock);
3662 /* We return result of crypto enqueue */
/*
 * sep_des_ebc_decrypt - queue a DES-ECB decrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Same flow as sep_des_ebc_encrypt() with des_encmode set to
 * SEP_DES_DECRYPT; returns the crypto enqueue status.
 */
3666 static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
3670 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3672 pr_debug("sep - doing des ecb decrypt\n");
3674 /* Clear out task context */
3675 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3677 ta_ctx->sep_used = sep_dev;
3678 ta_ctx->current_request = DES_ECB;
3679 ta_ctx->current_hash_req = NULL;
3680 ta_ctx->current_cypher_req = req;
3681 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3682 ta_ctx->des_opmode = SEP_DES_ECB;
3683 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3684 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3686 /* lock necessary so that only one entity touches the queues */
3687 spin_lock_irq(&queue_lock);
3688 error = crypto_enqueue_request(&sep_queue, &req->base);
3690 if ((error != 0) && (error != -EINPROGRESS))
3691 pr_debug(" sep - crypto enqueue failed: %x\n",
3693 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3694 sep_dequeuer, (void *)&sep_queue);
3696 pr_debug(" sep - workqueue submit failed: %x\n",
3698 spin_unlock_irq(&queue_lock);
3699 /* We return result of crypto enqueue */
/*
 * sep_des_cbc_encrypt - queue a DES-CBC encrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Zeroes the task context, fills in the DES-CBC encrypt opcodes, and
 * queues the request for sep_dequeuer(); returns the enqueue status.
 */
3703 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3707 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3709 pr_debug("sep - doing des cbc encrypt\n");
3711 /* Clear out task context */
3712 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3714 ta_ctx->sep_used = sep_dev;
3715 ta_ctx->current_request = DES_CBC;
3716 ta_ctx->current_hash_req = NULL;
3717 ta_ctx->current_cypher_req = req;
3718 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3719 ta_ctx->des_opmode = SEP_DES_CBC;
3720 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3721 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3723 /* lock necessary so that only one entity touches the queues */
3724 spin_lock_irq(&queue_lock);
3725 error = crypto_enqueue_request(&sep_queue, &req->base);
3727 if ((error != 0) && (error != -EINPROGRESS))
3728 pr_debug(" sep - crypto enqueue failed: %x\n",
3730 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3731 sep_dequeuer, (void *)&sep_queue);
3733 pr_debug(" sep - workqueue submit failed: %x\n",
3735 spin_unlock_irq(&queue_lock);
3736 /* We return result of crypto enqueue */
/*
 * sep_des_cbc_decrypt - queue a DES-CBC decrypt request for the SEP.
 * @req: ablkcipher request from the kernel crypto API
 *
 * Same flow as sep_des_cbc_encrypt() with des_encmode set to
 * SEP_DES_DECRYPT; returns the crypto enqueue status.
 * NOTE(review): the debug string below says "ecb" but this is the CBC
 * decrypt path (des_opmode = SEP_DES_CBC) — copy/paste slip worth a
 * follow-up fix.
 */
3740 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3744 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3746 pr_debug("sep - doing des ecb decrypt\n");
3748 /* Clear out task context */
3749 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3751 ta_ctx->sep_used = sep_dev;
3752 ta_ctx->current_request = DES_CBC;
3753 ta_ctx->current_hash_req = NULL;
3754 ta_ctx->current_cypher_req = req;
3755 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3756 ta_ctx->des_opmode = SEP_DES_CBC;
3757 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3758 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3760 /* lock necessary so that only one entity touches the queues */
3761 spin_lock_irq(&queue_lock);
3762 error = crypto_enqueue_request(&sep_queue, &req->base);
3764 if ((error != 0) && (error != -EINPROGRESS))
3765 pr_debug(" sep - crypto enqueue failed: %x\n",
3767 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3768 sep_dequeuer, (void *)&sep_queue);
3770 pr_debug(" sep - workqueue submit failed: %x\n",
3772 spin_unlock_irq(&queue_lock);
3773 /* We return result of crypto enqueue */
3777 static struct ahash_alg hash_algs[] = {
3779 .init = sep_sha1_init,
3780 .update = sep_sha1_update,
3781 .final = sep_sha1_final,
3782 .digest = sep_sha1_digest,
3783 .finup = sep_sha1_finup,
3785 .digestsize = SHA1_DIGEST_SIZE,
3788 .cra_driver_name = "sha1-sep",
3789 .cra_priority = 100,
3790 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3792 .cra_blocksize = SHA1_BLOCK_SIZE,
3793 .cra_ctxsize = sizeof(struct sep_system_ctx),
3795 .cra_module = THIS_MODULE,
3796 .cra_init = sep_hash_cra_init,
3797 .cra_exit = sep_hash_cra_exit,
3802 .init = sep_md5_init,
3803 .update = sep_md5_update,
3804 .final = sep_md5_final,
3805 .digest = sep_md5_digest,
3806 .finup = sep_md5_finup,
3808 .digestsize = MD5_DIGEST_SIZE,
3811 .cra_driver_name = "md5-sep",
3812 .cra_priority = 100,
3813 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3815 .cra_blocksize = SHA1_BLOCK_SIZE,
3816 .cra_ctxsize = sizeof(struct sep_system_ctx),
3818 .cra_module = THIS_MODULE,
3819 .cra_init = sep_hash_cra_init,
3820 .cra_exit = sep_hash_cra_exit,
3825 .init = sep_sha224_init,
3826 .update = sep_sha224_update,
3827 .final = sep_sha224_final,
3828 .digest = sep_sha224_digest,
3829 .finup = sep_sha224_finup,
3831 .digestsize = SHA224_DIGEST_SIZE,
3833 .cra_name = "sha224",
3834 .cra_driver_name = "sha224-sep",
3835 .cra_priority = 100,
3836 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3838 .cra_blocksize = SHA224_BLOCK_SIZE,
3839 .cra_ctxsize = sizeof(struct sep_system_ctx),
3841 .cra_module = THIS_MODULE,
3842 .cra_init = sep_hash_cra_init,
3843 .cra_exit = sep_hash_cra_exit,
3848 .init = sep_sha256_init,
3849 .update = sep_sha256_update,
3850 .final = sep_sha256_final,
3851 .digest = sep_sha256_digest,
3852 .finup = sep_sha256_finup,
3854 .digestsize = SHA256_DIGEST_SIZE,
3856 .cra_name = "sha256",
3857 .cra_driver_name = "sha256-sep",
3858 .cra_priority = 100,
3859 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3861 .cra_blocksize = SHA256_BLOCK_SIZE,
3862 .cra_ctxsize = sizeof(struct sep_system_ctx),
3864 .cra_module = THIS_MODULE,
3865 .cra_init = sep_hash_cra_init,
3866 .cra_exit = sep_hash_cra_exit,
3872 static struct crypto_alg crypto_algs[] = {
3874 .cra_name = "ecb(aes)",
3875 .cra_driver_name = "ecb-aes-sep",
3876 .cra_priority = 100,
3877 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3878 .cra_blocksize = AES_BLOCK_SIZE,
3879 .cra_ctxsize = sizeof(struct sep_system_ctx),
3881 .cra_type = &crypto_ablkcipher_type,
3882 .cra_module = THIS_MODULE,
3883 .cra_init = sep_crypto_init,
3884 .cra_exit = sep_crypto_exit,
3885 .cra_u.ablkcipher = {
3886 .min_keysize = AES_MIN_KEY_SIZE,
3887 .max_keysize = AES_MAX_KEY_SIZE,
3888 .setkey = sep_aes_setkey,
3889 .encrypt = sep_aes_ecb_encrypt,
3890 .decrypt = sep_aes_ecb_decrypt,
3894 .cra_name = "cbc(aes)",
3895 .cra_driver_name = "cbc-aes-sep",
3896 .cra_priority = 100,
3897 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3898 .cra_blocksize = AES_BLOCK_SIZE,
3899 .cra_ctxsize = sizeof(struct sep_system_ctx),
3901 .cra_type = &crypto_ablkcipher_type,
3902 .cra_module = THIS_MODULE,
3903 .cra_init = sep_crypto_init,
3904 .cra_exit = sep_crypto_exit,
3905 .cra_u.ablkcipher = {
3906 .min_keysize = AES_MIN_KEY_SIZE,
3907 .max_keysize = AES_MAX_KEY_SIZE,
3908 .setkey = sep_aes_setkey,
3909 .encrypt = sep_aes_cbc_encrypt,
3910 .ivsize = AES_BLOCK_SIZE,
3911 .decrypt = sep_aes_cbc_decrypt,
3915 .cra_name = "ebc(des)",
3916 .cra_driver_name = "ebc-des-sep",
3917 .cra_priority = 100,
3918 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3919 .cra_blocksize = DES_BLOCK_SIZE,
3920 .cra_ctxsize = sizeof(struct sep_system_ctx),
3922 .cra_type = &crypto_ablkcipher_type,
3923 .cra_module = THIS_MODULE,
3924 .cra_init = sep_crypto_init,
3925 .cra_exit = sep_crypto_exit,
3926 .cra_u.ablkcipher = {
3927 .min_keysize = DES_KEY_SIZE,
3928 .max_keysize = DES_KEY_SIZE,
3929 .setkey = sep_des_setkey,
3930 .encrypt = sep_des_ebc_encrypt,
3931 .decrypt = sep_des_ebc_decrypt,
3935 .cra_name = "cbc(des)",
3936 .cra_driver_name = "cbc-des-sep",
3937 .cra_priority = 100,
3938 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3939 .cra_blocksize = DES_BLOCK_SIZE,
3940 .cra_ctxsize = sizeof(struct sep_system_ctx),
3942 .cra_type = &crypto_ablkcipher_type,
3943 .cra_module = THIS_MODULE,
3944 .cra_init = sep_crypto_init,
3945 .cra_exit = sep_crypto_exit,
3946 .cra_u.ablkcipher = {
3947 .min_keysize = DES_KEY_SIZE,
3948 .max_keysize = DES_KEY_SIZE,
3949 .setkey = sep_des_setkey,
3950 .encrypt = sep_des_cbc_encrypt,
3951 .ivsize = DES_BLOCK_SIZE,
3952 .decrypt = sep_des_cbc_decrypt,
3956 .cra_name = "ebc(des3-ede)",
3957 .cra_driver_name = "ebc-des3-ede-sep",
3958 .cra_priority = 100,
3959 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3960 .cra_blocksize = DES_BLOCK_SIZE,
3961 .cra_ctxsize = sizeof(struct sep_system_ctx),
3963 .cra_type = &crypto_ablkcipher_type,
3964 .cra_module = THIS_MODULE,
3965 .cra_init = sep_crypto_init,
3966 .cra_exit = sep_crypto_exit,
3967 .cra_u.ablkcipher = {
3968 .min_keysize = DES3_EDE_KEY_SIZE,
3969 .max_keysize = DES3_EDE_KEY_SIZE,
3970 .setkey = sep_des_setkey,
3971 .encrypt = sep_des_ebc_encrypt,
3972 .decrypt = sep_des_ebc_decrypt,
3976 .cra_name = "cbc(des3-ede)",
3977 .cra_driver_name = "cbc-des3--ede-sep",
3978 .cra_priority = 100,
3979 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3980 .cra_blocksize = DES_BLOCK_SIZE,
3981 .cra_ctxsize = sizeof(struct sep_system_ctx),
3983 .cra_type = &crypto_ablkcipher_type,
3984 .cra_module = THIS_MODULE,
3985 .cra_init = sep_crypto_init,
3986 .cra_exit = sep_crypto_exit,
3987 .cra_u.ablkcipher = {
3988 .min_keysize = DES3_EDE_KEY_SIZE,
3989 .max_keysize = DES3_EDE_KEY_SIZE,
3990 .setkey = sep_des_setkey,
3991 .encrypt = sep_des_cbc_encrypt,
3992 .decrypt = sep_des_cbc_decrypt,
3997 int sep_crypto_setup(void)
4000 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
4001 (unsigned long)sep_dev);
4003 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
4005 sep_dev->workqueue = create_singlethread_workqueue(
4006 "sep_crypto_workqueue");
4007 if (!sep_dev->workqueue) {
4008 dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n");
4015 spin_lock_init(&queue_lock);
4019 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
4020 err = crypto_register_ahash(&hash_algs[i]);
4026 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
4027 err = crypto_register_alg(&crypto_algs[j]);
4029 goto err_crypto_algs;
4035 for (k = 0; k < i; k++)
4036 crypto_unregister_ahash(&hash_algs[k]);
4040 for (k = 0; k < j; k++)
4041 crypto_unregister_alg(&crypto_algs[k]);
4045 void sep_crypto_takedown(void)
4050 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
4051 crypto_unregister_ahash(&hash_algs[i]);
4052 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
4053 crypto_unregister_alg(&crypto_algs[i]);
4055 tasklet_kill(&sep_dev->finish_tasklet);