/*
 * sep_main.c - Security Processor Driver main group of functions
 *
 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 * Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 * Mark Allyn		mark.a.allyn@intel.com
 * Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 * 2009.06.26	Initial publish
 * 2010.09.14	Upgrade to Medfield
 * 2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 * 2011.02.22	Enable kernel crypto operation
 * Please note that this driver is based on information in the Discretix
 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 * Overview and Integration Guide.
 */
/* #define SEP_PERF_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"
/*
 * Let's not spend cycles iterating over message
 * area contents if debugging is not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep) _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/*
 * Currently, there is only one SEP device per platform;
 * in the event platforms in the future have more than one SEP
 * device, this will be a linked list
 */
struct sep_device *sep_dev;
/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to status queue element
 *
 * This function will remove information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
			     struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
			current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
}
/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the status
 * queue.
 */
struct sep_queue_info *sep_queue_status_add(
		struct sep_device *sep,
		u32 opcode,
		u32 size,
		u32 pid,
		u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
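/*
 * Usage sketch (illustrative, not a call site from this file): a
 * transaction path pairs the add/remove helpers around the actual SEP
 * operation. The opcode value below is a hypothetical placeholder.
 *
 *	struct sep_queue_info *qinfo;
 *
 *	qinfo = sep_queue_status_add(sep, 0x1, data_size, current->pid,
 *				     current->comm, strlen(current->comm));
 *	if (!qinfo)
 *		return -ENOMEM;
 *	... perform the SEP transaction ...
 *	sep_queue_status_remove(sep, &qinfo);
 */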
/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 *
 * The buffer created will not work as-is for DMA operations;
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
		current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
		current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] dma context/region uninitialized\n",
			 current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
		current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
		dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region)
		return -ENOMEM;

	/* Were there any previous tables that need to be preserved? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
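/*
 * Usage sketch (assumptions noted): a caller grows the staging region
 * and later copies it into the shared area. The fixed destination
 * offset below is a simplified stand-in for the offset bookkeeping the
 * real callers in this file perform.
 *
 *	void *region = NULL;
 *
 *	if (!sep_allocate_dmatables_region(sep, &region, dma_ctx, 1))
 *		memcpy(sep->shared_addr +
 *		       SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES,
 *		       region, dma_ctx->dmatables_len);
 */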
/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				  &sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
			current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
				current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
			current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
				current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
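/*
 * Call pattern (illustrative): a nonzero return means we were
 * interrupted by a signal and do not own the transaction, so the
 * caller must not clear it. sep_end_transaction_handler(), defined
 * later in this file, is the matching release.
 *
 *	if (sep_wait_transaction(sep))
 *		return -EINTR;
 *	... send the command and poll for the reply ...
 *	sep_end_transaction_handler(sep, &dma_ctx, call_status,
 *				    &my_queue_elem);
 */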
/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
	    (current->pid != sep->pid_doing_transaction))
		return -EACCES;

	/* We own the transaction */
	return 0;
}
/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print dump if DEBUG is set; it does
 * follow kernel debug print enabling
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
			current->pid, count/4, *p++);
}
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
					      sep->shared_size,
					      &sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
			current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
		current->pid,
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
			  sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns the virtual address inside the shared area that
 * corresponds to the given bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
				    dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
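/*
 * Worked example (hypothetical numbers): if the shared area sits at
 * kernel virtual address 0xffff880012340000 with bus address
 * 0x12340000, then bus address 0x12340100 converts to
 * 0xffff880012340100, i.e. shared_addr plus the 0x100 byte offset.
 */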
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
		current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
				    struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
		current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
			current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->
						out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather)
		 * is used for exclusively
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				     dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				     dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
		current->pid);

	return 0;
}
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: pointer to status queue element
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
			current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid the next transaction reading
	 * sensitive results from the previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
		current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
				    my_queue_elem);

	kfree(filp->private_data);

	return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid the next transaction reading
	 * sensitive results from the previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range matches the size of the
	 * message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
		current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
			current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
				    my_queue_elem);

end_function:
	return error;
}
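/*
 * Userspace view (illustrative): the legacy interface mmaps the
 * message shared area and then drives transactions through it. The
 * device node name and open flags here are assumptions, not taken
 * from this driver's registration code.
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */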
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * the SEP device
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
			current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
			 current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
		current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
			current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
			current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
				current->pid);
			goto end_function;
		}

		/* Check if this is a SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
				current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
				current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
				current->pid);
			mask |= POLLIN | POLLRDNORM;
			set_bit(SEP_LEGACY_POLL_DONE_OFFSET,
				&call_status->status);
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
			current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
		current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
		current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
		current->pid, sep->shared_addr);

	return time.tv_sec;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that it has a new
 * command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
	    (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
		current->pid,
		sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */

	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter for messages sent */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}
/**
 * sep_crypto_dma - DMA map a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @direction: DMA direction
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with scatterlists
 * representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	/* Count the segments */
	count_segment = 0;
	temp_sg = sg;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
				  count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
			  count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;
}
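/*
 * Usage sketch (illustrative): mapping a source scatterlist before
 * building LLI entries; src_sg is a hypothetical caller-owned
 * scatterlist and error handling is trimmed.
 *
 *	struct sep_dma_map *maps;
 *	int nents;
 *
 *	nents = sep_crypto_dma(sep, src_sg, &maps, DMA_TO_DEVICE);
 *	if (nents < 0)
 *		return nents;
 *	... build LLI entries from maps[0..nents-1] ...
 *	kfree(maps);
 */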
/**
 * sep_crypto_lli - build a basic LLI array from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @data_size: total data size
 * @direction: DMA direction
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @lli_maps: pointer to place a pointer to array of lli maps
 *	This is filled in; anything previous there will be lost
 *	The structure for lli maps is sep_lli_entry
 * @returns number of dma maps on success; negative on error
 *
 * This creates the LLI table from the scatterlist
 * It is only used for kernel crypto as it works exclusively
 * with scatterlists (struct scatterlist) representation of
 * data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{
	int ct1;
	int nbr_ents;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
		data_size -= sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
				   data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in these physical pages
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc_array(num_pages, sizeof(struct page *),
				   GFP_KERNEL);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}

	map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
				  GFP_KERNEL);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_KERNEL);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function_with_error2;
	}
	/* Convert the application virtual address into a set of physical */
	result = get_user_pages_fast(app_virt_addr, num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
			 current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
		current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
				     0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid, count,
			(unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}
	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);
	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);
	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_KERNEL);
	if (!lli_array)
		return -ENOMEM;

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}
	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);
	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli_in_array.
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
	     (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
	     (counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more than the DMA
	 * restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
	    next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
				    next_table_data_size);

end_function:
	return table_data_size;
}
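/*
 * Worked example (hypothetical sizes): assume the current table fills
 * up with entries totalling 0x2000 bytes while the entries left over
 * for the next table hold only 0x200, and
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 0x400. The code above then
 * trims the current table by 0x400 - 0x200 = 0x200 bytes, so the next
 * table carries an adequate 0x400 bytes for DMA.
 */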
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
		current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
			current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
				current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else {
			/* Advance to the next entry in the lli_array */
			array_counter++;
		}

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}
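/*
 * Worked example (hypothetical numbers): building a table of
 * table_data_size 0x1800 from an lli array whose next entry holds
 * 0x2000 bytes, the loop copies the entry, detects the 0x800
 * overflow, shrinks the table entry to 0x1800, and leaves the
 * remaining 0x800 (at an advanced bus address) in the lli array for
 * the next table.
 */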
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
		current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		(sep->shared_bus + (virt_address - sep->shared_addr)));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
		current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
			current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
		     entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx block size is (hex) %x\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is %lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
		current->pid);
}
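/*
 * Info-entry encoding, as decoded above: the low 24 bits of
 * block_size hold the next table's data size and the high 8 bits its
 * entry count. For example, block_size = (5 << 24) | 0x1000 describes
 * a next table of 5 entries covering 0x1000 bytes, with bus_address
 * pointing at that table.
 */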
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
	dma_addr_t *lli_table_addr_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	struct sep_lli_entry *lli_table_ptr;

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (dmatables_region && *dmatables_region)
		lli_table_ptr = *dmatables_region;

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	dma_ctx->num_lli_tables_created++;
}
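/*
 * Resulting layout of the empty table (two entries, no data):
 *
 *	entry 0: bus_address = 0x00000000, block_size = 0
 *	entry 1: bus_address = 0xFFFFFFFF, block_size = 0  (info entry)
 *
 * The 0xFFFFFFFF info entry is the same terminator that the table
 * walkers elsewhere in this file look for.
 */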
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @block_size: block size of the operation
 * @lli_table_ptr: returned bus address of the first input lli table
 * @num_entries_ptr: returned number of entries in the table
 * @table_data_size_ptr: returned total data size of the table
 * @is_kva: set for kernel data (kernel crypt io call)
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx
)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;
	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare input dma table data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
		current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
							      dmatables_region,
							      dma_ctx,
							      1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}
	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
						      dmatables_region,
						      dma_ctx,
						      sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}
	/* Loop till all the entries in the input array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (dma_lli_table_alloc_addr >
		    ((void *)sep->shared_addr +
		    SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		    SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
			current->pid,
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);
		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					dma_in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}
	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;
}
/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array: lli input array
 * @sep_in_lli_entries: number of entries in lli_in_array
 * @lli_out_array: lli output array
 * @sep_out_lli_entries: number of entries in lli_out_array
 * @block_size: block size of the operation
 * @lli_table_in_ptr: returned bus address of the first input lli table
 * @lli_table_out_ptr: returned bus address of the first output lli table
 * @in_num_entries_ptr: returned number of entries in the input table
 * @out_num_entries_ptr: returned number of entries in the output table
 * @table_data_size_ptr: returned total data size of the tables
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_construct_dma_tables_from_lli(
	struct sep_device *sep,
	struct sep_lli_entry *lli_in_array,
	u32 sep_in_lli_entries,
	struct sep_lli_entry *lli_out_array,
	u32 sep_out_lli_entries,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	/* Points to the area where next lli table can be allocated */
	void *lli_table_alloc_addr = NULL;
	/*
	 * Points to the area in shared region where next lli table
	 * can be allocated
	 */
	void *dma_lli_table_alloc_addr = NULL;
	/* Input lli table in dmatables_region or shared region */
	struct sep_lli_entry *in_lli_table_ptr = NULL;
	/* Input lli table location in the shared region */
	struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
	/* Output lli table in dmatables_region or shared region */
	struct sep_lli_entry *out_lli_table_ptr = NULL;
	/* Output lli table location in the shared region */
	struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_in_entry_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_out_entry_ptr = NULL;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_in_entry = 0;
	/* Points to the first entry to be processed in the lli_out_array */
	u32 current_out_entry = 0;
	/* Max size of the input table */
	u32 in_table_data_size = 0;
	/* Max size of the output table */
	u32 out_table_data_size = 0;
	/* Flag that signifies if this is the last tables build */
	u32 last_table_flag = 0;
	/* The data size that should be in table */
	u32 table_data_size = 0;
	/* Number of entries in the input table */
	u32 num_entries_in_table = 0;
	/* Number of entries in the output table */
	u32 num_entries_out_table = 0;
2150 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2154 /* Initiate to point after the message area */
2155 lli_table_alloc_addr = (void *)(sep->shared_addr +
2156 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2157 (dma_ctx->num_lli_tables_created *
2158 (sizeof(struct sep_lli_entry) *
2159 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2160 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2162 if (dmatables_region) {
2163 /* 2 for both in+out table */
2164 if (sep_allocate_dmatables_region(sep,
2167 2*sep_in_lli_entries))
2169 lli_table_alloc_addr = *dmatables_region;
2172 /* Loop till all the entries in in array are not processed */
2173 while (current_in_entry < sep_in_lli_entries) {
2174 /* Set the new input and output tables */
2176 (struct sep_lli_entry *)lli_table_alloc_addr;
2177 dma_in_lli_table_ptr =
2178 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2180 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2181 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2182 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2183 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2185 /* Set the first output tables */
2187 (struct sep_lli_entry *)lli_table_alloc_addr;
2188 dma_out_lli_table_ptr =
2189 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2191 /* Check if the DMA table area limit was overrun */
2192 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2193 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2194 ((void *)sep->shared_addr +
2195 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2196 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2198 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2202 /* Update the number of the lli tables created */
2203 dma_ctx->num_lli_tables_created += 2;
2205 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2206 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2207 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2208 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2210 /* Calculate the maximum size of data for input table */
2211 in_table_data_size =
2212 sep_calculate_lli_table_max_size(sep,
2213 &lli_in_array[current_in_entry],
2214 (sep_in_lli_entries - current_in_entry),
2217 /* Calculate the maximum size of data for output table */
2218 out_table_data_size =
2219 sep_calculate_lli_table_max_size(sep,
2220 &lli_out_array[current_out_entry],
2221 (sep_out_lli_entries - current_out_entry),
2224 if (!last_table_flag) {
2225 in_table_data_size = (in_table_data_size /
2226 block_size) * block_size;
2227 out_table_data_size = (out_table_data_size /
2228 block_size) * block_size;
2231 table_data_size = in_table_data_size;
2232 if (table_data_size > out_table_data_size)
2233 table_data_size = out_table_data_size;
2235 dev_dbg(&sep->pdev->dev,
2236 "[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
2237 current->pid, in_table_data_size);
2239 dev_dbg(&sep->pdev->dev,
2240 "[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
2241 current->pid, out_table_data_size);
2243 /* Construct input lli table */
2244 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2247 &num_entries_in_table,
2250 /* Construct output lli table */
2251 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2254 &num_entries_out_table,
2257 /* If info entry is null - this is the first table built */
2258 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2259 /* Set the output parameters to physical addresses */
2261 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2263 *in_num_entries_ptr = num_entries_in_table;
2265 *lli_table_out_ptr =
2266 sep_shared_area_virt_to_bus(sep,
2267 dma_out_lli_table_ptr);
2269 *out_num_entries_ptr = num_entries_out_table;
2270 *table_data_size_ptr = table_data_size;
2272 dev_dbg(&sep->pdev->dev,
2273 "[PID%d] output lli_table_in_ptr is %08lx\n",
2275 (unsigned long)*lli_table_in_ptr);
2276 dev_dbg(&sep->pdev->dev,
2277 "[PID%d] output lli_table_out_ptr is %08lx\n",
2279 (unsigned long)*lli_table_out_ptr);
2281 /* Update the info entry of the previous in table */
2282 info_in_entry_ptr->bus_address =
2283 sep_shared_area_virt_to_bus(sep,
2284 dma_in_lli_table_ptr);
2286 info_in_entry_ptr->block_size =
2287 ((num_entries_in_table) << 24) |
2290 /* Update the info entry of the previous in table */
2291 info_out_entry_ptr->bus_address =
2292 sep_shared_area_virt_to_bus(sep,
2293 dma_out_lli_table_ptr);
2295 info_out_entry_ptr->block_size =
2296 ((num_entries_out_table) << 24) |
2299 dev_dbg(&sep->pdev->dev,
2300 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2302 (unsigned long)info_in_entry_ptr->bus_address,
2303 info_in_entry_ptr->block_size);
2305 dev_dbg(&sep->pdev->dev,
2306 "[PID%d] output lli_table_out_ptr: %08lx %08x\n",
2308 (unsigned long)info_out_entry_ptr->bus_address,
2309 info_out_entry_ptr->block_size);
2312 /* Save the pointer to the info entry of the current tables */
2313 info_in_entry_ptr = in_lli_table_ptr +
2314 num_entries_in_table - 1;
2315 info_out_entry_ptr = out_lli_table_ptr +
2316 num_entries_out_table - 1;
2318 dev_dbg(&sep->pdev->dev,
2319 "[PID%d] output num_entries_out_table is %x\n",
2321 (u32)num_entries_out_table);
2322 dev_dbg(&sep->pdev->dev,
2323 "[PID%d] output info_in_entry_ptr is %lx\n",
2325 (unsigned long)info_in_entry_ptr);
2326 dev_dbg(&sep->pdev->dev,
2327 "[PID%d] output info_out_entry_ptr is %lx\n",
2329 (unsigned long)info_out_entry_ptr);
2332 /* Print input tables */
2333 if (!dmatables_region) {
2334 sep_debug_print_lli_tables(
2336 (struct sep_lli_entry *)
2337 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2338 *in_num_entries_ptr,
2339 *table_data_size_ptr);
2342 /* Print output tables */
2343 if (!dmatables_region) {
2344 sep_debug_print_lli_tables(
2346 (struct sep_lli_entry *)
2347 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2348 *out_num_entries_ptr,
2349 *table_data_size_ptr);
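
/*
 * For reference only: each LLI table built above is terminated by an
 * "info" entry whose bus_address points at the next table and whose
 * block_size packs that table's entry count in the high 8 bits and its
 * data size in the low 24 bits (see the "<< 24" expressions above).
 * A minimal decoding sketch (hypothetical helper):
 */
#if 0	/* illustrative sketch, not compiled */
static void example_decode_info_entry(u32 block_size_field,
				      u32 *num_entries, u32 *data_size)
{
	*num_entries = block_size_field >> 24;		/* high 8 bits */
	*data_size = block_size_field & 0x00FFFFFF;	/* low 24 bits */
}
#endif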
/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @sep: pointer to struct sep_device
 * @app_virt_in_addr: virtual address of the input buffer
 * @app_virt_out_addr: virtual address of the output buffer
 * @data_size: size of the data
 * @block_size: block size of the operation
 * @lli_table_in_ptr: bus address of the first input LLI table
 * @lli_table_out_ptr: bus address of the first output LLI table
 * @in_num_entries_ptr: number of entries in the first input table
 * @out_num_entries_ptr: number of entries in the first output table
 * @table_data_size_ptr: data size of the first input/output table
 * @is_kva: set for kernel data; used only for kernel crypto module
 *
 * This function builds input and output DMA tables for synchronic
 * symmetric operations (AES, DES, HASH). It also checks that each table
 * holds a whole multiple of the block size.
 * Note that all bus addresses that are passed to the SEP
 * are in 32-bit format; the SEP is a 32-bit device.
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_in_array;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_out_array;

	if (!dma_ctx) {
		error = -EINVAL;
		goto end_function;
	}

	if (data_size == 0) {
		/* Prepare empty table for input and output */
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(
					sep,
					dmatables_region,
					dma_ctx,
					2);
			if (error)
				goto end_function;
		}
		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
			in_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
			out_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		goto update_dcb_counter;
	}

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;

	/* Lock the pages of the buffer and translate them to pages */
	if (is_kva) {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
				current->pid);
			goto end_function;
		}

		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
				current->pid);
			goto end_function_free_lli_in;
		}
	} else {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
			current->pid);
		error = sep_lock_user_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
				current->pid);
			goto end_function;
		}

		if (dma_ctx->secure_dma) {
			/* secure_dma requires use of non accessible memory */
			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
				current->pid);
			error = sep_lli_table_secure_dma(sep,
				app_virt_out_addr, data_size, &lli_out_array,
				SEP_DRIVER_OUT_FLAG, dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] secure dma table setup for output virtual buffer failed\n",
					current->pid);
				goto end_function_free_lli_in;
			}
		} else {
			/* For normal, non-secure dma */
			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
				current->pid);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] Locking user output pages\n",
				current->pid);

			error = sep_lock_user_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
					current->pid);
				goto end_function_free_lli_in;
			}
		}
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
		current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* Call the function that creates table from the lli arrays */
	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
		current->pid);
	error = sep_construct_dma_tables_from_lli(
			sep, lli_in_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								in_num_pages,
			lli_out_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								out_num_pages,
			block_size, lli_table_in_ptr, lli_table_out_ptr,
			in_num_entries_ptr, out_num_entries_ptr,
			table_data_size_ptr, dmatables_region, dma_ctx);

	if (error) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
			current->pid);
		goto end_function_with_error;
	}

	kfree(lli_out_array);
	kfree(lli_in_array);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;

	goto end_function;

end_function_with_error:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	kfree(lli_out_array);

end_function_free_lli_in:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	kfree(lli_in_array);

end_function:
	return error;
}
/**
 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
 * @sep: pointer to struct sep_device
 * @app_in_address: unsigned long; for data buffer in (user space)
 * @app_out_address: unsigned long; for data buffer out (user space)
 * @data_in_size: u32; for size of data
 * @block_size: u32; for block size
 * @tail_block_size: u32; for size of tail block
 * @isapplet: bool; to indicate external app
 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma: indicates whether this is secure_dma using IMR
 *
 * This function prepares the linked DMA tables and puts the
 * address of the linked list of tables into a DCB (data control
 * block), the address of which is known by the SEP hardware.
 * Note that all bus addresses that are passed to the SEP
 * are in 32-bit format; the SEP is a 32-bit device.
 */
int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
	unsigned long app_in_address,
	unsigned long app_out_address,
	u32 data_in_size,
	u32 block_size,
	u32 tail_block_size,
	bool isapplet,
	bool is_kva,
	bool secure_dma,
	struct sep_dcblock *dcb_region,
	void **dmatables_region,
	struct sep_dma_context **dma_ctx,
	struct scatterlist *src_sg,
	struct scatterlist *dst_sg)
{
	int error = 0;
	/* Size of tail */
	u32 tail_size = 0;
	/* Address of the created DCB table */
	struct sep_dcblock *dcb_table_ptr = NULL;
	/* The physical address of the first input DMA table */
	dma_addr_t in_first_mlli_address = 0;
	/* Number of entries in the first input DMA table */
	u32 in_first_num_entries = 0;
	/* The physical address of the first output DMA table */
	dma_addr_t out_first_mlli_address = 0;
	/* Number of entries in the first output DMA table */
	u32 out_first_num_entries = 0;
	/* Data in the first input/output table */
	u32 first_data_size = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
		current->pid, app_in_address);

	dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
		current->pid, app_out_address);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
		current->pid, data_in_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
		current->pid, block_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
		current->pid, tail_block_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
		current->pid, isapplet);

	dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
		current->pid, is_kva);

	dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
		current->pid, src_sg);

	dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
		current->pid, dst_sg);

	if (!dma_ctx) {
		dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
			current->pid);
		error = -EINVAL;
		goto end_function;
	}

	if (*dma_ctx) {
		/* In case there are multiple DCBs for this transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
			current->pid);
	} else {
		*dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
		if (!(*dma_ctx)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] Not enough memory for DMA context\n",
				current->pid);
			error = -ENOMEM;
			goto end_function;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Created DMA context addr at 0x%p\n",
			current->pid, *dma_ctx);
	}

	(*dma_ctx)->secure_dma = secure_dma;

	/* these are for kernel crypto only */
	(*dma_ctx)->src_sg = src_sg;
	(*dma_ctx)->dst_sg = dst_sg;

	if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
		/* No more DCBs to allocate */
		dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
			current->pid);
		error = -ENOSPC;
		goto end_function_error;
	}

	/* Allocate new DCB */
	if (dcb_region) {
		dcb_table_ptr = dcb_region;
	} else {
		dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
			((*dma_ctx)->nr_dcb_creat *
				sizeof(struct sep_dcblock)));
	}

	/* Set the default values in the DCB */
	dcb_table_ptr->input_mlli_address = 0;
	dcb_table_ptr->input_mlli_num_entries = 0;
	dcb_table_ptr->input_mlli_data_size = 0;
	dcb_table_ptr->output_mlli_address = 0;
	dcb_table_ptr->output_mlli_num_entries = 0;
	dcb_table_ptr->output_mlli_data_size = 0;
	dcb_table_ptr->tail_data_size = 0;
	dcb_table_ptr->out_vr_tail_pt = 0;

	if (isapplet) {
		/* Check if there is enough data for DMA operation */
		if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
			if (is_kva) {
				error = -ENODEV;
				goto end_function_error;
			} else {
				if (copy_from_user(dcb_table_ptr->tail_data,
					(void __user *)app_in_address,
					data_in_size)) {
					error = -EFAULT;
					goto end_function_error;
				}
			}

			dcb_table_ptr->tail_data_size = data_in_size;

			/* Set the output user-space address for mem2mem op */
			if (app_out_address)
				dcb_table_ptr->out_vr_tail_pt =
					(aligned_u64)app_out_address;

			/*
			 * Update both data length parameters in order to avoid
			 * second data copy and allow building of empty mlli
			 * tables
			 */
			tail_size = 0x0;
			data_in_size = 0x0;
		} else {
			if (!app_out_address) {
				tail_size = data_in_size % block_size;
				if (!tail_size) {
					if (tail_block_size == block_size)
						tail_size = block_size;
				}
			} else {
				tail_size = 0;
			}
		}

		if (tail_size) {
			if (tail_size > sizeof(dcb_table_ptr->tail_data))
				return -EINVAL;
			if (is_kva) {
				error = -ENODEV;
				goto end_function_error;
			} else {
				/* We have tail data - copy it to DCB */
				if (copy_from_user(dcb_table_ptr->tail_data,
					(void __user *)(app_in_address +
					data_in_size - tail_size),
					tail_size)) {
					error = -EFAULT;
					goto end_function_error;
				}
			}
			if (app_out_address)
				/*
				 * Calculate the output address
				 * according to tail data size
				 */
				dcb_table_ptr->out_vr_tail_pt =
					(aligned_u64)app_out_address +
					data_in_size - tail_size;

			/* Save the real tail data size */
			dcb_table_ptr->tail_data_size = tail_size;
			/*
			 * Update the data size without the tail
			 * data size AKA data for the dma
			 */
			data_in_size = (data_in_size - tail_size);
		}
	}

	/* Check if we need to build only input table or input/output */
	if (app_out_address) {
		/* Prepare input/output tables */
		error = sep_prepare_input_output_dma_table(sep,
			app_in_address,
			app_out_address,
			data_in_size,
			block_size,
			&in_first_mlli_address,
			&out_first_mlli_address,
			&in_first_num_entries,
			&out_first_num_entries,
			&first_data_size,
			is_kva,
			dmatables_region,
			*dma_ctx);
	} else {
		/* Prepare input tables */
		error = sep_prepare_input_dma_table(sep,
			app_in_address,
			data_in_size,
			block_size,
			&in_first_mlli_address,
			&in_first_num_entries,
			&first_data_size,
			is_kva,
			dmatables_region,
			*dma_ctx);
	}

	if (error) {
		dev_warn(&sep->pdev->dev,
			"prepare DMA table call failed from prepare DCB call\n");
		goto end_function_error;
	}

	/* Set the DCB values */
	dcb_table_ptr->input_mlli_address = in_first_mlli_address;
	dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
	dcb_table_ptr->input_mlli_data_size = first_data_size;
	dcb_table_ptr->output_mlli_address = out_first_mlli_address;
	dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
	dcb_table_ptr->output_mlli_data_size = first_data_size;

	goto end_function;

end_function_error:
	kfree(*dma_ctx);
	*dma_ctx = NULL;

end_function:
	return error;
}
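
/*
 * For reference only: the tail split above in plain numbers.  With
 * data_in_size = 100 and block_size = 16, tail_size = 100 % 16 = 4,
 * so 96 bytes are described by the DMA tables and the last 4 bytes
 * travel inside the DCB's tail_data.  Standalone sketch (hypothetical
 * helper mirroring the logic above):
 */
#if 0	/* illustrative sketch, not compiled */
static u32 example_split_tail(u32 data_in_size, u32 block_size,
			      u32 tail_block_size, u32 *dma_size)
{
	u32 tail_size = data_in_size % block_size;

	/* an aligned buffer may still carry one full block of tail data */
	if (!tail_size && tail_block_size == block_size)
		tail_size = block_size;

	*dma_size = data_in_size - tail_size;
	return tail_size;
}
#endif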
/**
 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
 * @sep: pointer to struct sep_device
 * @isapplet: indicates external application (used for kernel access)
 * @is_kva: indicates kernel addresses (only used for kernel crypto)
 * @dma_ctx: DMA context to be freed
 *
 * This function frees the DMA tables and DCB
 */
static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
	bool is_kva, struct sep_dma_context **dma_ctx)
{
	struct sep_dcblock *dcb_table_ptr;
	unsigned long pt_hold;
	void *tail_pt;

	int i = 0;
	int error = 0;
	int error_temp = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
		current->pid);

	if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
		return 0;

	if (!(*dma_ctx)->secure_dma && isapplet) {
		dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
			current->pid);

		/* Tail stuff is only for non secure_dma */
		/* Set pointer to first DCB table */
		dcb_table_ptr = (struct sep_dcblock *)
			(sep->shared_addr +
			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);

		/*
		 * Go over each DCB and see if
		 * tail pointer must be updated
		 */
		for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
				i++, dcb_table_ptr++) {
			if (dcb_table_ptr->out_vr_tail_pt) {
				pt_hold = (unsigned long)dcb_table_ptr->
					out_vr_tail_pt;
				tail_pt = (void *)pt_hold;

				/* Copy tail data back to the user */
				error_temp = copy_to_user(
					(void __user *)tail_pt,
					dcb_table_ptr->tail_data,
					dcb_table_ptr->tail_data_size);
				if (error_temp) {
					/* Release the DMA resource */
					error = -EFAULT;
					break;
				}
			}
		}
	}

	/* Free the output pages, if any */
	sep_free_dma_table_data_handler(sep, dma_ctx);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
		current->pid);

	return error;
}
/**
 * sep_prepare_dcb_handler - prepare a control block
 * @sep: pointer to struct sep_device
 * @arg: pointer to user parameters
 * @secure_dma: indicate whether we are using secure_dma on IMR
 *
 * This function will retrieve the RAR buffer physical addresses, type
 * & size corresponding to the RAR handles provided in the buffers vector.
 */
static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
				   bool secure_dma,
				   struct sep_dma_context **dma_ctx)
{
	int error;
	/* Command arguments */
	static struct build_dcb_struct command_args;

	/* Get the command arguments */
	if (copy_from_user(&command_args, (void __user *)arg,
					sizeof(struct build_dcb_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prep dcb handler app_in_address is %08llx\n",
		current->pid, command_args.app_in_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] app_out_address is %08llx\n",
		current->pid, command_args.app_out_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] data_size is %x\n",
		current->pid, command_args.data_in_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] block_size is %x\n",
		current->pid, command_args.block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] tail block_size is %x\n",
		current->pid, command_args.tail_block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] is_applet is %x\n",
		current->pid, command_args.is_applet);

	if (!command_args.app_in_address) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] null app_in_address\n", current->pid);
		error = -EINVAL;
		goto end_function;
	}

	error = sep_prepare_input_output_dma_table_in_dcb(sep,
			(unsigned long)command_args.app_in_address,
			(unsigned long)command_args.app_out_address,
			command_args.data_in_size, command_args.block_size,
			command_args.tail_block_size,
			command_args.is_applet, false,
			secure_dma, NULL, NULL, dma_ctx, NULL, NULL);

end_function:
	return error;
}
/**
 * sep_free_dcb_handler - free control block resources
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context to be freed
 *
 * This function frees the DCB resources and updates the needed
 * user-space buffers.
 */
static int sep_free_dcb_handler(struct sep_device *sep,
				struct sep_dma_context **dma_ctx)
{
	if (!dma_ctx || !(*dma_ctx)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no dma context defined, nothing to free\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
}
/**
 * sep_ioctl - ioctl handler for sep device
 * @filp: pointer to struct file
 * @cmd: command
 * @arg: pointer to argument structure
 *
 * Implement the ioctl methods available on the SEP device.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}
		/* Send command to SEP */
		error = sep_send_command_handler(sep);
		if (!error)
			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				&call_status->status);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
			current->pid);
		break;
	case SEP_IOCENDTRANSACTION:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION start\n",
			current->pid);
		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
						    my_queue_elem);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION end\n",
			current->pid);
		break;
	case SEP_IOCPREPAREDCB:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB start\n",
			current->pid);
	case SEP_IOCPREPAREDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
			current->pid);
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb prep needed before send msg\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}

		if (!arg) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb null arg\n", current->pid);
			error = -EINVAL;
			goto end_function;
		}

		if (cmd == SEP_IOCPREPAREDCB) {
			/* No secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, false,
				dma_ctx);
		} else {
			/* Secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOC_POC (with secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, true,
				dma_ctx);
		}
		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
			current->pid);
		break;
	case SEP_IOCFREEDCB:
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
			current->pid);
	case SEP_IOCFREEDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
			current->pid);
		error = sep_free_dcb_handler(sep, dma_ctx);
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
			current->pid);
		break;
	default:
		error = -ENOTTY;
		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
			current->pid);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);

	return error;
}
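
/*
 * For reference only: the legacy ioctl sequence as seen from user
 * space - mmap the shared area, prepare the DCBs, send the command,
 * then end the transaction.  The device node path, message handling
 * and error handling below are placeholders/assumptions:
 */
#if 0	/* illustrative sketch, not compiled */
	int fd = open("/dev/sep", O_RDWR);	/* node name from SEP_DEV_NAME */
	void *shared = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	struct build_dcb_struct dcb = {
		.app_in_address = (aligned_u64)(unsigned long)in_buf,
		.app_out_address = (aligned_u64)(unsigned long)out_buf,
		.data_in_size = data_len,
		.block_size = 16,		/* e.g. AES block size */
	};

	ioctl(fd, SEP_IOCPREPAREDCB, &dcb);	/* build DMA tables + DCB */
	/* ... place the request message in the mapped shared area ... */
	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);	/* kick the SEP */
	/* ... wait for completion, read reply from the shared area ... */
	ioctl(fd, SEP_IOCENDTRANSACTION, 0);	/* release ownership */
#endif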
/**
 * sep_inthandler - interrupt handler for sep device
 * @irq: interrupt
 * @dev_id: device id
 */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	if (reg_val & (0x1 << 13)) {
		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
			sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
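
/*
 * For reference only: the probe path (see sep_probe below) unmasks a
 * single interrupt source in the IMR with ~(0x1 << 13), which is why
 * the handler above only tests bit 13 of the IRR and then classifies
 * the event via the top two bits of GPR2.  Sketch of that
 * classification (hypothetical helper mirroring the checks above):
 */
#if 0	/* illustrative sketch, not compiled */
enum example_sep_event { SEP_EV_PRINTF, SEP_EV_DAEMON, SEP_EV_REPLY };

static enum example_sep_event example_classify_gpr2(u32 gpr2)
{
	if ((gpr2 >> 30) & 0x1)		/* bit 30: printf request */
		return SEP_EV_PRINTF;
	if (gpr2 >> 31)			/* bit 31: daemon request */
		return SEP_EV_DAEMON;
	return SEP_EV_REPLY;		/* ordinary SEP reply */
}
#endif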
/**
 * sep_reconfig_shared_area - reconfigure shared area
 * @sep: pointer to struct sep_device
 *
 * Reconfig the shared area between HOST and SEP - needed in case
 * the DX_CC_Init function was called before OS loading.
 */
static int sep_reconfig_shared_area(struct sep_device *sep)
{
	int ret_val;

	/* use to limit waiting for SEP */
	unsigned long end_time;

	/* Send the new SHARED MESSAGE AREA to the SEP */
	dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
		(unsigned long long)sep->shared_bus);

	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);

	/* Poll for SEP response */
	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	end_time = jiffies + (WAIT_TIME * HZ);

	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
		(ret_val != sep->shared_bus))
		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	/* Check the return value (register) */
	if (ret_val != sep->shared_bus) {
		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
		ret_val = -ENOMEM;
	} else
		ret_val = 0;

	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");

	return ret_val;
}
/**
 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
 *	contexts into use
 * @sep: SEP device
 * @dcb_region: DCB region copy
 * @dmatables_region: MLLI/DMA tables copy
 * @dma_ctx: DMA context for current transaction
 */
ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
					struct sep_dcblock **dcb_region,
					void **dmatables_region,
					struct sep_dma_context *dma_ctx)
{
	void *dmaregion_free_start = NULL;
	void *dmaregion_free_end = NULL;
	void *dcbregion_free_start = NULL;
	void *dcbregion_free_end = NULL;
	ssize_t error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
		current->pid);

	if (1 > dma_ctx->nr_dcb_creat) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid number of dcbs to activate 0x%08X\n",
			current->pid, dma_ctx->nr_dcb_creat);
		error = -EINVAL;
		goto end_function;
	}

	dmaregion_free_start = sep->shared_addr
				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
	dmaregion_free_end = dmaregion_free_start
				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	if (dmaregion_free_start
	     + dma_ctx->dmatables_len > dmaregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}
	memcpy(dmaregion_free_start,
	       *dmatables_region,
	       dma_ctx->dmatables_len);
	/* Free MLLI table copy */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	/* Copy thread's DCB table copy to DCB table region */
	dcbregion_free_start = sep->shared_addr +
				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
	dcbregion_free_end = dcbregion_free_start +
				(SEP_MAX_NUM_SYNC_DMA_OPS *
					sizeof(struct sep_dcblock)) - 1;

	if (dcbregion_free_start
	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
	     > dcbregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}

	memcpy(dcbregion_free_start,
	       *dcb_region,
	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));

	/* Print the tables */
	dev_dbg(&sep->pdev->dev, "activate: input table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->input_mlli_address),
		(*dcb_region)->input_mlli_num_entries,
		(*dcb_region)->input_mlli_data_size);

	dev_dbg(&sep->pdev->dev, "activate: output table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->output_mlli_address),
		(*dcb_region)->output_mlli_num_entries,
		(*dcb_region)->output_mlli_data_size);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] printing activated tables\n", current->pid);

end_function:
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	kfree(*dcb_region);
	*dcb_region = NULL;

	return error;
}
/**
 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @user_dcb_args: User arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 * @secure_dma: Indicate use of IMR restricted memory secure dma
 */
static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct __user *user_dcb_args,
			const u32 num_dcbs, bool secure_dma)
{
	int error = 0;
	int i = 0;
	struct build_dcb_struct *dcb_args = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid number of dcbs 0x%08X\n",
			current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
			   GFP_KERNEL);
	if (!dcb_args) {
		error = -ENOMEM;
		goto end_function;
	}

	if (copy_from_user(dcb_args,
			user_dcb_args,
			num_dcbs * sizeof(struct build_dcb_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_args[i].app_in_address,
				(unsigned long)dcb_args[i].app_out_address,
				dcb_args[i].data_in_size,
				dcb_args[i].block_size,
				dcb_args[i].tail_block_size,
				dcb_args[i].is_applet,
				false, secure_dma,
				*dcb_region, dmatables_region,
				dma_ctx,
				NULL,
				NULL);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] dma table creation failed\n",
				current->pid);
			goto end_function;
		}

		if (dcb_args[i].app_in_address != 0)
			(*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
	}

end_function:
	kfree(dcb_args);
	return error;
}
/**
 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table
 *	context for kernel crypto
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @dcb_data: Kernel arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 *
 * This does the same thing as sep_create_dcb_dmatables_context
 * except that it is used only for the kernel crypto operation. It is
 * separate because there is no user data involved; the dcb data structure
 * is specific for kernel crypto (build_dcb_struct_kernel)
 */
int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
			struct sep_dcblock **dcb_region,
			void **dmatables_region,
			struct sep_dma_context **dma_ctx,
			const struct build_dcb_struct_kernel *dcb_data,
			const u32 num_dcbs)
{
	int error = 0;
	int i = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
		current->pid);

	if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
		error = -EINVAL;
		goto end_function;
	}

	if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid number of dcbs 0x%08X\n",
			current->pid, num_dcbs);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
		current->pid, num_dcbs);

	/* Allocate thread-specific memory for DCB */
	*dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
			      GFP_KERNEL);
	if (!(*dcb_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Prepare DCB and MLLI table into the allocated regions */
	for (i = 0; i < num_dcbs; i++) {
		error = sep_prepare_input_output_dma_table_in_dcb(sep,
				(unsigned long)dcb_data->app_in_address,
				(unsigned long)dcb_data->app_out_address,
				dcb_data->data_in_size,
				dcb_data->block_size,
				dcb_data->tail_block_size,
				dcb_data->is_applet,
				true,
				false,
				*dcb_region, dmatables_region,
				dma_ctx,
				dcb_data->src_sg,
				dcb_data->dst_sg);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] dma table creation failed\n",
				current->pid);
			goto end_function;
		}
	}

end_function:
	return error;
}
/**
 * sep_activate_msgarea_context - Takes the message area context into use
 * @sep: SEP device
 * @msg_region: Message area context buf
 * @msg_len: Message area context buffer size
 */
static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
					    void **msg_region,
					    const size_t msg_len)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
		current->pid);

	if (!msg_region || !(*msg_region) ||
	    SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid act msgarea len 0x%08zX\n",
			current->pid, msg_len);
		return -EINVAL;
	}

	memcpy(sep->shared_addr, *msg_region, msg_len);

	return 0;
}

/**
 * sep_create_msgarea_context - Creates message area context
 * @sep: SEP device
 * @msg_region: Msg area region buf to create for current transaction
 * @msg_user: Content for msg area region from user
 * @msg_len: Message area size
 */
static ssize_t sep_create_msgarea_context(struct sep_device *sep,
					  void **msg_region,
					  const void __user *msg_user,
					  const size_t msg_len)
{
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
		current->pid);

	if (!msg_region ||
	    !msg_user ||
	    SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid creat msgarea len 0x%08zX\n",
			current->pid, msg_len);
		error = -EINVAL;
		goto end_function;
	}

	/* Allocate thread-specific memory for message buffer */
	*msg_region = kzalloc(msg_len, GFP_KERNEL);
	if (!(*msg_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Copy input data to write() to allocated message buffer */
	if (copy_from_user(*msg_region, msg_user, msg_len)) {
		error = -EFAULT;
		goto end_function;
	}

end_function:
	if (error && msg_region) {
		kfree(*msg_region);
		*msg_region = NULL;
	}

	return error;
}
/**
 * sep_read - Returns results of an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for storing results
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support reading in chunks, all data must be
 * consumed during a single read system call.
 */
static ssize_t sep_read(struct file *filp,
			char __user *buf_user, size_t count_user,
			loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	ssize_t error = 0, error_tmp = 0;

	/* Am I the process that owns the transaction? */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Checks that user has called necessary apis */
	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
			&call_status->status)) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] fastcall write not called\n",
			current->pid);
		error = -EPROTO;
		goto end_function_error;
	}

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] null user buffer\n",
			current->pid);
		error = -EINVAL;
		goto end_function_error;
	}

	/* Wait for SEP to finish */
	wait_event(sep->event_interrupt,
		   test_bit(SEP_WORKING_LOCK_BIT,
			    &sep->in_use_flags) == 0);

	sep_dump_message(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
		current->pid, count_user);

	/* In case user has allocated bigger buffer */
	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;

	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
		error = -EFAULT;
		goto end_function_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
	error = count_user;

end_function_error:
	/* Copy possible tail data to user and free DCB and MLLIs */
	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
	if (error_tmp)
		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
			current->pid);

	/* End the transaction, wakeup pending ones */
	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
						my_queue_elem);
	if (error_tmp)
		dev_warn(&sep->pdev->dev,
			"[PID%d] ending transaction failed\n",
			current->pid);

end_function:
	return error;
}
/**
 * sep_fastcall_args_get - Gets fastcall params from user
 * @sep: SEP device
 * @args: Parameters buffer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 */
static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
					    struct sep_fastcall_hdr *args,
					    const char __user *buf_user,
					    const size_t count_user)
{
	ssize_t error = 0;
	size_t actual_count = 0;

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] null user buffer\n",
			current->pid);
		error = -EINVAL;
		goto end_function;
	}

	if (count_user < sizeof(struct sep_fastcall_hdr)) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] too small message size 0x%08zX\n",
			current->pid, count_user);
		error = -EINVAL;
		goto end_function;
	}

	if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
		error = -EFAULT;
		goto end_function;
	}

	if (SEP_FC_MAGIC != args->magic) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid fastcall magic 0x%08X\n",
			current->pid, args->magic);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
		current->pid, args->num_dcbs);
	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
		current->pid, args->msg_len);

	if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] invalid message length\n",
			current->pid);
		error = -EINVAL;
		goto end_function;
	}

	actual_count = sizeof(struct sep_fastcall_hdr)
			+ args->msg_len
			+ (args->num_dcbs * sizeof(struct build_dcb_struct));

	if (actual_count != count_user) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
			current->pid, actual_count, count_user);
		error = -EMSGSIZE;
		goto end_function;
	}

end_function:
	return error;
}
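
/*
 * For reference only: the exact byte layout validated above.  A
 * fastcall write() buffer is a struct sep_fastcall_hdr, followed by
 * num_dcbs struct build_dcb_struct entries, followed by msg_len bytes
 * of message.  Sizing sketch (field values are placeholders):
 */
#if 0	/* illustrative sketch, not compiled */
	struct sep_fastcall_hdr hdr = {
		.magic = SEP_FC_MAGIC,
		.num_dcbs = 1,
		.msg_len = msg_len,	/* within the min/max message size */
	};
	size_t total = sizeof(hdr)
			+ hdr.num_dcbs * sizeof(struct build_dcb_struct)
			+ hdr.msg_len;	/* must equal count_user exactly */
#endif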
/**
 * sep_write - Starts an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support writing in chunks,
 * all data must be given during a single write system call.
 */
static ssize_t sep_write(struct file *filp,
			 const char __user *buf_user, size_t count_user,
			 loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context *dma_ctx = NULL;
	struct sep_fastcall_hdr call_hdr = {0};
	void *msg_region = NULL;
	void *dmatables_region = NULL;
	struct sep_dcblock *dcb_region = NULL;
	ssize_t error = 0;
	struct sep_queue_info *my_queue_elem = NULL;
	bool my_secure_dma; /* are we using secure_dma (IMR)? */

	dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
		current->pid, sep);
	dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
		current->pid, private_data);

	error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
	if (error)
		goto end_function;

	buf_user += sizeof(struct sep_fastcall_hdr);

	if (call_hdr.secure_dma == 0)
		my_secure_dma = false;
	else
		my_secure_dma = true;

	/*
	 * Controlling driver memory usage by limiting amount of
	 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
	 * of threads can progress further at a time
	 */
	dev_dbg(&sep->pdev->dev,
		"[PID%d] waiting for double buffering region access\n",
		current->pid);
	error = down_interruptible(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
		current->pid);
	if (error) {
		/* Signal received */
		goto end_function_error;
	}

	/*
	 * Prepare contents of the shared area regions for
	 * the operation into temporary buffers
	 */
	if (0 < call_hdr.num_dcbs) {
		error = sep_create_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				&dma_ctx,
				(const struct build_dcb_struct __user *)
					buf_user,
				call_hdr.num_dcbs, my_secure_dma);
		if (error)
			goto end_function_error_doublebuf;

		buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
	}

	error = sep_create_msgarea_context(sep,
					   &msg_region,
					   buf_user,
					   call_hdr.msg_len);
	if (error)
		goto end_function_error_doublebuf;

	dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
		current->pid);
	my_queue_elem = sep_queue_status_add(sep,
			((struct sep_msgarea_hdr *)msg_region)->opcode,
			(dma_ctx) ? dma_ctx->input_data_len : 0,
			current->pid,
			current->comm, sizeof(current->comm));

	if (!my_queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
		error = -ENOMEM;
		goto end_function_error_doublebuf;
	}

	/* Wait until current process gets the transaction */
	error = sep_wait_transaction(sep);

	if (error) {
		/* Interrupted by signal, don't clear transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
			current->pid);
		sep_queue_status_remove(sep, &my_queue_elem);
		goto end_function_error_doublebuf;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
		current->pid);
	private_data->my_queue_elem = my_queue_elem;

	/* Activate shared area regions for the transaction */
	error = sep_activate_msgarea_context(sep, &msg_region,
					     call_hdr.msg_len);
	if (error)
		goto end_function_error_clear_transact;

	sep_dump_message(sep);

	if (0 < call_hdr.num_dcbs) {
		error = sep_activate_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				dma_ctx);
		if (error)
			goto end_function_error_clear_transact;
	}

	/* Send command to SEP */
	error = sep_send_command_handler(sep);
	if (error)
		goto end_function_error_clear_transact;

	/* Store DMA context for the transaction */
	private_data->dma_ctx = dma_ctx;
	/* Update call status */
	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
	error = count_user;

	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

	goto end_function;

end_function_error_clear_transact:
	sep_end_transaction_handler(sep, &dma_ctx, call_status,
				    &private_data->my_queue_elem);

end_function_error_doublebuf:
	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

end_function_error:
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, &dma_ctx);

end_function:
	kfree(dcb_region);
	kfree(dmatables_region);
	kfree(msg_region);

	return error;
}
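
/*
 * For reference only: the full fastcall round trip as seen from user
 * space.  One write() carrying header + DCB args + message starts the
 * transaction; one read() collects the reply and, as sep_read above
 * shows, implicitly frees the DCBs and ends the transaction.  Buffer
 * assembly below is abbreviated and hypothetical:
 */
#if 0	/* illustrative sketch, not compiled */
	unsigned char req[REQ_LEN];	/* hdr + dcbs + message, sized as
					 * checked in sep_fastcall_args_get */
	unsigned char rsp[SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES];

	/* ... fill req: sep_fastcall_hdr, build_dcb_struct[], message ... */
	if (write(fd, req, sizeof(req)) != sizeof(req))
		goto fail;	/* transaction did not start */
	if (read(fd, rsp, sizeof(rsp)) < 0)
		goto fail;	/* the read still ends the transaction */
#endif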
/**
 * sep_seek - Handler for seek system call
 * @filp: File pointer
 * @offset: File offset
 * @origin: Options for offset
 *
 * Fastcall interface does not support seeking, all reads
 * and writes are from/to offset zero
 */
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
	return -ENOSYS;
}

/**
 * sep_file_operations - file operation on sep device
 * @sep_ioctl: ioctl handler from user space call
 * @sep_poll: poll handler
 * @sep_open: handles sep device open request
 * @sep_release: handles sep device release request
 * @sep_mmap: handles memory mapping requests
 * @sep_read: handles read request on sep device
 * @sep_write: handles write request on sep device
 * @sep_seek: handles seek request on sep device
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};

/**
 * sep_sysfs_read - read sysfs entry per given arguments
 * @filp: file pointer
 * @kobj: kobject pointer
 * @attr: binary file attributes
 * @buf: read to this buffer
 * @pos: offset to read
 * @count: amount of data to read
 *
 * This function is to read sysfs entries for sep driver per given arguments.
 */
static ssize_t
sep_sysfs_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t pos, size_t count)
{
	unsigned long lck_flags;
	size_t nleft = count;
	struct sep_device *sep = sep_dev;
	struct sep_queue_info *queue_elem = NULL;
	u32 queue_num = 0;
	u32 i = 1;

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	queue_num = sep->sep_queue_num;
	if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
		queue_num = SEP_DOUBLEBUF_USERS_LIMIT;

	if (count < sizeof(queue_num)
			+ (queue_num * sizeof(struct sep_queue_data))) {
		spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
		return -EINVAL;
	}

	memcpy(buf, &queue_num, sizeof(queue_num));
	buf += sizeof(queue_num);
	nleft -= sizeof(queue_num);

	list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
		if (i++ > queue_num)
			break;

		memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
		nleft -= sizeof(queue_elem->data);
		buf += sizeof(queue_elem->data);
	}
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return count - nleft;
}
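
/*
 * For reference only: the binary blob produced above is a u32 record
 * count followed by that many struct sep_queue_data records.  Sketch
 * of a consumer-side sanity check (hypothetical helper):
 */
#if 0	/* illustrative sketch, not compiled */
static u32 example_count_queue_records(const char *buf, size_t len)
{
	u32 n = 0;

	if (len < sizeof(n))
		return 0;
	memcpy(&n, buf, sizeof(n));	/* leading u32: number of records */
	/* fixed-size records follow back to back */
	if (len < sizeof(n) + n * sizeof(struct sep_queue_data))
		return 0;		/* truncated blob */
	return n;
}
#endif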
/**
 * bin_attributes - defines attributes for queue_status
 * @attr: attributes (name & permissions)
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
/**
 * sep_register_driver_with_fs - register misc devices
 * @sep: pointer to struct sep_device
 *
 * This function registers the driver with the file system
 */
static int sep_register_driver_with_fs(struct sep_device *sep)
{
	int ret_val;

	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
	sep->miscdev_sep.name = SEP_DEV_NAME;
	sep->miscdev_sep.fops = &sep_file_operations;

	ret_val = misc_register(&sep->miscdev_sep);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
			ret_val);
		return ret_val;
	}

	ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
					 &queue_status);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
			ret_val);
		misc_deregister(&sep->miscdev_sep);
		return ret_val;
	}

	return ret_val;
}
/**
 * sep_probe - probe a matching PCI device
 * @pdev: pci_dev
 * @ent: pci_device_id
 *
 * Attempt to set up and configure a SEP device that has been
 * discovered by the PCI layer. Allocates all required resources.
 */
static int sep_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int error = 0;
	struct sep_device *sep = NULL;

	if (sep_dev != NULL) {
		dev_dbg(&pdev->dev, "only one SEP supported.\n");
		return -EBUSY;
	}

	/* Enable the device */
	error = pci_enable_device(pdev);
	if (error) {
		dev_warn(&pdev->dev, "error enabling pci device\n");
		goto end_function;
	}

	/* Allocate the sep_device structure for this device */
	sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
	if (sep_dev == NULL) {
		error = -ENOMEM;
		goto end_function_disable_device;
	}

	/*
	 * We're going to use another variable for actually
	 * working with the device; this way, if we have
	 * multiple devices in the future, it would be easier
	 * to make appropriate changes
	 */
	sep = sep_dev;

	sep->pdev = pci_dev_get(pdev);

	init_waitqueue_head(&sep->event_transactions);
	init_waitqueue_head(&sep->event_interrupt);
	spin_lock_init(&sep->snd_rply_lck);
	spin_lock_init(&sep->sep_queue_lock);
	sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);

	INIT_LIST_HEAD(&sep->sep_queue_status);

	dev_dbg(&sep->pdev->dev,
		"sep probe: PCI obtained, device being prepared\n");

	/* Set up our register area */
	sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
	if (!sep->reg_physical_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register start\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
	if (!sep->reg_physical_end) {
		dev_warn(&sep->pdev->dev, "Error getting register end\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
		(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
	if (!sep->reg_addr) {
		dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
		error = -ENODEV;
		goto end_function_free_sep_dev;
	}

	dev_dbg(&sep->pdev->dev,
		"Register area start %llx end %llx virtual %p\n",
		(unsigned long long)sep->reg_physical_addr,
		(unsigned long long)sep->reg_physical_end,
		sep->reg_addr);

	/* Allocate the shared area */
	sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;

	if (sep_map_and_alloc_shared_area(sep)) {
		error = -ENOMEM;
		/* Allocation failed */
		goto end_function_error;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	/* Get the interrupt line */
	error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
		"sep_driver", sep);

	if (error)
		goto end_function_deallocate_sep_shared_area;

	/* The new chip requires a shared area reconfigure */
	error = sep_reconfig_shared_area(sep);
	if (error)
		goto end_function_free_irq;

	sep->in_use = 1;

	/* Finally magic up the device nodes */
	/* Register driver with the fs */
	error = sep_register_driver_with_fs(sep);

	if (error) {
		dev_err(&sep->pdev->dev, "error registering dev file\n");
		goto end_function_free_irq;
	}

	sep->in_use = 0; /* through touching the device */
#ifdef SEP_ENABLE_RUNTIME_PM
	pm_runtime_put_noidle(&sep->pdev->dev);
	pm_runtime_allow(&sep->pdev->dev);
	pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
		SUSPEND_DELAY);
	pm_runtime_use_autosuspend(&sep->pdev->dev);
	pm_runtime_mark_last_busy(&sep->pdev->dev);
	sep->power_save_setup = 1;
#endif
	/* register kernel crypto driver */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	error = sep_crypto_setup();
	if (error) {
		dev_err(&sep->pdev->dev, "crypto setup failed\n");
		goto end_function_free_irq;
	}
#endif
	goto end_function;

end_function_free_irq:
	free_irq(pdev->irq, sep);

end_function_deallocate_sep_shared_area:
	/* De-allocate shared area */
	sep_unmap_and_free_shared_area(sep);

end_function_error:
	iounmap(sep->reg_addr);

end_function_free_sep_dev:
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;

end_function_disable_device:
	pci_disable_device(pdev);

end_function:
	return error;
}
/**
 * sep_remove - handles removing device from pci subsystem
 * @pdev: pointer to pci device
 *
 * This function will handle removing our sep device from pci subsystem on exit
 * or unloading this module. It should free up all used resources, and unmap if
 * any memory regions mapped.
 */
static void sep_remove(struct pci_dev *pdev)
{
	struct sep_device *sep = sep_dev;

	/* Unregister from fs */
	misc_deregister(&sep->miscdev_sep);

	/* Unregister from kernel crypto */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	sep_crypto_takedown();
#endif
	/* Free the irq */
	free_irq(sep->pdev->irq, sep);

	/* Free the shared area */
	sep_unmap_and_free_shared_area(sep_dev);
	iounmap(sep_dev->reg_addr);

#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_forbid(&sep->pdev->dev);
		pm_runtime_get_noresume(&sep->pdev->dev);
	}
#endif
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;
}
/* Initialize struct pci_device_id for our driver */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
	{0}
};

/* Export our pci_device_id structure to user space */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
#ifdef SEP_ENABLE_RUNTIME_PM

/**
 * sep_pci_resume - resume routine while waking up from S3 state
 * @dev: pointer to sep device
 *
 * This function is to be used to wake up sep driver while system awakes from S3
 * state i.e. suspend to ram. The RAM is intact.
 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
 */
static int sep_pci_resume(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci resume called\n");

	if (sep->power_state == SEP_DRIVER_POWERON)
		return 0;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	sep->power_state = SEP_DRIVER_POWERON;

	return 0;
}

/**
 * sep_pci_suspend - suspend routine while going to S3 state
 * @dev: pointer to sep device
 *
 * This function is to be used to suspend sep driver while system goes to S3
 * state i.e. suspend to ram. The RAM is intact and ON during this suspend.
 * Notes - revisit with more understanding of pm, ICR/IMR
 */
static int sep_pci_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci suspend called\n");
	if (sep->in_use == 1)
		return -EAGAIN;

	sep->power_state = SEP_DRIVER_POWEROFF;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR to block all */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);

	return 0;
}

/**
 * sep_pm_runtime_resume - runtime resume routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm, ICR/IMR & counters
 */
static int sep_pm_runtime_resume(struct device *dev)
{
	u32 retval2;
	u32 delay_count;
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");

	/*
	 * Wait until the SCU boot is ready
	 * This is done by iterating SCU_DELAY_ITERATION (10
	 * microseconds each) up to SCU_DELAY_MAX (50) times.
	 * This bit can be set in a random time that is less
	 * than 500 microseconds after each power resume
	 */
	retval2 = 0;
	delay_count = 0;
	while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
		retval2 &= 0x00000008;
		if (!retval2) {
			udelay(SCU_DELAY_ITERATION);
			delay_count += 1;
		}
	}

	if (!retval2) {
		dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
		return -EINVAL;
	}

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	return 0;
}

/**
 * sep_pm_runtime_suspend - runtime suspend routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm
 */
static int sep_pm_runtime_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	return 0;
}

/**
 * sep_pm - power management for sep driver
 * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
 * @sep_pci_suspend: suspend - main memory is still ON
 * @sep_pci_resume: resume - main memory is still ON
 */
static const struct dev_pm_ops sep_pm = {
	.runtime_resume = sep_pm_runtime_resume,
	.runtime_suspend = sep_pm_runtime_suspend,
	.resume = sep_pci_resume,
	.suspend = sep_pci_suspend,
};
#endif /* SEP_ENABLE_RUNTIME_PM */

/**
 * sep_pci_driver - registers this device with pci subsystem
 * @name: name identifier for this driver
 * @sep_pci_id_tbl: pointer to struct pci_device_id table
 * @sep_probe: pointer to probe function in PCI driver
 * @sep_remove: pointer to remove function in PCI driver
 */
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
	.driver = {
		.pm = &sep_pm,
	},
#endif
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove
};

module_pci_driver(sep_pci_driver);

MODULE_LICENSE("GPL");