3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <asm/ioctl.h>
42 #include <linux/ioport.h>
44 #include <linux/interrupt.h>
45 #include <linux/pagemap.h>
46 #include <asm/cacheflush.h>
47 #include "sep_driver_hw_defs.h"
48 #include "sep_driver_config.h"
49 #include "sep_driver_api.h"
50 #include "sep_driver_ext_api.h"
53 /*----------------------------------------
55 -----------------------------------------*/
58 #define INT_MODULE_PARM(n, v) int n = v; module_param(n, int, 0)
60 /*--------------------------------------
62 -----------------------------------------*/
66 /*--------------------------------------------
68 --------------------------------------------*/
70 /* debug messages level */
71 INT_MODULE_PARM(sepDebug, 0x0);
72 MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
76 mutex for the access to the internals of the sep driver
78 static DEFINE_MUTEX(sep_mutex);
81 /* wait queue head (event) of the driver */
82 static DECLARE_WAIT_QUEUE_HEAD(g_sep_event);
86 /*------------------------------------------------
88 ---------------------------------------------------*/
91 interrupt handler function
93 irqreturn_t sep_inthandler(int irq, void *dev_id);
96 this function registers the driver to the file system
98 static int sep_register_driver_to_fs(void);
101 this function unregisters driver from fs
103 static void sep_unregister_driver_from_fs(void);
106 this function calculates the size of data that can be inserted into the lli
107 table from this array; the condition is that either the table is full
108 (all entries are entered), or there are no more entries in the lli array
110 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries);
112 this function builds one lli table from the lli_array according to the
115 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size);
118 this function goes over the list of the print created tables and prints
121 static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size);
126 This function raises interrupt to SEP that signals that it has a new
129 static void sep_send_command_handler(void);
133 This function raises interrupt to SEP that signals that it has a
136 static void sep_send_reply_command_handler(void);
139 This function handles the allocate data pool memory request
140 This function returns calculates the physical address of the allocated memory
141 and the offset of this area from the mapped address. Therefore, the FVOs in
142 user space can calculate the exact virtual address of this allocated memory
144 static int sep_allocate_data_pool_memory_handler(unsigned long arg);
148 This function handles write into allocated data pool command
150 static int sep_write_into_data_pool_handler(unsigned long arg);
153 this function handles the read from data pool command
155 static int sep_read_from_data_pool_handler(unsigned long arg);
158 this function handles the request for creation of the DMA table
159 for the synchronic symmetric operations (AES,DES)
161 static int sep_create_sync_dma_tables_handler(unsigned long arg);
164 this function handles the request to create the DMA tables for flow
166 static int sep_create_flow_dma_tables_handler(unsigned long arg);
169 This API handles the end transaction request
171 static int sep_end_transaction_handler(unsigned long arg);
175 this function handles add tables to flow
177 static int sep_add_flow_tables_handler(unsigned long arg);
180 this function add the flow add message to the specific flow
182 static int sep_add_flow_tables_message_handler(unsigned long arg);
185 this function handles the request for SEP start
187 static int sep_start_handler(void);
190 this function handles the request for SEP initialization
192 static int sep_init_handler(unsigned long arg);
195 this function handles the request cache and resident reallocation
197 static int sep_realloc_cache_resident_handler(unsigned long arg);
201 This api handles the setting of API mode to blocking or non-blocking
203 static int sep_set_api_mode_handler(unsigned long arg);
205 /* handler for flow done interrupt */
206 static void sep_flow_done_handler(struct work_struct *work);
209 This function locks all the physical pages of the kernel virtual buffer
210 and construct a basic lli array, where each entry holds the physical
211 page address and the size that application data holds in this physical pages
213 static int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
216 This function creates one DMA table for flow and returns its data,
217 and pointer to its info entry
219 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress);
222 This function creates a list of tables for flow and returns the data for the
223 first and last tables of the list
225 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
226 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress);
229 this function find a space for the new flow dma table
231 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr);
234 this function goes over all the flow tables connected to the given table and
237 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr);
240 This function handler the set flow id command
242 static int sep_set_flow_id_handler(unsigned long arg);
245 This function returns pointer to the flow data structure
246 that contains the given id
248 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr);
252 this function returns the physical and virtual addresses of the static pool
254 static int sep_get_static_pool_addr_handler(unsigned long arg);
257 this address gets the offset of the physical address from the start of
260 static int sep_get_physical_mapped_offset_handler(unsigned long arg);
264 this function handles the request for get time
266 static int sep_get_time_handler(unsigned long arg);
269 calculates time and sets it at the predefined address
271 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr);
274 PATCH for configuring the DMA to single burst instead of multi-burst
276 static void sep_configure_dma_burst(void);
279 This function locks all the physical pages of the
280 application virtual buffer and construct a basic lli
281 array, where each entry holds the physical page address
282 and the size that application data holds in this physical pages
284 static int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
286 /*---------------------------------------------
288 -----------------------------------------------*/
291 this function returns the address of the message shared area
293 void sep_map_shared_area(unsigned long *mappedAddr_ptr)
295 *mappedAddr_ptr = sep_dev->shared_area_addr;
299 this function returns the address of the message shared area
/*
  notifies SEP that a new message is ready in the shared area by
  delegating to the send-command handler
*/
void sep_send_msg_rdy_cmd()
{
	sep_send_command_handler();
}
306 /* this functions frees all the resources that were allocated for the building
307 of the LLI DMA tables */
/* this function frees all the resources that were allocated for the
   building of the LLI DMA tables */
void sep_free_dma_resources()
{
	sep_free_dma_table_data_handler();
}
313 /* poll(suspend), until reply from sep */
314 void sep_driver_poll()
316 unsigned long retVal = 0;
318 #ifdef SEP_DRIVER_POLLING_MODE
320 while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF))
321 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
323 sep_dev->sep_to_host_reply_counter++;
325 /* poll, until reply from sep */
326 wait_event(g_sep_event, (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter));
331 /*----------------------------------------------------------------------
332 open function of the character driver - must only lock the mutex
333 must also release the memory data pool allocations
334 ------------------------------------------------------------------------*/
335 static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
339 dbg("SEP Driver:--------> open start\n");
343 /* check the blocking mode */
344 if (sep_dev->block_mode_flag)
346 mutex_lock(&sep_mutex);
348 error = mutex_trylock(&sep_mutex);
350 /* check the error */
352 edbg("SEP Driver: down_interruptible failed\n");
357 /* release data pool allocations */
358 sep_dev->data_pool_bytes_allocated = 0;
361 dbg("SEP Driver:<-------- open end\n");
368 /*------------------------------------------------------------
370 -------------------------------------------------------------*/
371 static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
373 dbg("----------->SEP Driver: sep_release start\n");
375 #if 0 /*!SEP_DRIVER_POLLING_MODE */
377 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
379 /* release IRQ line */
380 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);
384 /* unlock the sep mutex */
385 mutex_unlock(&sep_mutex);
387 dbg("SEP Driver:<-------- sep_release end\n");
395 /*---------------------------------------------------------------
396 map function - this functions maps the message shared area
397 -----------------------------------------------------------------*/
398 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
400 unsigned long phys_addr;
402 dbg("-------->SEP Driver: mmap start\n");
404 /* check that the size of the mapped range is as the size of the message
406 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
407 edbg("SEP Driver mmap requested size is more than allowed\n");
408 printk(KERN_WARNING "SEP Driver mmap requested size is more \
410 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
411 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
415 edbg("SEP Driver:g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
417 /* get physical address */
418 phys_addr = sep_dev->phys_shared_area_addr;
420 edbg("SEP Driver: phys_addr is %08lx\n", phys_addr);
422 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
423 edbg("SEP Driver remap_page_range failed\n");
424 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
428 dbg("SEP Driver:<-------- mmap end\n");
434 /*-----------------------------------------------
436 *----------------------------------------------*/
/*
  poll entry point of the character device.
  In polling builds (SEP_DRIVER_POLLING_MODE) it busy-waits on GPR2
  until SEP's reply counter catches up with the host's send counter;
  otherwise it registers the caller on g_sep_event.  When a reply is
  pending it reads GPR2 and sets POLLOUT (SEP request) or POLLIN
  (SEP reply) in the returned mask.
  NOTE(review): several lines are missing from this view of the file
  (the declaration of 'count', the #else/#endif of the polling block,
  and the if/else that selects between the request and reply branch),
  so the comments below describe only the visible code.
*/
437 static unsigned int sep_poll(struct file *filp, poll_table * wait)
440 unsigned int mask = 0;
441 unsigned long retVal = 0; /* flow id */
443 dbg("---------->SEP Driver poll: start\n");
446 #if SEP_DRIVER_POLLING_MODE
/* busy-wait: GPR2 carries SEP's reply counter in its low 31 bits */
448 while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF)) {
449 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
/* dump the first debug words that follow the message area */
451 for (count = 0; count < 10 * 4; count += 4)
452 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
455 sep_dev->sep_to_host_reply_counter++;
457 /* add the event to the polling wait table */
458 poll_wait(filp, &g_sep_event, wait);
462 edbg("sep_dev->host_to_sep_send_counter is %lu\n", sep_dev->host_to_sep_send_counter);
463 edbg("sep_dev->sep_to_host_reply_counter is %lu\n", sep_dev->sep_to_host_reply_counter);
465 /* check if the data is ready */
466 if (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter) {
/* dump the first 12 words of the message area */
467 for (count = 0; count < 12 * 4; count += 4)
468 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
470 for (count = 0; count < 10 * 4; count += 4)
471 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + 0x1800 + count)));
473 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
474 edbg("retVal is %lu\n", retVal);
475 /* check if this is a sep reply or a request */
477 edbg("SEP Driver: sep request in\n");
/* request from SEP: user space may write the reply */
479 mask |= POLLOUT | POLLWRNORM;
481 edbg("SEP Driver: sep reply in\n");
/* reply from SEP: user space may read it */
482 mask |= POLLIN | POLLRDNORM;
485 dbg("SEP Driver:<-------- poll exit\n");
/*
  ioctl dispatcher of the SEP character device.
  Validates that the command carries this driver's magic number and
  then routes every SEP_IOC* request to its dedicated handler; the
  handler's status is propagated back to user space in 'error'.
  NOTE(review): the switch statement header, the break statements,
  the default case and the final return are not visible in this view
  of the file.
*/
490 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
494 dbg("------------>SEP Driver: ioctl start\n");
496 edbg("SEP Driver: cmd is %x\n", cmd);
498 /* check that the command is for sep device */
499 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
503 case SEP_IOCSENDSEPCOMMAND:
504 /* send command to SEP */
505 sep_send_command_handler();
506 edbg("SEP Driver: after sep_send_command_handler\n");
508 case SEP_IOCSENDSEPRPLYCOMMAND:
509 /* send reply command to SEP */
510 sep_send_reply_command_handler();
512 case SEP_IOCALLOCDATAPOLL:
513 /* allocate data pool */
514 error = sep_allocate_data_pool_memory_handler(arg);
516 case SEP_IOCWRITEDATAPOLL:
517 /* write data into memory pool */
518 error = sep_write_into_data_pool_handler(arg);
520 case SEP_IOCREADDATAPOLL:
521 /* read data from data pool into application memory */
522 error = sep_read_from_data_pool_handler(arg);
524 case SEP_IOCCREATESYMDMATABLE:
525 /* create dma table for synchronous operation */
526 error = sep_create_sync_dma_tables_handler(arg);
528 case SEP_IOCCREATEFLOWDMATABLE:
529 /* create flow dma tables */
530 error = sep_create_flow_dma_tables_handler(arg);
532 case SEP_IOCFREEDMATABLEDATA:
/* free the DMA table resources */
534 error = sep_free_dma_table_data_handler();
536 case SEP_IOCSETFLOWID:
/* set the id of a flow */
538 error = sep_set_flow_id_handler(arg);
540 case SEP_IOCADDFLOWTABLE:
541 /* add tables to the dynamic flow */
542 error = sep_add_flow_tables_handler(arg);
544 case SEP_IOCADDFLOWMESSAGE:
545 /* add message of add tables to flow */
546 error = sep_add_flow_tables_message_handler(arg);
548 case SEP_IOCSEPSTART:
549 /* start command to sep */
550 error = sep_start_handler();
/* NOTE(review): the SEP init case label is not visible in this view */
553 /* init command to sep */
554 error = sep_init_handler(arg);
556 case SEP_IOCSETAPIMODE:
557 /* set non- blocking mode */
558 error = sep_set_api_mode_handler(arg);
560 case SEP_IOCGETSTATICPOOLADDR:
561 /* get the physical and virtual addresses of the static pool */
562 error = sep_get_static_pool_addr_handler(arg);
564 case SEP_IOCENDTRANSACTION:
565 error = sep_end_transaction_handler(arg);
567 case SEP_IOCREALLOCCACHERES:
568 error = sep_realloc_cache_resident_handler(arg);
570 case SEP_IOCGETMAPPEDADDROFFSET:
571 error = sep_get_physical_mapped_offset_handler(arg);
/* NOTE(review): the get-time case label is not visible in this view */
574 error = sep_get_time_handler(arg);
580 dbg("SEP Driver:<-------- ioctl end\n");
587 interrupt handler function
/*
  SEP interrupt handler.
  Reads the IRR register to classify the interrupt.  The flow-done
  branch is compiled out (its condition is hard-coded to 0).  A reply
  interrupt (bit 13 of IRR) advances the reply counter and wakes any
  process sleeping on g_sep_event; anything else is reported as
  IRQ_NONE.  The value read from IRR is written back to ICR at the
  end to acknowledge (clear) the interrupt.
*/
589 irqreturn_t sep_inthandler(int irq, void *dev_id)
591 irqreturn_t int_error;
593 unsigned long reg_val;
594 unsigned long flow_id;
595 struct sep_flow_context_t *flow_context_ptr;
597 int_error = IRQ_HANDLED;
599 /* read the IRR register to check if this is SEP interrupt */
600 reg_val = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
601 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
603 /* check if this is the flow interrupt */
/* NOTE(review): dead branch - the flow-interrupt test is disabled */
604 if (0 /*reg_val & (0x1 << 11) */ ) {
605 /* read GPRO to find out which flow is done */
606 flow_id = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
608 /* find the context of the flow */
609 error = sep_find_flow_context(flow_id >> 28, &flow_context_ptr);
611 goto end_function_with_error;
/* hand the completed flow off to the driver workqueue */
613 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
616 queue_work(sep_dev->flow_wq_ptr, &flow_context_ptr->flow_wq);
619 /* check if this is reply interrupt from SEP */
620 if (reg_val & (0x1 << 13)) {
621 /* update the counter of reply messages */
622 sep_dev->sep_to_host_reply_counter++;
624 /* wake up the waiting process */
625 wake_up(&g_sep_event);
/* not a SEP interrupt - let the kernel try other handlers */
627 int_error = IRQ_NONE;
631 end_function_with_error:
632 /* clear the interrupt */
633 sep_write_reg(sep_dev, HW_HOST_ICR_REG_ADDR, reg_val);
640 This function prepares only the input DMA table for synchronous symmetric
/*
  Prepares the input-only DMA table chain for the synchronous
  symmetric operations (AES, DES): locks the pages of the user or
  kernel input buffer, builds one or more LLI tables in the shared
  area, chains them through each table's trailing info entry, and
  returns the physical address, entry count and data size of the
  first table through the output pointers.  A zero data_size builds
  a special two-entry empty table.
  NOTE(review): several lines are missing from this view of the file
  (the initializations of current_entry/info_entry_ptr and the error
  checks after the page-lock calls); also "¤t_" below appears to
  be an HTML-mangled "&current_" - confirm against the pristine file.
*/
643 int sep_prepare_input_dma_table(unsigned long app_virt_addr, unsigned long data_size, unsigned long block_size, unsigned long *lli_table_ptr, unsigned long *num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
645 /* pointer to the info entry of the table - the last entry */
646 struct sep_lli_entry_t *info_entry_ptr;
647 /* array of pointers to page */
648 struct sep_lli_entry_t *lli_array_ptr;
649 /* points to the first entry to be processed in the lli_in_array */
650 unsigned long current_entry;
651 /* num entries in the virtual buffer */
652 unsigned long sep_lli_entries;
653 /* lli table pointer */
654 struct sep_lli_entry_t *in_lli_table_ptr;
655 /* the total data in one table */
656 unsigned long table_data_size;
657 /* number of entries in lli table */
658 unsigned long num_entries_in_table;
659 /* next table address */
660 unsigned long lli_table_alloc_addr;
661 unsigned long result;
663 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
665 edbg("SEP Driver:data_size is %lu\n", data_size);
666 edbg("SEP Driver:block_size is %lu\n", block_size);
668 /* initialize the pages pointers */
669 sep_dev->in_page_array = 0;
670 sep_dev->in_num_pages = 0;
672 if (data_size == 0) {
673 /* special case - created 2 entries table with zero data */
674 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
675 in_lli_table_ptr->physical_address = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
676 in_lli_table_ptr->block_size = 0;
/* second entry: sentinel info entry terminating the chain */
679 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
680 in_lli_table_ptr->block_size = 0;
682 *lli_table_ptr = sep_dev->phys_shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
683 *num_entries_ptr = 2;
684 *table_data_size_ptr = 0;
689 /* check if the pages are in Kernel Virtual Address layout */
690 if (isKernelVirtualAddress == true)
691 /* lock the pages of the kernel buffer and translate them to pages */
692 result = sep_lock_kernel_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
694 /* lock the pages of the user buffer and translate them to pages */
695 result = sep_lock_user_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
700 edbg("SEP Driver:output sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
704 sep_lli_entries = sep_dev->in_num_pages;
706 /* initiate to point after the message area */
707 lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
709 /* loop till all the entries in in array are not processed */
710 while (current_entry < sep_lli_entries) {
711 /* set the new input and output tables */
712 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
714 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
716 /* calculate the maximum size of data for input table */
717 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
719 /* round the table size down to a multiple of the block size */
720 table_data_size = (table_data_size / block_size) * block_size;
722 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
724 /* construct input lli table */
725 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, ¤t_entry, &num_entries_in_table, table_data_size);
/* first table: report it to the caller; later tables: chain them */
727 if (info_entry_ptr == 0) {
728 /* set the output parameters to physical addresses */
729 *lli_table_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
730 *num_entries_ptr = num_entries_in_table;
731 *table_data_size_ptr = table_data_size;
733 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
735 /* update the info entry of the previous in table */
736 info_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
737 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
740 /* save the pointer to the info entry of the current tables */
741 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
744 /* print input tables */
745 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
746 sep_shared_area_phys_to_virt(*lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
748 /* the array of the pages */
749 kfree(lli_array_ptr);
751 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
757 This function builds input and output DMA tables for synhronic
758 symmetric operations (AES, DES). It also checks that each table
759 is of the modular block size
/*
  Builds both input and output DMA table chains for the synchronous
  symmetric operations (AES, DES): locks the pages of the input and
  output buffers (user or kernel), then delegates table construction
  to sep_construct_dma_tables_from_lli, and frees the temporary LLI
  arrays on the way out (fall-through into the error labels).
  NOTE(review): lines are missing from this view of the file (the
  error checks after the lock calls and, apparently, the kfree of
  lli_in_array under end_function_with_error1).
*/
761 int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
762 unsigned long app_virt_out_addr,
763 unsigned long data_size,
764 unsigned long block_size,
765 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
767 /* array of pointers of page */
768 struct sep_lli_entry_t *lli_in_array;
769 /* array of pointers of page */
770 struct sep_lli_entry_t *lli_out_array;
773 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
775 /* initialize the pages pointers */
776 sep_dev->in_page_array = 0;
777 sep_dev->out_page_array = 0;
779 /* check if the pages are in Kernel Virtual Address layout */
780 if (isKernelVirtualAddress == true) {
781 /* lock the pages of the kernel buffer and translate them to pages */
782 result = sep_lock_kernel_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
784 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
788 /* lock the pages of the user buffer and translate them to pages */
789 result = sep_lock_user_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
791 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
/* now lock the output buffer the same way */
796 if (isKernelVirtualAddress == true) {
797 result = sep_lock_kernel_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
799 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
800 goto end_function_with_error1;
803 result = sep_lock_user_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
805 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
806 goto end_function_with_error1;
809 edbg("sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
810 edbg("sep_dev->out_num_pages is %lu\n", sep_dev->out_num_pages);
811 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
814 /* call the function that creates tables from the lli arrays */
815 result = sep_construct_dma_tables_from_lli(lli_in_array, sep_dev->in_num_pages, lli_out_array, sep_dev->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
817 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
818 goto end_function_with_error2;
821 /* fall through - free the lli entry arrays */
822 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
823 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
824 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
825 end_function_with_error2:
826 kfree(lli_out_array);
827 end_function_with_error1:
830 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
837 This function creates the input and output dma tables for
838 symmetric operations (AES/DES) according to the block size from LLI arays
/*
  Creates matched input and output LLI table chains in the shared
  area from the two locked-page arrays.  Each iteration allocates one
  input and one output table, sizes both to the smaller of the two
  candidate data sizes rounded down to a multiple of block_size, and
  links each new table into its chain via the previous table's
  trailing info entry.  The physical address / entry count / data
  size of the first pair of tables are returned to the caller.
  NOTE(review): lines are missing from this view of the file (e.g.
  loop/branch delimiters and the final return); also "¤t_"
  below appears to be an HTML-mangled "&current_" - confirm against
  the pristine file.
*/
840 int sep_construct_dma_tables_from_lli(struct sep_lli_entry_t *lli_in_array,
841 unsigned long sep_in_lli_entries,
842 struct sep_lli_entry_t *lli_out_array,
843 unsigned long sep_out_lli_entries,
844 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
846 /* points to the area where next lli table can be allocated */
847 unsigned long lli_table_alloc_addr;
848 /* input lli table */
849 struct sep_lli_entry_t *in_lli_table_ptr;
850 /* output lli table */
851 struct sep_lli_entry_t *out_lli_table_ptr;
852 /* pointer to the info entry of the table - the last entry */
853 struct sep_lli_entry_t *info_in_entry_ptr;
854 /* pointer to the info entry of the table - the last entry */
855 struct sep_lli_entry_t *info_out_entry_ptr;
856 /* points to the first entry to be processed in the lli_in_array */
857 unsigned long current_in_entry;
858 /* points to the first entry to be processed in the lli_out_array */
859 unsigned long current_out_entry;
860 /* max size of the input table */
861 unsigned long in_table_data_size;
862 /* max size of the output table */
863 unsigned long out_table_data_size;
864 /* flag that signifies if these are the first tables built from the arrays */
865 unsigned long first_table_flag;
866 /* the data size that should be in table */
867 unsigned long table_data_size;
868 /* number of entries in the input table */
869 unsigned long num_entries_in_table;
870 /* number of entries in the output table */
871 unsigned long num_entries_out_table;
873 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
875 /* initiate to point after the message area */
876 lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
878 current_in_entry = 0;
879 current_out_entry = 0;
880 first_table_flag = 1;
881 info_in_entry_ptr = 0;
882 info_out_entry_ptr = 0;
884 /* loop till all the entries in in array are not processed */
885 while (current_in_entry < sep_in_lli_entries) {
886 /* set the new input and output tables */
887 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
889 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
891 /* set the first output tables */
892 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
894 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
896 /* calculate the maximum size of data for input table */
897 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
899 /* calculate the maximum size of data for output table */
900 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
902 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
903 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
905 /* check where the data is smallest */
906 table_data_size = in_table_data_size;
907 if (table_data_size > out_table_data_size)
908 table_data_size = out_table_data_size;
910 /* round the table size down to a multiple of the block size */
911 table_data_size = (table_data_size / block_size) * block_size;
913 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
915 /* construct input lli table */
916 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, ¤t_in_entry, &num_entries_in_table, table_data_size);
918 /* construct output lli table */
919 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, ¤t_out_entry, &num_entries_out_table, table_data_size);
921 /* if info entry is null - this is the first table built */
922 if (info_in_entry_ptr == 0) {
923 /* set the output parameters to physical addresses */
924 *lli_table_in_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
925 *in_num_entries_ptr = num_entries_in_table;
926 *lli_table_out_ptr = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
927 *out_num_entries_ptr = num_entries_out_table;
928 *table_data_size_ptr = table_data_size;
930 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
931 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
933 /* update the info entry of the previous in table */
934 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
935 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
937 /* update the info entry of the previous out table */
938 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
939 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
942 /* save the pointer to the info entry of the current tables */
943 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
944 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
946 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
947 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
948 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
951 /* print input tables */
952 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
953 sep_shared_area_phys_to_virt(*lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
954 /* print output tables */
955 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
956 sep_shared_area_phys_to_virt(*lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
957 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
962 this function calculates the size of data that can be inserted into the lli
963 table from this array; the condition is that either the table is full
964 (all entries are entered), or there are no more entries in the lli array
966 unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
968 unsigned long table_data_size = 0;
969 unsigned long counter;
971 /* calculate the data in the out lli table if till we fill the whole
972 table or till the data has ended */
973 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
974 table_data_size += lli_in_array_ptr[counter].block_size;
975 return table_data_size;
/*
 * Builds one LLI table from lli_array_ptr, consuming exactly
 * table_data_size bytes of data.  Entries are written through
 * lli_table_ptr, the table is terminated with an info entry
 * (physical_address = 0xffffffff, block_size = 0), and the output
 * pointers report how many lli-array entries were consumed and how many
 * table entries (including the info entry) were produced.
 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	/* NOTE(review): array_counter has no visible initialization in this
	   hunk - confirm it is zeroed here in the full source */

	/* the table always ends with one info entry */
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		/* copy the current lli array entry into the table */
		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* trim the last table entry so the table holds exactly
			   table_data_size bytes */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array - the
			   remainder of this entry goes into the next table */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);

		/* advance to the next entry in the lli_array */
		/* NOTE(review): the else arm that increments array_counter and
		   the closing braces are elided in this hunk */

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */

	/* set the info entry to default (list terminator) */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
/*
 * Debug helper: walks the chain of LLI tables starting at lli_table_ptr
 * and prints every entry of every table, following each table's info
 * entry to the next table until the 0xffffffff terminator.
 */
static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	/* NOTE(review): table_count has no visible initialization or
	   increment in this hunk - confirm it is maintained in the full
	   source */
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);

		/* point to the info entry */
		/* NOTE(review): the loop above leaves lli_table_ptr one past
		   the info entry; confirm the full source steps it back here */
		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* the info entry encodes the next table: low 24 bits of
		   block_size = data size, bits 24..31 = number of entries,
		   physical_address = physical address of the next table */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* translate the physical address of the next table back to a
		   kernel virtual address (unless it is the terminator) */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt((unsigned long) lli_table_ptr);

	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1095 This function locks all the physical pages of the application virtual buffer
1096 and construct a basic lli array, where each entry holds the physical page
1097 address and the size that application data holds in this physical pages
1099 int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
1102 /* the the page of the end address of the user space buffer */
1103 unsigned long end_page;
1104 /* the page of the start address of the user space buffer */
1105 unsigned long start_page;
1106 /* the range in pages */
1107 unsigned long num_pages;
1108 struct page **page_array;
1109 struct sep_lli_entry_t *lli_array;
1110 unsigned long count;
1113 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
1115 /* set start and end pages and num pages */
1116 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1117 start_page = app_virt_addr >> PAGE_SHIFT;
1118 num_pages = end_page - start_page + 1;
1120 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
1121 edbg("SEP Driver: data_size is %lu\n", data_size);
1122 edbg("SEP Driver: start_page is %lu\n", start_page);
1123 edbg("SEP Driver: end_page is %lu\n", end_page);
1124 edbg("SEP Driver: num_pages is %lu\n", num_pages);
1126 /* allocate array of pages structure pointers */
1127 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1129 edbg("SEP Driver: kmalloc for page_array failed\n");
1135 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
1137 edbg("SEP Driver: kmalloc for lli_array failed\n");
1140 goto end_function_with_error1;
1143 /* convert the application virtual address into a set of physical */
1144 down_read(¤t->mm->mmap_sem);
1145 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
1146 up_read(¤t->mm->mmap_sem);
1148 /* check the number of pages locked - if not all then exit with error */
1149 if (result != num_pages) {
1150 dbg("SEP Driver: not all pages locked by get_user_pages\n");
1153 goto end_function_with_error2;
1156 /* flush the cache */
1157 for (count = 0; count < num_pages; count++)
1158 flush_dcache_page(page_array[count]);
1160 /* set the start address of the first page - app data may start not at
1161 the beginning of the page */
1162 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
1164 /* check that not all the data is in the first page only */
1165 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1166 lli_array[0].block_size = data_size;
1168 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1171 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
1173 /* go from the second page to the prev before last */
1174 for (count = 1; count < (num_pages - 1); count++) {
1175 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1176 lli_array[count].block_size = PAGE_SIZE;
1178 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1181 /* if more then 1 pages locked - then update for the last page size needed */
1182 if (num_pages > 1) {
1183 /* update the address of the last page */
1184 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1186 /* set the size of the last page */
1187 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
1189 if (lli_array[count].block_size == 0) {
1190 dbg("app_virt_addr is %08lx\n", app_virt_addr);
1191 dbg("data_size is %lu\n", data_size);
1194 edbg("lli_array[%lu].physical_address is %08lx, \
1195 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1198 /* set output params */
1199 *lli_array_ptr = lli_array;
1200 *num_pages_ptr = num_pages;
1201 *page_array_ptr = page_array;
1204 end_function_with_error2:
1205 /* release the cache */
1206 for (count = 0; count < num_pages; count++)
1207 page_cache_release(page_array[count]);
1209 end_function_with_error1:
1212 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
/*
 * Builds a basic LLI array for a kernel-virtual buffer: each entry holds
 * the physical address of a page and the number of buffer bytes it
 * contains.  No page references are taken for kernel memory, so
 * *page_array_ptr is returned as 0.
 * NOTE(review): uses virt_to_phys(), which is valid only for lowmem
 * (lineally mapped) kernel addresses, not vmalloc memory - confirm all
 * callers pass lowmem buffers.
 */
int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
	/* the page of the end address of the buffer */
	unsigned long end_page;
	/* the page of the start address of the buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct sep_lli_entry_t *lli_array;
	/* next kernel address to map */
	unsigned long next_kernel_address;
	unsigned long count;

	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

	/* set start and end pages and num pages */
	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = kernel_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lx\n", start_page);
	edbg("SEP Driver: end_page is %lx\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	/* NOTE(review): the NULL check / error return for this allocation is
	   not visible in this hunk - confirm it exists in the full source */
	edbg("SEP Driver: kmalloc for lli_array failed\n");

	/* set the start address of the first page - data may start not at
	   the beginning of the page; virt_to_phys keeps the in-page offset */
	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* advance the address to the start of the next page */
	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

	/* go from the second page to the prev before last - these pages are
	   used in full */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
		next_kernel_address += PAGE_SIZE;

	/* if more than 1 page locked - then update for the last page size needed */
	if (num_pages > 1) {
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

		/* set the size of the last page */
		lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
			dbg("data_size is %lu\n", data_size);

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

	/* set output params - no page array for kernel memory */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = 0;

	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
/*
 * Releases the pages previously pinned by sep_lock_user_pages and frees
 * the page array itself.  dirtyFlag selects whether pages are marked
 * dirty before release (set for output buffers that SEP wrote to).
 */
int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
	unsigned long count;

	/* NOTE(review): this hunk shows two release loops back to back - in
	   the full source they are the two arms of an if (dirtyFlag) branch;
	   confirm */
	for (count = 0; count < num_pages; count++) {
		/* the out array was written, therefore the data was changed */
		if (!PageReserved(page_array_ptr[count]))
			SetPageDirty(page_array_ptr[count]);
		page_cache_release(page_array_ptr[count]);

	/* free in pages - the data was only read, therefore no update was
	   done on those pages */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array_ptr[count]);

	/* free the array */
	kfree(page_array_ptr);
1335 This function raises interrupt to SEP that signals that is has a new
1338 static void sep_send_command_handler()
1340 unsigned long count;
1342 dbg("SEP Driver:--------> sep_send_command_handler start\n");
1348 for (count = 0; count < 12 * 4; count += 4)
1349 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1351 /* update counter */
1352 sep_dev->host_to_sep_send_counter++;
1353 /* send interrupt to SEP */
1354 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
1355 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
1360 This function raises interrupt to SEPm that signals that is has a
1361 new command from HOST
1363 static void sep_send_reply_command_handler()
1365 unsigned long count;
1367 dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
1371 for (count = 0; count < 12 * 4; count += 4)
1372 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1373 /* update counter */
1374 sep_dev->host_to_sep_send_counter++;
1375 /* send the interrupt to SEP */
1376 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep_dev->host_to_sep_send_counter);
1377 /* update both counters */
1378 sep_dev->host_to_sep_send_counter++;
1379 sep_dev->sep_to_host_reply_counter++;
1380 dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
/*
 * IOCTL handler: allocates from the shared data pool by bumping
 * data_pool_bytes_allocated (there is no free).  Returns to user space
 * the offset of the allocation from the start of the mapped shared area
 * and its physical address, so user space can compute the corresponding
 * virtual address inside its own mapping.
 */
static int sep_allocate_data_pool_memory_handler(unsigned long arg)
	struct sep_driver_alloc_t command_args;

	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
	/* NOTE(review): the declaration of `error` and the checks of the
	   copy results are not visible in this hunk - confirm in the full
	   source */

	/* refuse the allocation if it would exceed the pool */
	if ((sep_dev->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {

	/* set the virtual and physical address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
	command_args.phys_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;

	/* write the memory back to the user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));

	/* set the allocation */
	sep_dev->data_pool_bytes_allocated += command_args.num_bytes;

	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
1427 This function handles write into allocated data pool command
1429 static int sep_write_into_data_pool_handler(unsigned long arg)
1432 unsigned long virt_address;
1433 unsigned long app_in_address;
1434 unsigned long num_bytes;
1435 unsigned long data_pool_area_addr;
1437 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
1439 /* get the application address */
1440 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
1444 /* get the virtual kernel address address */
1445 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
1449 /* get the number of bytes */
1450 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
1454 /* calculate the start of the data pool */
1455 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
1458 /* check that the range of the virtual kernel address is correct */
1459 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
1463 /* copy the application data */
1464 error = copy_from_user((void *) virt_address, (void *) app_in_address, num_bytes);
1466 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
1471 this function handles the read from data pool command
1473 static int sep_read_from_data_pool_handler(unsigned long arg)
1476 /* virtual address of dest application buffer */
1477 unsigned long app_out_address;
1478 /* virtual address of the data pool */
1479 unsigned long virt_address;
1480 unsigned long num_bytes;
1481 unsigned long data_pool_area_addr;
1483 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
1485 /* get the application address */
1486 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
1490 /* get the virtual kernel address address */
1491 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
1495 /* get the number of bytes */
1496 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
1500 /* calculate the start of the data pool */
1501 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
1503 /* check that the range of the virtual kernel address is correct */
1504 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
1509 /* copy the application data */
1510 error = copy_to_user((void *) app_out_address, (void *) virt_address, num_bytes);
1512 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
/*
 * IOCTL handler: builds the synchronous DMA (LLI) tables for a
 * symmetric operation (AES/DES).  Builds input-only tables when no
 * output address is supplied, otherwise input and output tables, then
 * copies the resulting table addresses/sizes back to user space.
 */
static int sep_create_sync_dma_tables_handler(unsigned long arg)
	/* command arguments */
	struct sep_driver_build_sync_table_t command_args;

	dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));

	edbg("app_in_address is %08lx\n", command_args.app_in_address);
	edbg("app_out_address is %08lx\n", command_args.app_out_address);
	edbg("data_size is %lu\n", command_args.data_in_size);
	edbg("block_size is %lu\n", command_args.block_size);

	/* check if we need to build only input table or input/output */
	if (command_args.app_out_address)
		/* prepare input and output tables */
		error = sep_prepare_input_output_dma_table(command_args.app_in_address,
							   command_args.app_out_address,
							   command_args.data_in_size,
							   command_args.block_size,
							   &command_args.in_table_address,
							   &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
	/* prepare input tables (NOTE(review): the `else` keyword for this
	   arm is elided from this hunk - confirm in the full source) */
	error = sep_prepare_input_dma_table(command_args.app_in_address,
					    command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);

	/* return the built table parameters to user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));

	dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1562 this function handles the request for freeing dma table for synhronic actions
1564 int sep_free_dma_table_data_handler()
1566 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1568 /* free input pages array */
1569 sep_free_dma_pages(sep_dev->in_page_array, sep_dev->in_num_pages, 0);
1571 /* free output pages array if needed */
1572 if (sep_dev->out_page_array)
1573 sep_free_dma_pages(sep_dev->out_page_array, sep_dev->out_num_pages, 1);
1575 /* reset all the values */
1576 sep_dev->in_page_array = 0;
1577 sep_dev->out_page_array = 0;
1578 sep_dev->in_num_pages = 0;
1579 sep_dev->out_num_pages = 0;
1580 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
/*
 * IOCTL handler: builds the DMA tables for a new flow.  Finds a free
 * flow context, builds the flow LLI tables from the user-supplied
 * virtual buffers, returns the first table's address/entries/size to
 * user space, and records the tables in the flow context.
 */
static int sep_create_flow_dma_tables_handler(unsigned long arg)
	struct sep_driver_build_flow_table_t command_args;
	/* first table - output */
	struct sep_lli_entry_t first_table_data;
	/* dma table data */
	struct sep_lli_entry_t last_table_data;
	/* pointer to the info entry of the previous DMA table */
	struct sep_lli_entry_t *prev_info_entry_ptr;
	/* pointer to the flow data structure */
	struct sep_flow_context_t *flow_context_ptr;

	dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");

	/* init variables */
	prev_info_entry_ptr = 0;
	first_table_data.physical_address = 0xffffffff;

	/* find the free structure for flow data */
	error = sep_find_flow_context(SEP_FREE_FLOW_ID, &flow_context_ptr);

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));

	/* create flow tables */
	/* NOTE(review): several `if (error)` guard lines are elided from
	   this hunk; the bare gotos below belong to them */
	error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
		goto end_function_with_error;

	/* check if flow is static */
	if (!command_args.flow_type)
		/* point the info entry of the last to the info entry of the first */
		last_table_data = first_table_data;

	/* set output params */
	command_args.first_table_addr = first_table_data.physical_address;
	command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
	command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);

	/* send the parameters to user application */
	error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
		goto end_function_with_error;

	/* all the flow created - update the flow entry with temp id */
	flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;

	/* set the processing tables data in the context */
	if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
		flow_context_ptr->input_tables_in_process = first_table_data;
	/* NOTE(review): the `else` for this assignment is elided from this
	   hunk - confirm in the full source */
	flow_context_ptr->output_tables_in_process = first_table_data;

end_function_with_error:
	/* free the allocated tables */
	sep_deallocated_flow_tables(&first_table_data);

	dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
/*
 * IOCTL handler: adds DMA tables to an existing flow.  Builds new flow
 * tables from the user-supplied buffers and either starts the flow's
 * input/output table chain or links the new tables onto the end of the
 * existing chain via the previous chain's info entry.
 */
static int sep_add_flow_tables_handler(unsigned long arg)
	unsigned long num_entries;
	struct sep_driver_add_flow_table_t command_args;
	struct sep_flow_context_t *flow_context_ptr;
	/* first dma table data */
	struct sep_lli_entry_t first_table_data;
	/* last dma table data */
	struct sep_lli_entry_t last_table_data;
	/* pointer to the info entry of the current DMA table */
	struct sep_lli_entry_t *info_entry_ptr;

	dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");

	/* get input parameters */
	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));

	/* find the flow structure for the flow id */
	error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);

	/* prepare the flow dma tables */
	/* NOTE(review): `if (error)` guard lines are elided from this hunk;
	   the bare goto below belongs to one */
	error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
		goto end_function_with_error;

	/* now check if there is already an existing add table for this flow */
	if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
		/* this buffer was for input buffers */
		if (flow_context_ptr->input_tables_flag) {
			/* add table already exists - add the new tables to the
			   end of the previous */
			num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;

			/* NOTE(review): a physical address is cast and
			   dereferenced as a kernel pointer here - this works
			   only if the tables live in an identity-mapped range;
			   confirm against sep_shared_area_phys_to_virt usage
			   elsewhere in this file */
			info_entry_ptr = (struct sep_lli_entry_t *)
			    (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));

			/* connect to list of tables */
			*info_entry_ptr = first_table_data;

			/* set the first table data */
			first_table_data = flow_context_ptr->first_input_table;

		/* set the input flag */
		flow_context_ptr->input_tables_flag = 1;

		/* set the first table data */
		flow_context_ptr->first_input_table = first_table_data;

		/* set the last table data */
		flow_context_ptr->last_input_table = last_table_data;
	} else {		/* this is output tables */

		/* this buffer was for output buffers */
		if (flow_context_ptr->output_tables_flag) {
			/* add table already exists - add the new tables to
			   the end of the previous */
			num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;

			info_entry_ptr = (struct sep_lli_entry_t *)
			    (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));

			/* connect to list of tables */
			*info_entry_ptr = first_table_data;

			/* set the first table data */
			first_table_data = flow_context_ptr->first_output_table;

		/* set the output flag */
		flow_context_ptr->output_tables_flag = 1;

		/* set the first table data */
		flow_context_ptr->first_output_table = first_table_data;

		/* set the last table data */
		flow_context_ptr->last_output_table = last_table_data;

	/* set output params */
	command_args.first_table_addr = first_table_data.physical_address;
	command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
	command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);

	/* send the parameters to user application */
	error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
end_function_with_error:
	/* free the allocated tables */
	sep_deallocated_flow_tables(&first_table_data);

	dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
/*
 * IOCTL handler: copies a user-supplied message into the context of the
 * given flow, bounded by SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES.
 */
static int sep_add_flow_tables_message_handler(unsigned long arg)
	struct sep_driver_add_message_t command_args;
	struct sep_flow_context_t *flow_context_ptr;

	dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));

	/* reject messages larger than the per-flow message buffer */
	if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {

	/* find the flow context */
	error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);

	/* copy the message into context */
	flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
	error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);

	dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
/*
 * IOCTL handler: returns to user space the physical and kernel-virtual
 * addresses of the static pool inside the shared area.
 */
static int sep_get_static_pool_addr_handler(unsigned long arg)
	struct sep_driver_static_pool_addr_t command_args;

	dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");

	/* prepare the output parameters in the struct */
	command_args.physical_static_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
	command_args.virtual_static_address = sep_dev->shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;

	edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);

	/* send the parameters to user application */
	error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
	dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
/*
 * IOCTL handler: given a physical address inside the shared area,
 * returns its offset from the start of the shared area so user space
 * can locate it inside its mmap of the area.
 */
static int sep_get_physical_mapped_offset_handler(unsigned long arg)
	struct sep_driver_get_mapped_offset_t command_args;

	dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));

	/* reject addresses below the shared area (the offset would underflow) */
	if (command_args.physical_address < sep_dev->phys_shared_area_addr) {

	/* prepare the output parameters in the struct */
	command_args.offset = command_args.physical_address - sep_dev->phys_shared_area_addr;

	edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);

	/* send the parameters to user application */
	error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));

	dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * Waits (by polling GPR3) for the startup message from SEP and returns
 * the status read from GPR0.
 */
static int sep_start_handler(void)
	unsigned long reg_val;
	unsigned long error = 0;

	dbg("SEP Driver:--------> sep_start_handler start\n");

	/* wait in polling for message from SEP */
	/* NOTE(review): the loop construct around this read (and the check
	   of reg_val mentioned below) is elided from this hunk - as shown
	   this is a single read; confirm against the full source */
	reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);

	/* check the value */
	/* fatal error - read error status from GPR0 */
	error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);

	dbg("SEP Driver:<-------- sep_start_handler end\n");
/*
 * IOCTL handler for SEP initialization: copies the init message from
 * user space into SEP SRAM word by word, raises GPR0 to start SEP, then
 * polls GPR3 for the init acknowledge and reports failure status.
 */
static int sep_init_handler(unsigned long arg)
	unsigned long message_word;
	unsigned long *message_ptr;
	struct sep_driver_init_t command_args;
	unsigned long counter;
	unsigned long error;
	unsigned long reg_val;

	dbg("SEP Driver:--------> sep_init_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));

	dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");

	/* PATCH - configure the DMA to single -burst instead of multi-burst */
	/*sep_configure_dma_burst(); */

	dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");

	/* message_addr is a user-space pointer to the init message words */
	message_ptr = (unsigned long *) command_args.message_addr;

	/* set the base address of the SRAM */
	sep_write_reg(sep_dev, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);

	for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
		/* NOTE(review): the return value of get_user is ignored here;
		   on fault message_word is used uninitialized - confirm */
		get_user(message_word, message_ptr);
		/* write data to SRAM */
		sep_write_reg(sep_dev, HW_SRAM_DATA_REG_ADDR, message_word);
		edbg("SEP Driver:message_word is %lu\n", message_word);
		/* wait for write complete */
		sep_wait_sram_write(sep_dev);

	dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");

	/* raise GPR0 to signal SEP to start */
	sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);

	reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	/* NOTE(review): as shown this busy-wait has an empty body and never
	   re-reads the register - confirm the full source wraps the read
	   above in a do/while */
	while (!(reg_val & 0xFFFFFFFD));

	dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");

	/* check the value */
	if (reg_val == 0x1) {
		edbg("SEP Driver:init failed\n");

		/* NOTE(review): 0x8060 is an undocumented register offset
		   ("sw monitor") - confirm its meaning */
		error = sep_read_reg(sep_dev, 0x8060);
		edbg("SEP Driver:sw monitor is %lu\n", error);

		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver:error is %lu\n", error);

	dbg("SEP Driver:<-------- sep_init_handler end\n");
/*
 * sep_realloc_cache_resident_handler - relocate the cache and resident
 * images into the shared area and report the new physical addresses (and
 * the lowest "base" of cache/resident/shared area) back to user space via
 * the struct sep_driver_realloc_cache_resident_t at *arg.
 */
1936 this function handles the request cache and resident reallocation
1938 static int sep_realloc_cache_resident_handler(unsigned long arg)
1941 unsigned long phys_cache_address;
1942 unsigned long phys_resident_address;
1943 struct sep_driver_realloc_cache_resident_t command_args;
/* NOTE(review): the copy_from_user()/copy_to_user() results are assigned
   to 'error' but no check is visible in this chunk - confirm the elided
   lines bail out on failure */
1946 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
1950 /* copy cache and resident to their intended locations */
1951 error = sep_copy_cache_resident_to_area(command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
1955 /* lock the area (if needed) */
1956 sep_lock_cache_resident_area();
1958 command_args.new_base_addr = sep_dev->phys_shared_area_addr;
1960 /* find the new base address according to the lowest address between
1961 cache, resident and shared area */
1962 if (phys_resident_address < command_args.new_base_addr)
1963 command_args.new_base_addr = phys_resident_address;
1964 if (phys_cache_address < command_args.new_base_addr)
1965 command_args.new_base_addr = phys_cache_address;
1967 /* set the return parameters */
1968 command_args.new_cache_addr = phys_cache_address;
1969 command_args.new_resident_addr = phys_resident_address;
1971 /* set the new shared area */
1972 command_args.new_shared_area_addr = sep_dev->phys_shared_area_addr;
1974 edbg("SEP Driver:command_args.new_shared_area_addr is %08lx\n", command_args.new_shared_area_addr);
1975 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
1976 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
1977 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
1979 /* return to user */
1980 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
/*
 * sep_get_time_handler - stamp the current time into the shared area and
 * return the time value plus its physical address to the user application
 * (struct sep_driver_get_time_t at *arg).
 */
1986 this function handles the request for get time
1988 static int sep_get_time_handler(unsigned long arg)
1991 struct sep_driver_get_time_t command_args;
1993 error = sep_set_time(&command_args.time_physical_address, &command_args.time_value);
/* NOTE(review): the sep_set_time() result above is immediately overwritten
   by the copy_to_user() return value, so a failure in sep_set_time() is
   silently lost - the first assignment should be checked before copying out */
1994 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_get_time_t));
/*
 * sep_set_api_mode_handler - set the driver API mode (blocking or
 * non-blocking) from the user-supplied struct sep_driver_set_api_mode_t.
 */
2000 This api handles the setting of API mode to blocking or non-blocking
2002 static int sep_set_api_mode_handler(unsigned long arg)
2005 unsigned long mode_flag;
2007 dbg("SEP Driver:--------> sep_set_api_mode_handler start\n");
/* read the mode field straight from the user struct */
2009 error = get_user(mode_flag, &(((struct sep_driver_set_api_mode_t *) arg)->mode));
2013 /* set the global flag */
/* NOTE(review): mode_flag is stored without range validation - any non-zero
   user value is accepted; confirm only 0/1 are meaningful */
2014 sep_dev->block_mode_flag = mode_flag;
2016 dbg("SEP Driver:<-------- sep_set_api_mode_handler end\n");
/*
 * sep_end_transaction_handler - finish the current SEP transaction and
 * release the driver mutex so the next caller may start one.
 */
2021 This API handles the end transaction request
2023 static int sep_end_transaction_handler(unsigned long arg)
2025 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
/* dead code: the interrupt-mode teardown below is compiled out.
   NOTE(review): it also contains the misspelled SEP_DIRVER_IRQ_NUM constant -
   if this block is ever re-enabled, verify that identifier exists */
2027 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2029 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2031 /* release IRQ line */
2032 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);
2034 /* release the sep mutex taken when the transaction started */
2035 mutex_unlock(&sep_mutex);
2038 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
/*
 * sep_flow_done_handler - workqueue callback run when a flow-done interrupt
 * was received: frees the DMA tables of the completed flow and, if more
 * input tables are queued, kicks the SEP again via GPR2.
 */
2043 /* handler for flow done interrupt */
2044 static void sep_flow_done_handler(struct work_struct *work)
2046 struct sep_flow_context_t *flow_data_ptr;
2048 /* obtain the mutex */
2049 mutex_lock(&sep_mutex);
2051 /* get the pointer to context */
/* NOTE(review): this cast assumes the work_struct is the FIRST member of
   struct sep_flow_context_t - container_of() would make that explicit and
   safe; verify the struct layout */
2052 flow_data_ptr = (struct sep_flow_context_t *) work;
2054 /* free all the current input tables in sep */
2055 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2057 /* free all the current tables output tables in SEP (if needed) */
2058 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2059 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2061 /* check if we have additional tables to be sent to SEP only input
2062 flag may be checked */
2063 if (flow_data_ptr->input_tables_flag) {
2064 /* copy the message to the shared RAM and signal SEP */
/* NOTE(review): the memcpy() direction contradicts the comment above - the
   destination is flow_data_ptr->message and the source is the shared area,
   i.e. it copies FROM shared RAM. If the intent is to send the stored
   message to the SEP the arguments look swapped; confirm and fix */
2065 memcpy((void *) flow_data_ptr->message, (void *) sep_dev->shared_area_addr, flow_data_ptr->message_size_in_bytes);
2067 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2069 mutex_unlock(&sep_mutex);
/*
 * sep_prepare_flow_dma_tables - build the chained list of LLI DMA tables
 * for a flow from an array of (address, size) pairs at first_buff_addr,
 * and return the first and last tables' descriptors to the caller.
 * isKernelVirtualAddress selects kernel- vs user-space page locking.
 */
2074 This function creates a list of tables for flow and returns the data for
2075 the first and last tables of the list
2077 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
2078 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
2081 unsigned long virt_buff_addr;
2082 unsigned long virt_buff_size;
2083 struct sep_lli_entry_t table_data;
2084 struct sep_lli_entry_t *info_entry_ptr;
2085 struct sep_lli_entry_t *prev_info_entry_ptr;
2090 prev_info_entry_ptr = 0;
2092 /* init the first table to default */
/* 0xffffffff marks "no table" throughout this driver */
2093 table_data.physical_address = 0xffffffff;
2094 first_table_data_ptr->physical_address = 0xffffffff;
2095 table_data.block_size = 0;
2097 for (i = 0; i < num_virtual_buffers; i++) {
2098 /* get the virtual buffer address */
/* NOTE(review): BUG - get_user()'s second argument must be a __user
   pointer, but &first_buff_addr is the address of the kernel stack
   parameter itself, so this reads the local variable instead of the
   user array (and the size read below fetches the same value). It
   should be something like
       get_user(virt_buff_addr, (unsigned long __user *)first_buff_addr);
   with first_buff_addr advanced between reads. Confirm against the
   elided "advance the address" line and fix. */
2099 error = get_user(virt_buff_addr, &first_buff_addr);
2103 /* get the virtual buffer size */
2105 error = get_user(virt_buff_size, &first_buff_addr);
2109 /* advance the address to point to the next pair of address|size */
2112 /* now prepare the one flow LLI table from the data */
2113 error = sep_prepare_one_flow_dma_table(virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
2118 /* if this is the first table - save it to return to the user
2120 *first_table_data_ptr = table_data;
2122 /* set the pointer to info entry */
2123 prev_info_entry_ptr = info_entry_ptr;
2125 /* not first table - chain it: the previous table's info entry gets the
   interrupt flag plus this table's block size */
2127 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
2129 /* set the pointer to info entry */
2130 prev_info_entry_ptr = info_entry_ptr;
2134 /* set the last table data */
2135 *last_table_data_ptr = table_data;
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA (LLI) table for
 * one virtual buffer: find a free table slot in the shared flow area, lock
 * the buffer's pages, fill the LLI entries, and return the table descriptor
 * plus a pointer to its terminating info entry.
 *
 * Table layout written here (relied upon by sep_deallocated_flow_tables):
 *   word 0: number of locked pages, word 1: pointer to the page array,
 *   then lli_array_size data entries, then one info entry.
 */
2142 This function creates one DMA table for flow and returns its data,
2143 and pointer to its info entry
2145 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress)
2148 /* the range in pages */
2149 unsigned long lli_array_size;
2150 struct sep_lli_entry_t *lli_array;
2151 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
2152 unsigned long *start_dma_table_ptr;
2153 /* total table data counter */
2154 unsigned long dma_table_data_count;
2155 /* pointer that will keep the pointer to the pages of the virtual buffer */
2156 struct page **page_array_ptr;
2157 unsigned long entry_count;
2159 /* find the space for the new table */
2160 error = sep_find_free_flow_dma_table_space(&start_dma_table_ptr);
2164 /* check if the pages are in Kernel Virtual Address layout */
2165 if (isKernelVirtualAddress == true)
2166 /* lock kernel buffer in the memory */
2167 error = sep_lock_kernel_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2169 /* lock user buffer in the memory */
2170 error = sep_lock_user_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2175 /* set the pointer to page array at the beginning of table - this table is
2176 now considered taken */
/* storing a non-zero page count here is what marks the slot as occupied
   for sep_find_free_flow_dma_table_space() */
2177 *start_dma_table_ptr = lli_array_size;
2179 /* point to the place of the pages pointers of the table */
2180 start_dma_table_ptr++;
2182 /* set the pages pointer */
2183 *start_dma_table_ptr = (unsigned long) page_array_ptr;
2185 /* set the pointer to the first entry */
2186 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
2188 /* now create the entries for table */
2189 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
2190 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
2192 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
2194 /* set the total data of a table */
2195 dma_table_data_count += lli_array[entry_count].block_size;
2197 flow_dma_table_entry_ptr++;
2200 /* set the physical address */
/* NOTE(review): start_dma_table_ptr here points at the first LLI entry
   (it was advanced past the 2-word header), so the reported physical
   address excludes the header - confirm the SEP expects exactly that */
2201 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
2203 /* set the num_entries and total data size */
/* +1 accounts for the info entry appended below */
2204 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
2206 /* set the info entry */
2207 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
2208 flow_dma_table_entry_ptr->block_size = 0;
2210 /* set the pointer to info entry */
2211 *info_entry_ptr = flow_dma_table_entry_ptr;
2213 /* the array of the lli entries */
/*
 * sep_find_flow_context - locate the flow context whose flow_id matches the
 * given id. On success *flow_data_ptr points at the entry; if no entry
 * matches (count reaches SEP_DRIVER_NUM_FLOWS) an error is returned.
 */
2221 This function returns pointer to the flow data structure
2222 that contains the given id
2224 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr)
2226 unsigned long count;
2230 always search for flow with id default first - in case we
2231 already started working on the flow there can be no situation
2232 when 2 flows are with default flag
2234 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
2235 if (sep_dev->flows_data_array[count].flow_id == flow_id) {
2236 *flow_data_ptr = &sep_dev->flows_data_array[count];
/* exhausted the array without a match -> flow id not found */
2241 if (count == SEP_DRIVER_NUM_FLOWS)
/*
 * sep_find_free_flow_dma_table_space - scan the flow DMA-table region of
 * the shared area for the first unused table slot (first word of a slot is
 * zero when free) and return its address via *table_address_ptr. Returns
 * an error when the whole area is occupied.
 */
2249 this function find a space for the new flow dma table
2251 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr)
2254 /* pointer to the id field of the flow dma table */
2255 unsigned long *start_table_ptr;
2256 unsigned long flow_dma_area_start_addr;
2257 unsigned long flow_dma_area_end_addr;
2258 /* maximum table size in words */
2259 unsigned long table_size_in_words;
2261 /* find the start address of the flow DMA table area */
2262 flow_dma_area_start_addr = sep_dev->shared_area_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
2264 /* set end address of the flow table area */
2265 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
2267 /* set table size in words */
/* +2 covers the table header: page count word and page-array pointer word */
2268 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
2270 /* set the pointer to the start address of DMA area */
2271 start_table_ptr = (unsigned long *) flow_dma_area_start_addr;
2273 /* find the space for the next table */
/* NOTE(review): the dereference is evaluated BEFORE the bounds check, so
   on the last iteration *start_table_ptr reads one slot past the area
   before the range test can stop it - the && operands should be swapped */
2274 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && ((unsigned long) start_table_ptr < flow_dma_area_end_addr))
2275 start_table_ptr += table_size_in_words;
2277 /* check if we reached the end of flow tables area */
2278 if ((unsigned long) start_table_ptr >= flow_dma_area_end_addr)
2281 *table_address_ptr = start_table_ptr;
/*
 * sep_deallocated_flow_tables - walk the chain of flow DMA tables starting
 * at first_table_ptr, releasing the locked pages of each table, until the
 * 0xffffffff end-of-chain marker is reached.
 *
 * Relies on the layout written by sep_prepare_one_flow_dma_table: the two
 * words immediately before a table's first entry hold the page count and
 * the page-array pointer, and the last entry (the info entry) links to the
 * next table.
 */
2287 this function goes over all the flow tables connected to the given
2288 table and deallocate them
2290 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
2293 unsigned long *table_ptr;
2294 /* end address of the flow dma area */
2295 unsigned long num_entries;
2296 unsigned long num_pages;
2297 struct page **pages_ptr;
2298 /* maximum table size in words */
2299 struct sep_lli_entry_t *info_entry_ptr;
2301 /* set the pointer to the first table */
/* NOTE(review): physical_address is used directly as a virtual pointer
   here - presumably valid only because the flow-table area lives in
   identity/linear-mapped memory (virt_to_phys in the prepare path);
   confirm, otherwise this should go through phys_to_virt() */
2302 table_ptr = (unsigned long *) first_table_ptr->physical_address;
2304 /* set the num of entries */
2305 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
2306 & SEP_NUM_ENTRIES_MASK;
2308 /* go over all the connected tables */
2309 while (*table_ptr != 0xffffffff) {
2310 /* get number of pages */
2311 num_pages = *(table_ptr - 2);
2313 /* get the pointer to the pages */
2314 pages_ptr = (struct page **) (*(table_ptr - 1));
2316 /* free the pages */
2317 sep_free_dma_pages(pages_ptr, num_pages, 1);
2319 /* goto to the info entry */
/* num_entries includes the info entry, so the last entry IS the info entry */
2320 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the link stored in the info entry to the next table in the chain */
2322 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
2323 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/*
 * sep_set_flow_id_handler - assign the user-supplied flow id to the flow
 * context that was just created (the one still carrying SEP_TEMP_FLOW_ID).
 */
2330 This function handler the set flow id command
2332 static int sep_set_flow_id_handler(unsigned long arg)
2335 unsigned long flow_id;
2336 struct sep_flow_context_t *flow_data_ptr;
2338 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
/* fetch the requested id from the user struct */
2340 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2344 /* find the flow data structure that was just used for creating new flow
2345 - its id should be default */
2346 error = sep_find_flow_context(SEP_TEMP_FLOW_ID, &flow_data_ptr);
2351 flow_data_ptr->flow_id = flow_id;
2354 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
/*
 * sep_set_time - stamp the current wall-clock seconds into the dedicated
 * slot of the shared message area (token word followed by the seconds
 * value) and optionally report the slot's physical address and the time
 * value through the out-parameters.
 */
2360 calculates time and sets it at the predefined address
2362 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
2364 struct timeval time;
2365 /* address of time in the kernel */
2366 unsigned long time_addr;
2369 dbg("SEP Driver:--------> sep_set_time start\n");
/* NOTE(review): do_gettimeofday()'s tv_sec is 32-bit on 32-bit targets
   (y2038); only the seconds are stored, sub-second precision is dropped */
2371 do_gettimeofday(&time);
2373 /* set value in the SYSTEM MEMORY offset */
2374 time_addr = sep_dev->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
/* NOTE(review): the "+ 4" layout assumes 32-bit unsigned long; on a 64-bit
   build these two stores would overlap - verify this driver is 32-bit only */
2376 *(unsigned long *) time_addr = SEP_TIME_VAL_TOKEN;
2377 *(unsigned long *) (time_addr + 4) = time.tv_sec;
2379 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
2380 edbg("SEP Driver:time_addr is %lu\n", time_addr);
2381 edbg("SEP Driver:g_message_shared_area_addr is %lu\n", sep_dev->message_shared_area_addr);
2383 /* set the output parameters if needed */
/* NOTE(review): time_in_sec_ptr is NULL-checked below but address_ptr is
   dereferenced unguarded - an "if (address_ptr)" appears to be elided from
   this chunk; confirm it exists in the full source */
2385 *address_ptr = sep_shared_area_virt_to_phys(time_addr);
2387 if (time_in_sec_ptr)
2388 *time_in_sec_ptr = time.tv_sec;
2390 dbg("SEP Driver:<-------- sep_set_time end\n");
/*
 * sep_wait_busy - spin until the SEP busy register clears.
 * NOTE(review): only a single read is visible here - the loop and the
 * declaration of 'reg' are elided in this chunk. Also the 'dev' parameter
 * is ignored in favor of the global sep_dev; it should use 'dev'.
 */
2395 static void sep_wait_busy(struct sep_device *dev)
2400 reg = sep_read_reg(sep_dev, HW_HOST_SEP_BUSY_REG_ADDR);
/*
 * sep_configure_dma_burst - workaround ("PATCH"): switch the SEP AHB DMA
 * from multi-burst to single-burst. Requests register access from the SEP
 * via GPR0, programs the burst register, then releases access again.
 * Currently unused - its only call site is commented out in
 * sep_init_handler().
 */
2405 PATCH for configuring the DMA to single burst instead of multi-burst
2407 static void sep_configure_dma_burst(void)
/* local register definition - not present in sep_driver_hw_defs.h */
2409 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2411 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2413 /* request access to registers from SEP */
2414 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2416 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
/* wait until the SEP grants register access */
2418 sep_wait_busy(sep_dev);
2420 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2422 /* set the DMA burst register to single burst */
2423 sep_write_reg(sep_dev, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2425 /* release the sep busy */
2426 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2427 sep_wait_busy(sep_dev);
2429 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2433 /* major and minor device numbers */
2434 static dev_t sep_devno;
2436 /* the files operations structure of the driver */
/* NOTE(review): only .owner and .release are visible here - the .open /
   .ioctl / .poll / .mmap initializers appear to be elided in this chunk */
2437 static struct file_operations sep_file_operations = {
2438 .owner = THIS_MODULE,
2442 .release = sep_release,
2447 /* cdev struct of the driver */
2448 static struct cdev sep_cdev;
/*
 * sep_register_driver_to_fs - register the character device: allocate a
 * dynamic major/minor, initialize the cdev and add it to the kernel.
 * Returns 0 on success; on cdev_add() failure the device numbers are
 * released before returning the error.
 */
2451 this function registers the driver to the file system
2453 static int sep_register_driver_to_fs(void)
2455 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2457 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2462 cdev_init(&sep_cdev, &sep_file_operations);
2463 sep_cdev.owner = THIS_MODULE;
2465 /* register the driver with the kernel */
/* after cdev_add() succeeds the device is live and may be opened immediately */
2466 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2469 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2470 goto end_function_unregister_devnum;
2475 end_function_unregister_devnum:
2477 /* unregister dev numbers */
2478 unregister_chrdev_region(sep_devno, 1);
2485 this function unregisters driver from fs
2487 static void sep_unregister_driver_from_fs(void)
2489 cdev_del(&sep_cdev);
2490 /* unregister dev numbers */
2491 unregister_chrdev_region(sep_devno, 1);
2495 /*--------------------------------------------------------------
2497 ----------------------------------------------------------------*/
/*
 * sep_init - module init: reset the per-device counters, map the device,
 * allocate and partition the shared area, (optionally) hand the new shared
 * area to the SEP, set up the flow workqueue, register the char device and
 * load the ROM code. Error paths unwind in reverse order via the labels at
 * the bottom.
 */
2498 static int __init sep_init(void)
2502 int size; /* size of memory for allocation */
2504 dbg("SEP Driver:-------->Init start\n");
2505 edbg("sep->shared_area_addr = %lx\n", (unsigned long) &sep_dev->shared_area_addr);
2507 /* transaction counter that coordinates the transactions between SEP
2509 sep_dev->host_to_sep_send_counter = 0;
2511 /* counter for the messages from sep */
2512 sep_dev->sep_to_host_reply_counter = 0;
2514 /* counter for the number of bytes allocated in the pool
2515 for the current transaction */
2516 sep_dev->data_pool_bytes_allocated = 0;
2518 /* set the starting mode to blocking */
2519 sep_dev->block_mode_flag = 1;
2521 ret_val = sep_register_driver_to_device();
2523 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2524 goto end_function_unregister_from_fs;
2526 /* calculate the total size for allocation */
2527 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2528 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2530 /* allocate the shared area */
2531 if (sep_map_and_alloc_shared_area(size, &sep_dev->shared_area_addr, &sep_dev->phys_shared_area_addr)) {
2533 /* allocation failed */
2534 goto end_function_unmap_io_memory;
2536 /* now set the memory regions */
2537 sep_dev->message_shared_area_addr = sep_dev->shared_area_addr;
2539 edbg("SEP Driver: g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
2541 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2542 /* send the new SHARED MESSAGE AREA to the SEP */
2543 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep_dev->phys_shared_area_addr);
2545 /* poll for SEP response */
/* NOTE(review): unbounded busy-poll without cpu_relax() or a timeout - if
   the SEP never responds, module load hangs */
2546 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2547 while (retVal != 0xffffffff && retVal != sep_dev->phys_shared_area_addr)
2548 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2550 /* check the return value (register) */
2551 if (retVal != sep_dev->phys_shared_area_addr) {
2553 goto end_function_deallocate_message_area;
2556 /* init the flow contexts */
/* mark every flow slot free so sep_find_flow_context starts clean */
2557 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2558 sep_dev->flows_data_array[counter].flow_id = SEP_FREE_FLOW_ID;
2560 sep_dev->flow_wq_ptr = create_singlethread_workqueue("sepflowwq");
2561 if (sep_dev->flow_wq_ptr == 0) {
2563 edbg("sep_driver:flow queue creation failed\n");
2564 goto end_function_deallocate_sep_shared_area;
2566 edbg("SEP Driver: create flow workqueue \n");
2568 /* register driver to fs */
2569 ret_val = sep_register_driver_to_fs();
2571 goto end_function_deallocate_sep_shared_area;
2572 /* load the rom code */
2573 sep_load_rom_code();
/* NOTE(review): in this chunk the success path appears to fall straight
   through into the cleanup labels below, which would unwind a successful
   init - a "goto end_function" (skipping the cleanup) after
   sep_load_rom_code() seems to be elided; verify against the full source */
2575 end_function_unregister_from_fs:
2576 /* unregister from fs */
2577 sep_unregister_driver_from_fs();
2578 end_function_deallocate_sep_shared_area:
2579 /* de-allocate shared area */
2580 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
2581 end_function_unmap_io_memory:
2582 iounmap((void *) sep_dev->reg_base_address);
2583 /* release io memory region */
2584 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2586 dbg("SEP Driver:<-------- Init end\n");
2591 /*-------------------------------------------------------------
2593 --------------------------------------------------------------*/
/*
 * sep_exit - module teardown: unregister the char device, free the shared
 * area (same total size as computed in sep_init), unmap the device
 * registers and release the IO memory region.
 */
2594 static void __exit sep_exit(void)
2598 dbg("SEP Driver:--------> Exit start\n");
2600 /* unregister from fs */
2601 sep_unregister_driver_from_fs();
2602 /* calculate the total size for de-allocation */
/* must stay identical to the size expression in sep_init() - keep in sync */
2603 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2604 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2605 /* free shared area */
2606 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
2607 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2608 iounmap((void *) sep_dev->reg_base_address);
2609 edbg("SEP Driver: iounmap \n");
2610 /* release io memory region */
2611 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2612 edbg("SEP Driver: release_mem_region \n");
/* NOTE(review): the flow workqueue created in sep_init() is not destroyed
   here - confirm whether destroy_workqueue() is elided or truly missing */
2613 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit registration and license declaration */
2617 module_init(sep_init);
2618 module_exit(sep_exit);
2620 MODULE_LICENSE("GPL");