Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph...
[firefly-linux-kernel-4.4.55.git] / drivers / staging / sep / sep_main.c
1 /*
2  *
3  *  sep_main.c - Security Processor Driver main group of functions
4  *
5  *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  *  Contributions(c) 2009-2011 Discretix. All rights reserved.
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms of the GNU General Public License as published by the Free
10  *  Software Foundation; version 2 of the License.
11  *
12  *  This program is distributed in the hope that it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *
17  *  You should have received a copy of the GNU General Public License along with
18  *  this program; if not, write to the Free Software Foundation, Inc., 59
19  *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20  *
21  *  CONTACTS:
22  *
23  *  Mark Allyn          mark.a.allyn@intel.com
24  *  Jayant Mangalampalli jayant.mangalampalli@intel.com
25  *
26  *  CHANGES:
27  *
28  *  2009.06.26  Initial publish
29  *  2010.09.14  Upgrade to Medfield
30  *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
31  *  2011.02.22  Enable kernel crypto operation
32  *
33  *  Please note that this driver is based on information in the Discretix
34  *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35  *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36  *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37  *  Overview and Integration Guide.
38  */
39 /* #define DEBUG */
40 /* #define SEP_PERF_DEBUG */
41
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/miscdevice.h>
45 #include <linux/fs.h>
46 #include <linux/cdev.h>
47 #include <linux/kdev_t.h>
48 #include <linux/mutex.h>
49 #include <linux/sched.h>
50 #include <linux/mm.h>
51 #include <linux/poll.h>
52 #include <linux/wait.h>
53 #include <linux/pci.h>
54 #include <linux/pm_runtime.h>
55 #include <linux/slab.h>
56 #include <linux/ioctl.h>
57 #include <asm/current.h>
58 #include <linux/ioport.h>
59 #include <linux/io.h>
60 #include <linux/interrupt.h>
61 #include <linux/pagemap.h>
62 #include <asm/cacheflush.h>
63 #include <linux/delay.h>
64 #include <linux/jiffies.h>
65 #include <linux/async.h>
66 #include <linux/crypto.h>
67 #include <crypto/internal/hash.h>
68 #include <crypto/scatterwalk.h>
69 #include <crypto/sha.h>
70 #include <crypto/md5.h>
71 #include <crypto/aes.h>
72 #include <crypto/des.h>
73 #include <crypto/hash.h>
74
75 #include "sep_driver_hw_defs.h"
76 #include "sep_driver_config.h"
77 #include "sep_driver_api.h"
78 #include "sep_dev.h"
79 #include "sep_crypto.h"
80
81 #define CREATE_TRACE_POINTS
82 #include "sep_trace_events.h"
83
84 /*
85  * Let's not spend cycles iterating over message
86  * area contents if debugging not enabled
87  */
88 #ifdef DEBUG
89 #define sep_dump_message(sep)   _sep_dump_message(sep)
90 #else
91 #define sep_dump_message(sep)
92 #endif
93
94 /**
95  * Currently, there is only one SEP device per platform;
96  * In event platforms in the future have more than one SEP
97  * device, this will be a linked list
98  */
99
100 struct sep_device *sep_dev;
101
/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: address of the caller's pointer to the queue element;
 *              the element is freed and the pointer cleared to NULL
 *
 * This function will remove information about transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
				      struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	/* Nothing to do if no element was ever queued (or already removed) */
	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
					current->pid, __func__);
		return;
	}

	/* Unlink and account under the status-queue spinlock */
	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	/* Free the element and clear the caller's pointer to prevent reuse */
	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
	return;
}
135
136 /**
137  * sep_queue_status_add - Adds transaction to status queue
138  * @sep: SEP device
139  * @opcode: transaction opcode
140  * @size: input data size
141  * @pid: pid of current process
142  * @name: current process name
143  * @name_len: length of name (current process)
144  *
145  * This function adds information about about transaction started to the status
146  * queue.
147  */
148 struct sep_queue_info *sep_queue_status_add(
149                                                 struct sep_device *sep,
150                                                 u32 opcode,
151                                                 u32 size,
152                                                 u32 pid,
153                                                 u8 *name, size_t name_len)
154 {
155         unsigned long lck_flags;
156         struct sep_queue_info *my_elem = NULL;
157
158         my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
159
160         if (!my_elem)
161                 return NULL;
162
163         dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
164
165         my_elem->data.opcode = opcode;
166         my_elem->data.size = size;
167         my_elem->data.pid = pid;
168
169         if (name_len > TASK_COMM_LEN)
170                 name_len = TASK_COMM_LEN;
171
172         memcpy(&my_elem->data.name, name, name_len);
173
174         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
175
176         list_add_tail(&my_elem->list, &sep->sep_queue_status);
177         sep->sep_queue_num++;
178
179         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
180
181         return my_elem;
182 }
183
184 /**
185  *      sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
186  *      @sep: SEP device
187  *      @dmatables_region: Destination pointer for the buffer
188  *      @dma_ctx: DMA context for the transaction
189  *      @table_count: Number of MLLI/DMA tables to create
190  *      The buffer created will not work as-is for DMA operations,
191  *      it needs to be copied over to the appropriate place in the
192  *      shared area.
193  */
194 static int sep_allocate_dmatables_region(struct sep_device *sep,
195                                          void **dmatables_region,
196                                          struct sep_dma_context *dma_ctx,
197                                          const u32 table_count)
198 {
199         const size_t new_len =
200                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
201
202         void *tmp_region = NULL;
203
204         dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
205                                 current->pid, dma_ctx);
206         dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
207                                 current->pid, dmatables_region);
208
209         if (!dma_ctx || !dmatables_region) {
210                 dev_warn(&sep->pdev->dev,
211                         "[PID%d] dma context/region uninitialized\n",
212                         current->pid);
213                 return -EINVAL;
214         }
215
216         dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
217                                 current->pid, new_len);
218         dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
219                                 dma_ctx->dmatables_len);
220         tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
221         if (!tmp_region)
222                 return -ENOMEM;
223
224         /* Were there any previous tables that need to be preserved ? */
225         if (*dmatables_region) {
226                 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
227                 kfree(*dmatables_region);
228                 *dmatables_region = NULL;
229         }
230
231         *dmatables_region = tmp_region;
232
233         dma_ctx->dmatables_len += new_len;
234
235         return 0;
236 }
237
/**
 *	sep_wait_transaction - Used for synchronizing transactions
 *	@sep: SEP device
 *
 *	Blocks until the calling process acquires exclusive transaction
 *	ownership of the SEP, or returns -EINTR if a signal arrives while
 *	waiting. On success, returns 0 with sep->pid_doing_transaction set
 *	to the caller's pid.
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	/* Fast path: nobody holds the transaction lock bit, take it now */
	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		/*
		 * Re-test AFTER queueing ourselves so a wakeup between
		 * the test and schedule() cannot be lost.
		 */
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		/* Interrupted: bail out WITHOUT claiming ownership */
		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
307
308 /**
309  * sep_check_transaction_owner - Checks if current process owns transaction
310  * @sep: SEP device
311  */
312 static inline int sep_check_transaction_owner(struct sep_device *sep)
313 {
314         dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
315                 current->pid,
316                 sep->pid_doing_transaction);
317
318         if ((sep->pid_doing_transaction == 0) ||
319                 (current->pid != sep->pid_doing_transaction)) {
320                 return -EACCES;
321         }
322
323         /* We own the transaction */
324         return 0;
325 }
326
327 #ifdef DEBUG
328
329 /**
330  * sep_dump_message - dump the message that is pending
331  * @sep: SEP device
332  * This will only print dump if DEBUG is set; it does
333  * follow kernel debug print enabling
334  */
335 static void _sep_dump_message(struct sep_device *sep)
336 {
337         int count;
338
339         u32 *p = sep->shared_addr;
340
341         for (count = 0; count < 10 * 4; count += 4)
342                 dev_dbg(&sep->pdev->dev,
343                         "[PID%d] Word %d of the message is %x\n",
344                                 current->pid, count/4, *p++);
345 }
346
347 #endif
348
349 /**
350  * sep_map_and_alloc_shared_area -allocate shared block
351  * @sep: security processor
352  * @size: size of shared area
353  */
354 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
355 {
356         sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
357                 sep->shared_size,
358                 &sep->shared_bus, GFP_KERNEL);
359
360         if (!sep->shared_addr) {
361                 dev_dbg(&sep->pdev->dev,
362                         "[PID%d] shared memory dma_alloc_coherent failed\n",
363                                 current->pid);
364                 return -ENOMEM;
365         }
366         dev_dbg(&sep->pdev->dev,
367                 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
368                                 current->pid,
369                                 sep->shared_size, sep->shared_addr,
370                                 (unsigned long long)sep->shared_bus);
371         return 0;
372 }
373
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 *
 * Releases the coherent DMA region allocated by
 * sep_map_and_alloc_shared_area().
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}
383
384 #ifdef DEBUG
385
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address. Assumes @bus_address lies within the shared
 * region starting at sep->shared_bus (no range check is performed).
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	/* Byte-wise offset arithmetic on the void * base (GCC extension) */
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
399
400 #endif
401
402 /**
403  * sep_open - device open method
404  * @inode: inode of SEP device
405  * @filp: file handle to SEP device
406  *
407  * Open method for the SEP device. Called when userspace opens
408  * the SEP device node.
409  *
410  * Returns zero on success otherwise an error code.
411  */
412 static int sep_open(struct inode *inode, struct file *filp)
413 {
414         struct sep_device *sep;
415         struct sep_private_data *priv;
416
417         dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
418
419         if (filp->f_flags & O_NONBLOCK)
420                 return -ENOTSUPP;
421
422         /*
423          * Get the SEP device structure and use it for the
424          * private_data field in filp for other methods
425          */
426
427         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
428         if (!priv)
429                 return -ENOMEM;
430
431         sep = sep_dev;
432         priv->device = sep;
433         filp->private_data = priv;
434
435         dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
436                                         current->pid, priv);
437
438         /* Anyone can open; locking takes place at transaction level */
439         return 0;
440 }
441
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: address of the caller's DMA context pointer; the context is
 *           freed and the pointer cleared to NULL
 *
 * Handles the request to free DMA table for synchronic actions:
 * unmaps and frees the in/out map arrays, releases pinned user pages,
 * and unmaps any kernel-crypto scatterlists, for every DCB created.
 * Always returns 0.
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/**
		 * Output is handled different. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Release the pinned input pages (flush first for coherence) */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))

					SetPageDirty(dma->
					out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/**
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in the lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather
		 * is used for exclusively)
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			/*
			 * NOTE(review): the destination sg is unmapped with
			 * in_map_num_entries, not out_map_num_entries —
			 * looks like a copy-paste from the src_sg case.
			 * Confirm against the mapping code that dst_sg was
			 * mapped with the same nents before changing.
			 */
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}
572
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context to free (may be NULL)
 * @call_status: Call status to reset (may be NULL)
 * @my_queue_elem: status-queue element to remove
 *
 * This API handles the end transaction request: frees DMA resources,
 * clears the shared message area, drops the runtime-PM usage count and
 * finally releases transaction ownership so the next waiter can run.
 * A no-op (returning 0) when the caller does not own the transaction.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 * */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
						current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
					current->pid);
	/* Release the started-lock LAST so a new owner sees clean state */
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}
636
637
638 /**
639  * sep_release - close a SEP device
640  * @inode: inode of SEP device
641  * @filp: file handle being closed
642  *
643  * Called on the final close of a SEP device.
644  */
645 static int sep_release(struct inode *inode, struct file *filp)
646 {
647         struct sep_private_data * const private_data = filp->private_data;
648         struct sep_call_status *call_status = &private_data->call_status;
649         struct sep_device *sep = private_data->device;
650         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
651         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
652
653         dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
654
655         sep_end_transaction_handler(sep, dma_ctx, call_status,
656                 my_queue_elem);
657
658         kfree(filp->private_data);
659
660         return 0;
661 }
662
/**
 * sep_mmap -  maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device.
 * Acquires transaction ownership as a side effect: a successful mmap
 * starts a transaction that the caller must later end.
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range is as the size of the message
	 * shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
					current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	/* Map the whole requested range onto the shared area */
	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
						current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}
734
/**
 * sep_poll - poll handler
 * @filp:       pointer to struct file
 * @wait:       pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle. Only the transaction owner that has already
 * sent a message may poll; returns POLLIN|POLLRDNORM when the SEP
 * reply is ready, POLLERR on error, 0 when no reply is pending yet.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}


	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/*
	 * Check if error occurred during poll; GPR3 values other than
	 * 0x0 and 0x8 are treated as SEP-reported errors.
	 */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	/* Counters must be compared under the send/reply lock */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		/* Counts match: SEP has answered everything we sent */
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2)  %x\n",
				current->pid, retval);

		/* Check if printf request (bit 30 of GPR2) */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if the this is SEP reply or request (bit 31) */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		/* Reply not in yet; report nothing ready */
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
834
835 /**
836  * sep_time_address - address in SEP memory of time
837  * @sep: SEP device we want the address from
838  *
839  * Return the address of the two dwords in memory used for time
840  * setting.
841  */
842 static u32 *sep_time_address(struct sep_device *sep)
843 {
844         return sep->shared_addr +
845                 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
846 }
847
848 /**
849  * sep_set_time - set the SEP time
850  * @sep: the SEP we are setting the time for
851  *
852  * Calculates time and sets it at the predefined address.
853  * Called with the SEP mutex held.
854  */
855 static unsigned long sep_set_time(struct sep_device *sep)
856 {
857         struct timeval time;
858         u32 *time_addr; /* Address of time as seen by the kernel */
859
860
861         do_gettimeofday(&time);
862
863         /* Set value in the SYSTEM MEMORY offset */
864         time_addr = sep_time_address(sep);
865
866         time_addr[0] = SEP_TIME_VAL_TOKEN;
867         time_addr[1] = time.tv_sec;
868
869         dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
870                                         current->pid, time.tv_sec);
871         dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
872                                         current->pid, time_addr);
873         dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
874                                         current->pid, sep->shared_addr);
875
876         return time.tv_sec;
877 }
878
879 /**
880  * sep_send_command_handler - kick off a command
881  * @sep: SEP being signalled
882  *
883  * This function raises interrupt to SEP that signals that is has a new
884  * command from the host
885  *
886  * Note that this function does fall under the ioctl lock
887  */
888 int sep_send_command_handler(struct sep_device *sep)
889 {
890         unsigned long lock_irq_flag;
891         u32 *msg_pool;
892         int error = 0;
893
894         /* Basic sanity check; set msg pool to start of shared area */
895         msg_pool = (u32 *)sep->shared_addr;
896         msg_pool += 2;
897
898         /* Look for start msg token */
899         if (*msg_pool != SEP_START_MSG_TOKEN) {
900                 dev_warn(&sep->pdev->dev, "start message token not present\n");
901                 error = -EPROTO;
902                 goto end_function;
903         }
904
905         /* Do we have a reasonable size? */
906         msg_pool += 1;
907         if ((*msg_pool < 2) ||
908                 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
909
910                 dev_warn(&sep->pdev->dev, "invalid message size\n");
911                 error = -EPROTO;
912                 goto end_function;
913         }
914
915         /* Does the command look reasonable? */
916         msg_pool += 1;
917         if (*msg_pool < 2) {
918                 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
919                 error = -EPROTO;
920                 goto end_function;
921         }
922
923 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
924         dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
925                                         current->pid,
926                                         sep->pdev->dev.power.runtime_status);
927         sep->in_use = 1; /* device is about to be used */
928         pm_runtime_get_sync(&sep->pdev->dev);
929 #endif
930
931         if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
932                 error = -EPROTO;
933                 goto end_function;
934         }
935         sep->in_use = 1; /* device is about to be used */
936         sep_set_time(sep);
937
938         sep_dump_message(sep);
939
940         /* Update counter */
941         spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
942         sep->send_ct++;
943         spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
944
945         dev_dbg(&sep->pdev->dev,
946                 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
947                         current->pid, sep->send_ct, sep->reply_ct);
948
949         /* Send interrupt to SEP */
950         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
951
952 end_function:
953         return error;
954 }
955
956 /**
957  *      sep_crypto_dma -
958  *      @sep: pointer to struct sep_device
959  *      @sg: pointer to struct scatterlist
960  *      @direction:
961  *      @dma_maps: pointer to place a pointer to array of dma maps
962  *       This is filled in; anything previous there will be lost
963  *       The structure for dma maps is sep_dma_map
964  *      @returns number of dma maps on success; negative on error
965  *
966  *      This creates the dma table from the scatterlist
967  *      It is used only for kernel crypto as it works with scatterlists
968  *      representation of data buffers
969  *
970  */
971 static int sep_crypto_dma(
972         struct sep_device *sep,
973         struct scatterlist *sg,
974         struct sep_dma_map **dma_maps,
975         enum dma_data_direction direction)
976 {
977         struct scatterlist *temp_sg;
978
979         u32 count_segment;
980         u32 count_mapped;
981         struct sep_dma_map *sep_dma;
982         int ct1;
983
984         if (sg->length == 0)
985                 return 0;
986
987         /* Count the segments */
988         temp_sg = sg;
989         count_segment = 0;
990         while (temp_sg) {
991                 count_segment += 1;
992                 temp_sg = scatterwalk_sg_next(temp_sg);
993         }
994         dev_dbg(&sep->pdev->dev,
995                 "There are (hex) %x segments in sg\n", count_segment);
996
997         /* DMA map segments */
998         count_mapped = dma_map_sg(&sep->pdev->dev, sg,
999                 count_segment, direction);
1000
1001         dev_dbg(&sep->pdev->dev,
1002                 "There are (hex) %x maps in sg\n", count_mapped);
1003
1004         if (count_mapped == 0) {
1005                 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1006                 return -ENOMEM;
1007         }
1008
1009         sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1010                 count_mapped, GFP_ATOMIC);
1011
1012         if (sep_dma == NULL) {
1013                 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1014                 return -ENOMEM;
1015         }
1016
1017         for_each_sg(sg, temp_sg, count_mapped, ct1) {
1018                 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1019                 sep_dma[ct1].size = sg_dma_len(temp_sg);
1020                 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1021                         ct1, (unsigned long)sep_dma[ct1].dma_addr,
1022                         (unsigned long)sep_dma[ct1].size);
1023                 }
1024
1025         *dma_maps = sep_dma;
1026         return count_mapped;
1027
1028 }
1029
1030 /**
1031  *      sep_crypto_lli -
1032  *      @sep: pointer to struct sep_device
1033  *      @sg: pointer to struct scatterlist
1034  *      @data_size: total data size
1035  *      @direction:
1036  *      @dma_maps: pointer to place a pointer to array of dma maps
1037  *       This is filled in; anything previous there will be lost
1038  *       The structure for dma maps is sep_dma_map
1039  *      @lli_maps: pointer to place a pointer to array of lli maps
1040  *       This is filled in; anything previous there will be lost
1041  *       The structure for dma maps is sep_dma_map
1042  *      @returns number of dma maps on success; negative on error
1043  *
1044  *      This creates the LLI table from the scatterlist
1045  *      It is only used for kernel crypto as it works exclusively
1046  *      with scatterlists (struct scatterlist) representation of
1047  *      data buffers
1048  */
1049 static int sep_crypto_lli(
1050         struct sep_device *sep,
1051         struct scatterlist *sg,
1052         struct sep_dma_map **maps,
1053         struct sep_lli_entry **llis,
1054         u32 data_size,
1055         enum dma_data_direction direction)
1056 {
1057
1058         int ct1;
1059         struct sep_lli_entry *sep_lli;
1060         struct sep_dma_map *sep_map;
1061
1062         int nbr_ents;
1063
1064         nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1065         if (nbr_ents <= 0) {
1066                 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1067                         nbr_ents);
1068                 return nbr_ents;
1069         }
1070
1071         sep_map = *maps;
1072
1073         sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1074
1075         if (sep_lli == NULL) {
1076                 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1077
1078                 kfree(*maps);
1079                 *maps = NULL;
1080                 return -ENOMEM;
1081         }
1082
1083         for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1084                 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1085
1086                 /* Maximum for page is total data size */
1087                 if (sep_map[ct1].size > data_size)
1088                         sep_map[ct1].size = data_size;
1089
1090                 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1091         }
1092
1093         *llis = sep_lli;
1094         return nbr_ents;
1095 }
1096
1097 /**
1098  *      sep_lock_kernel_pages - map kernel pages for DMA
1099  *      @sep: pointer to struct sep_device
1100  *      @kernel_virt_addr: address of data buffer in kernel
1101  *      @data_size: size of data
1102  *      @lli_array_ptr: lli array
1103  *      @in_out_flag: input into device or output from device
1104  *
1105  *      This function locks all the physical pages of the kernel virtual buffer
1106  *      and construct a basic lli  array, where each entry holds the physical
1107  *      page address and the size that application data holds in this page
1108  *      This function is used only during kernel crypto mod calls from within
1109  *      the kernel (when ioctl is not used)
1110  *
1111  *      This is used only for kernel crypto. Kernel pages
1112  *      are handled differently as they are done via
1113  *      scatter gather lists (struct scatterlist)
1114  */
1115 static int sep_lock_kernel_pages(struct sep_device *sep,
1116         unsigned long kernel_virt_addr,
1117         u32 data_size,
1118         struct sep_lli_entry **lli_array_ptr,
1119         int in_out_flag,
1120         struct sep_dma_context *dma_ctx)
1121
1122 {
1123         u32 num_pages;
1124         struct scatterlist *sg;
1125
1126         /* Array of lli */
1127         struct sep_lli_entry *lli_array;
1128         /* Map array */
1129         struct sep_dma_map *map_array;
1130
1131         enum dma_data_direction direction;
1132
1133         lli_array = NULL;
1134         map_array = NULL;
1135
1136         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1137                 direction = DMA_TO_DEVICE;
1138                 sg = dma_ctx->src_sg;
1139         } else {
1140                 direction = DMA_FROM_DEVICE;
1141                 sg = dma_ctx->dst_sg;
1142         }
1143
1144         num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1145                 data_size, direction);
1146
1147         if (num_pages <= 0) {
1148                 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1149                         num_pages);
1150                 return -ENOMEM;
1151         }
1152
1153         /* Put mapped kernel sg into kernel resource array */
1154
1155         /* Set output params according to the in_out flag */
1156         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1157                 *lli_array_ptr = lli_array;
1158                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1159                                                                 num_pages;
1160                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1161                                                                 NULL;
1162                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1163                                                                 map_array;
1164                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1165                                                                 num_pages;
1166                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1167                         dma_ctx->src_sg;
1168         } else {
1169                 *lli_array_ptr = lli_array;
1170                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1171                                                                 num_pages;
1172                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1173                                                                 NULL;
1174                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1175                                                                 map_array;
1176                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1177                                         out_map_num_entries = num_pages;
1178                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1179                         dma_ctx->dst_sg;
1180         }
1181
1182         return 0;
1183 }
1184
1185 /**
1186  * sep_lock_user_pages - lock and map user pages for DMA
1187  * @sep: pointer to struct sep_device
1188  * @app_virt_addr: user memory data buffer
1189  * @data_size: size of data buffer
1190  * @lli_array_ptr: lli array
1191  * @in_out_flag: input or output to device
1192  *
1193  * This function locks all the physical pages of the application
1194  * virtual buffer and construct a basic lli  array, where each entry
1195  * holds the physical page address and the size that application
1196  * data holds in this physical pages
1197  */
1198 static int sep_lock_user_pages(struct sep_device *sep,
1199         u32 app_virt_addr,
1200         u32 data_size,
1201         struct sep_lli_entry **lli_array_ptr,
1202         int in_out_flag,
1203         struct sep_dma_context *dma_ctx)
1204
1205 {
1206         int error = 0;
1207         u32 count;
1208         int result;
1209         /* The the page of the end address of the user space buffer */
1210         u32 end_page;
1211         /* The page of the start address of the user space buffer */
1212         u32 start_page;
1213         /* The range in pages */
1214         u32 num_pages;
1215         /* Array of pointers to page */
1216         struct page **page_array;
1217         /* Array of lli */
1218         struct sep_lli_entry *lli_array;
1219         /* Map array */
1220         struct sep_dma_map *map_array;
1221
1222         /* Set start and end pages and num pages */
1223         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1224         start_page = app_virt_addr >> PAGE_SHIFT;
1225         num_pages = end_page - start_page + 1;
1226
1227         dev_dbg(&sep->pdev->dev,
1228                 "[PID%d] lock user pages app_virt_addr is %x\n",
1229                         current->pid, app_virt_addr);
1230
1231         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1232                                         current->pid, data_size);
1233         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1234                                         current->pid, start_page);
1235         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1236                                         current->pid, end_page);
1237         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1238                                         current->pid, num_pages);
1239
1240         /* Allocate array of pages structure pointers */
1241         page_array = kmalloc_array(num_pages, sizeof(struct page *),
1242                                    GFP_ATOMIC);
1243         if (!page_array) {
1244                 error = -ENOMEM;
1245                 goto end_function;
1246         }
1247
1248         map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
1249                                   GFP_ATOMIC);
1250         if (!map_array) {
1251                 error = -ENOMEM;
1252                 goto end_function_with_error1;
1253         }
1254
1255         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1256                                   GFP_ATOMIC);
1257         if (!lli_array) {
1258                 error = -ENOMEM;
1259                 goto end_function_with_error2;
1260         }
1261
1262         /* Convert the application virtual address into a set of physical */
1263         result = get_user_pages_fast(app_virt_addr, num_pages,
1264                 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);
1265
1266         /* Check the number of pages locked - if not all then exit with error */
1267         if (result != num_pages) {
1268                 dev_warn(&sep->pdev->dev,
1269                         "[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
1270                         current->pid, result, num_pages);
1271                 error = -ENOMEM;
1272                 goto end_function_with_error3;
1273         }
1274
1275         dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1276                                         current->pid);
1277
1278         /*
1279          * Fill the array using page array data and
1280          * map the pages - this action will also flush the cache as needed
1281          */
1282         for (count = 0; count < num_pages; count++) {
1283                 /* Fill the map array */
1284                 map_array[count].dma_addr =
1285                         dma_map_page(&sep->pdev->dev, page_array[count],
1286                         0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1287
1288                 map_array[count].size = PAGE_SIZE;
1289
1290                 /* Fill the lli array entry */
1291                 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1292                 lli_array[count].block_size = PAGE_SIZE;
1293
1294                 dev_dbg(&sep->pdev->dev,
1295                         "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
1296                         current->pid, count,
1297                         (unsigned long)lli_array[count].bus_address,
1298                         count, lli_array[count].block_size);
1299         }
1300
1301         /* Check the offset for the first page */
1302         lli_array[0].bus_address =
1303                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1304
1305         /* Check that not all the data is in the first page only */
1306         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1307                 lli_array[0].block_size = data_size;
1308         else
1309                 lli_array[0].block_size =
1310                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1311
1312                 dev_dbg(&sep->pdev->dev,
1313                         "[PID%d] After check if page 0 has all data\n",
1314                         current->pid);
1315                 dev_dbg(&sep->pdev->dev,
1316                         "[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
1317                         current->pid,
1318                         (unsigned long)lli_array[0].bus_address,
1319                         lli_array[0].block_size);
1320
1321
1322         /* Check the size of the last page */
1323         if (num_pages > 1) {
1324                 lli_array[num_pages - 1].block_size =
1325                         (app_virt_addr + data_size) & (~PAGE_MASK);
1326                 if (lli_array[num_pages - 1].block_size == 0)
1327                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1328
1329                 dev_dbg(&sep->pdev->dev,
1330                         "[PID%d] After last page size adjustment\n",
1331                         current->pid);
1332                 dev_dbg(&sep->pdev->dev,
1333                         "[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
1334                         current->pid,
1335                         num_pages - 1,
1336                         (unsigned long)lli_array[num_pages - 1].bus_address,
1337                         num_pages - 1,
1338                         lli_array[num_pages - 1].block_size);
1339         }
1340
1341         /* Set output params according to the in_out flag */
1342         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1343                 *lli_array_ptr = lli_array;
1344                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1345                                                                 num_pages;
1346                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1347                                                                 page_array;
1348                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1349                                                                 map_array;
1350                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1351                                                                 num_pages;
1352                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1353         } else {
1354                 *lli_array_ptr = lli_array;
1355                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1356                                                                 num_pages;
1357                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1358                                                                 page_array;
1359                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1360                                                                 map_array;
1361                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1362                                         out_map_num_entries = num_pages;
1363                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1364         }
1365         goto end_function;
1366
1367 end_function_with_error3:
1368         /* Free lli array */
1369         kfree(lli_array);
1370
1371 end_function_with_error2:
1372         kfree(map_array);
1373
1374 end_function_with_error1:
1375         /* Free page array */
1376         kfree(page_array);
1377
1378 end_function:
1379         return error;
1380 }
1381
1382 /**
1383  *      sep_lli_table_secure_dma - get lli array for IMR addresses
1384  *      @sep: pointer to struct sep_device
1385  *      @app_virt_addr: user memory data buffer
1386  *      @data_size: size of data buffer
1387  *      @lli_array_ptr: lli array
1388  *      @in_out_flag: not used
1389  *      @dma_ctx: pointer to struct sep_dma_context
1390  *
1391  *      This function creates lli tables for outputting data to
1392  *      IMR memory, which is memory that cannot be accessed by the
1393  *      the x86 processor.
1394  */
1395 static int sep_lli_table_secure_dma(struct sep_device *sep,
1396         u32 app_virt_addr,
1397         u32 data_size,
1398         struct sep_lli_entry **lli_array_ptr,
1399         int in_out_flag,
1400         struct sep_dma_context *dma_ctx)
1401
1402 {
1403         int error = 0;
1404         u32 count;
1405         /* The the page of the end address of the user space buffer */
1406         u32 end_page;
1407         /* The page of the start address of the user space buffer */
1408         u32 start_page;
1409         /* The range in pages */
1410         u32 num_pages;
1411         /* Array of lli */
1412         struct sep_lli_entry *lli_array;
1413
1414         /* Set start and end pages and num pages */
1415         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1416         start_page = app_virt_addr >> PAGE_SHIFT;
1417         num_pages = end_page - start_page + 1;
1418
1419         dev_dbg(&sep->pdev->dev,
1420                 "[PID%d] lock user pages  app_virt_addr is %x\n",
1421                 current->pid, app_virt_addr);
1422
1423         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1424                 current->pid, data_size);
1425         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1426                 current->pid, start_page);
1427         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1428                 current->pid, end_page);
1429         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1430                 current->pid, num_pages);
1431
1432         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1433                                   GFP_ATOMIC);
1434         if (!lli_array)
1435                 return -ENOMEM;
1436
1437         /*
1438          * Fill the lli_array
1439          */
1440         start_page = start_page << PAGE_SHIFT;
1441         for (count = 0; count < num_pages; count++) {
1442                 /* Fill the lli array entry */
1443                 lli_array[count].bus_address = start_page;
1444                 lli_array[count].block_size = PAGE_SIZE;
1445
1446                 start_page += PAGE_SIZE;
1447
1448                 dev_dbg(&sep->pdev->dev,
1449                         "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
1450                         current->pid,
1451                         count, (unsigned long)lli_array[count].bus_address,
1452                         count, lli_array[count].block_size);
1453         }
1454
1455         /* Check the offset for the first page */
1456         lli_array[0].bus_address =
1457                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1458
1459         /* Check that not all the data is in the first page only */
1460         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1461                 lli_array[0].block_size = data_size;
1462         else
1463                 lli_array[0].block_size =
1464                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1465
1466         dev_dbg(&sep->pdev->dev,
1467                 "[PID%d] After check if page 0 has all data\n"
1468                 "lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
1469                 current->pid,
1470                 (unsigned long)lli_array[0].bus_address,
1471                 lli_array[0].block_size);
1472
1473         /* Check the size of the last page */
1474         if (num_pages > 1) {
1475                 lli_array[num_pages - 1].block_size =
1476                         (app_virt_addr + data_size) & (~PAGE_MASK);
1477                 if (lli_array[num_pages - 1].block_size == 0)
1478                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1479
1480                 dev_dbg(&sep->pdev->dev,
1481                         "[PID%d] After last page size adjustment\n"
1482                         "lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
1483                         current->pid, num_pages - 1,
1484                         (unsigned long)lli_array[num_pages - 1].bus_address,
1485                         num_pages - 1,
1486                         lli_array[num_pages - 1].block_size);
1487         }
1488         *lli_array_ptr = lli_array;
1489         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1490         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1491         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1492         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1493
1494         return error;
1495 }
1496
1497 /**
1498  * sep_calculate_lli_table_max_size - size the LLI table
1499  * @sep: pointer to struct sep_device
1500  * @lli_in_array_ptr
1501  * @num_array_entries
1502  * @last_table_flag
1503  *
1504  * This function calculates the size of data that can be inserted into
1505  * the lli table from this array, such that either the table is full
1506  * (all entries are entered), or there are no more entries in the
1507  * lli array
1508  */
1509 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1510         struct sep_lli_entry *lli_in_array_ptr,
1511         u32 num_array_entries,
1512         u32 *last_table_flag)
1513 {
1514         u32 counter;
1515         /* Table data size */
1516         u32 table_data_size = 0;
1517         /* Data size for the next table */
1518         u32 next_table_data_size;
1519
1520         *last_table_flag = 0;
1521
1522         /*
1523          * Calculate the data in the out lli table till we fill the whole
1524          * table or till the data has ended
1525          */
1526         for (counter = 0;
1527                 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1528                         (counter < num_array_entries); counter++)
1529                 table_data_size += lli_in_array_ptr[counter].block_size;
1530
1531         /*
1532          * Check if we reached the last entry,
1533          * meaning this ia the last table to build,
1534          * and no need to check the block alignment
1535          */
1536         if (counter == num_array_entries) {
1537                 /* Set the last table flag */
1538                 *last_table_flag = 1;
1539                 goto end_function;
1540         }
1541
1542         /*
1543          * Calculate the data size of the next table.
1544          * Stop if no entries left or if data size is more the DMA restriction
1545          */
1546         next_table_data_size = 0;
1547         for (; counter < num_array_entries; counter++) {
1548                 next_table_data_size += lli_in_array_ptr[counter].block_size;
1549                 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1550                         break;
1551         }
1552
1553         /*
1554          * Check if the next table data size is less then DMA rstriction.
1555          * if it is - recalculate the current table size, so that the next
1556          * table data size will be adaquete for DMA
1557          */
1558         if (next_table_data_size &&
1559                 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1560
1561                 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1562                         next_table_data_size);
1563
1564 end_function:
1565         return table_data_size;
1566 }
1567
/**
 * sep_build_lli_table - build one lli table covering table_data_size bytes
 * @sep: pointer to struct sep_device (used only for dev_dbg tracing)
 * @lli_array_ptr: source array of (bus address, block size) entries;
 *	an entry that is only partially consumed is adjusted in place so
 *	the caller can continue from its remainder in the next table
 * @lli_table_ptr: destination table to fill; entries are stored
 *	little-endian for the 32-bit SEP device
 * @num_processed_entries_ptr: in/out - incremented by the number of
 *	lli_array_ptr entries fully consumed by this table
 * @num_table_entries_ptr: out - number of entries written to this table,
 *	including the trailing info entry
 * @table_data_size: exact number of data bytes this table must cover
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	/* Start at 1 so the returned count includes the info entry */
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		/* SEP is a 32-bit device: entries are stored little-endian */
		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/*
			 * Last array entry only partially fits: trim the
			 * table entry to the bytes actually needed.
			 * NOTE(review): block_size was stored via
			 * cpu_to_le32 above, and the arithmetic below mixes
			 * le32 and host-order values; this is only correct
			 * on little-endian hosts - confirm (the SEP
			 * platform is little-endian Intel Medfield).
			 */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
			cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Terminating info entry: 0xffffffff marks the end of the chain */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Report how many source array entries were fully consumed */
	*num_processed_entries_ptr += array_counter;

}
1669
1670 /**
1671  * sep_shared_area_virt_to_bus - map shared area to bus address
1672  * @sep: pointer to struct sep_device
1673  * @virt_address: virtual address to convert
1674  *
1675  * This functions returns the physical address inside shared area according
1676  * to the virtual address. It can be either on the external RAM device
1677  * (ioremapped), or on the system RAM
1678  * This implementation is for the external RAM
1679  */
1680 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1681         void *virt_address)
1682 {
1683         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1684                                         current->pid, virt_address);
1685         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1686                 current->pid,
1687                 (unsigned long)
1688                 sep->shared_bus + (virt_address - sep->shared_addr));
1689
1690         return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1691 }
1692
1693 /**
1694  * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1695  * @sep: pointer to struct sep_device
1696  * @bus_address: bus address to convert
1697  *
1698  * This functions returns the virtual address inside shared area
1699  * according to the physical address. It can be either on the
1700  * external RAM device (ioremapped), or on the system RAM
1701  * This implementation is for the external RAM
1702  */
1703 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1704         dma_addr_t bus_address)
1705 {
1706         dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1707                 current->pid,
1708                 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1709                         (size_t)(bus_address - sep->shared_bus)));
1710
1711         return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1712 }
1713
/**
 * sep_debug_print_lli_tables - dump an LLI table chain (DEBUG builds only)
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: virtual address of the first table in the chain
 * @num_table_entries: number of entries in the first table
 * @table_data_size: data size covered by the first table
 *
 * Walks the chain of tables and prints every entry via dev_dbg.
 * The entry count and data size of each following table are unpacked
 * from the previous table's info entry. Compiled out entirely unless
 * DEBUG is defined.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
					current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	/* A bus address of 0xffffffff is the end-of-chain sentinel */
	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
				current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx block size is (hex) %x\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Step back to the last printed entry: the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		/* Info entry packs data size in bits 23-0, entry count in 31-24 */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is%lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		/*
		 * NOTE(review): follows the chain via sep_shared_bus_to_virt()
		 * (defined elsewhere in this file), not the
		 * sep_shared_area_bus_to_virt() above - confirm both map the
		 * shared area identically.
		 */
		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
					current->pid);
#endif
}
1799
1800
1801 /**
1802  * sep_prepare_empty_lli_table - create a blank LLI table
1803  * @sep: pointer to struct sep_device
1804  * @lli_table_addr_ptr: pointer to lli table
1805  * @num_entries_ptr: pointer to number of entries
1806  * @table_data_size_ptr: point to table data size
1807  * @dmatables_region: Optional buffer for DMA tables
1808  * @dma_ctx: DMA context
1809  *
1810  * This function creates empty lli tables when there is no data
1811  */
1812 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1813                 dma_addr_t *lli_table_addr_ptr,
1814                 u32 *num_entries_ptr,
1815                 u32 *table_data_size_ptr,
1816                 void **dmatables_region,
1817                 struct sep_dma_context *dma_ctx)
1818 {
1819         struct sep_lli_entry *lli_table_ptr;
1820
1821         /* Find the area for new table */
1822         lli_table_ptr =
1823                 (struct sep_lli_entry *)(sep->shared_addr +
1824                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1825                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1826                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1827
1828         if (dmatables_region && *dmatables_region)
1829                 lli_table_ptr = *dmatables_region;
1830
1831         lli_table_ptr->bus_address = 0;
1832         lli_table_ptr->block_size = 0;
1833
1834         lli_table_ptr++;
1835         lli_table_ptr->bus_address = 0xFFFFFFFF;
1836         lli_table_ptr->block_size = 0;
1837
1838         /* Set the output parameter value */
1839         *lli_table_addr_ptr = sep->shared_bus +
1840                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1841                 dma_ctx->num_lli_tables_created *
1842                 sizeof(struct sep_lli_entry) *
1843                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1844
1845         /* Set the num of entries and table data size for empty table */
1846         *num_entries_ptr = 2;
1847         *table_data_size_ptr = 0;
1848
1849         /* Update the number of created tables */
1850         dma_ctx->num_lli_tables_created++;
1851 }
1852
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: virtual address of the caller's input buffer
 * @data_size: number of bytes of input data to map
 * @block_size: cipher/hash block size used to align per-table data sizes
 * @lli_table_ptr: out - bus address of the first input LLI table
 * @num_entries_ptr: out - number of entries in the first table
 * @table_data_size_ptr: out - data size covered by the first table
 * @is_kva: set for kernel data (kernel crypt io call)
 * @dmatables_region: optional staging buffer to build the tables in;
 *	when NULL the tables are built directly in the shared area
 * @dma_ctx: DMA context tracking locked pages and created tables
 *	for later cleanup
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 *
 * Returns 0 on success or a negative errno.
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx
)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare intput dma tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
					current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
						dmatables_region,
						dma_ctx,
						1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	/*
	 * dma_lli_table_alloc_addr always tracks the shared-area slot
	 * (used for bus addresses); lli_table_alloc_addr may instead
	 * point into the staging region when one is supplied
	 */
	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Fail rather than overrun the shared-area tables region */
		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
				current->pid,
				table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* First table: report its location to the caller */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
							dma_in_lli_table_ptr);
			/* Pack entry count (bits 31-24) and size (bits 23-0) */
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;

}
2077
2078 /**
2079  * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2080  * @sep: pointer to struct sep_device
2081  * @lli_in_array:
2082  * @sep_in_lli_entries:
2083  * @lli_out_array:
2084  * @sep_out_lli_entries
2085  * @block_size
2086  * @lli_table_in_ptr
2087  * @lli_table_out_ptr
2088  * @in_num_entries_ptr
2089  * @out_num_entries_ptr
2090  * @table_data_size_ptr
2091  *
2092  * This function creates the input and output DMA tables for
2093  * symmetric operations (AES/DES) according to the block
2094  * size from LLI arays
2095  * Note that all bus addresses that are passed to the SEP
2096  * are in 32 bit format; the SEP is a 32 bit device
2097  */
2098 static int sep_construct_dma_tables_from_lli(
2099         struct sep_device *sep,
2100         struct sep_lli_entry *lli_in_array,
2101         u32     sep_in_lli_entries,
2102         struct sep_lli_entry *lli_out_array,
2103         u32     sep_out_lli_entries,
2104         u32     block_size,
2105         dma_addr_t *lli_table_in_ptr,
2106         dma_addr_t *lli_table_out_ptr,
2107         u32     *in_num_entries_ptr,
2108         u32     *out_num_entries_ptr,
2109         u32     *table_data_size_ptr,
2110         void    **dmatables_region,
2111         struct sep_dma_context *dma_ctx)
2112 {
2113         /* Points to the area where next lli table can be allocated */
2114         void *lli_table_alloc_addr = NULL;
2115         /*
2116          * Points to the area in shared region where next lli table
2117          * can be allocated
2118          */
2119         void *dma_lli_table_alloc_addr = NULL;
2120         /* Input lli table in dmatables_region or shared region */
2121         struct sep_lli_entry *in_lli_table_ptr = NULL;
2122         /* Input lli table location in the shared region */
2123         struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2124         /* Output lli table in dmatables_region or shared region */
2125         struct sep_lli_entry *out_lli_table_ptr = NULL;
2126         /* Output lli table location in the shared region */
2127         struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2128         /* Pointer to the info entry of the table - the last entry */
2129         struct sep_lli_entry *info_in_entry_ptr = NULL;
2130         /* Pointer to the info entry of the table - the last entry */
2131         struct sep_lli_entry *info_out_entry_ptr = NULL;
2132         /* Points to the first entry to be processed in the lli_in_array */
2133         u32 current_in_entry = 0;
2134         /* Points to the first entry to be processed in the lli_out_array */
2135         u32 current_out_entry = 0;
2136         /* Max size of the input table */
2137         u32 in_table_data_size = 0;
2138         /* Max size of the output table */
2139         u32 out_table_data_size = 0;
2140         /* Flag te signifies if this is the last tables build */
2141         u32 last_table_flag = 0;
2142         /* The data size that should be in table */
2143         u32 table_data_size = 0;
2144         /* Number of entries in the input table */
2145         u32 num_entries_in_table = 0;
2146         /* Number of entries in the output table */
2147         u32 num_entries_out_table = 0;
2148
2149         if (!dma_ctx) {
2150                 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2151                 return -EINVAL;
2152         }
2153
2154         /* Initiate to point after the message area */
2155         lli_table_alloc_addr = (void *)(sep->shared_addr +
2156                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2157                 (dma_ctx->num_lli_tables_created *
2158                 (sizeof(struct sep_lli_entry) *
2159                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2160         dma_lli_table_alloc_addr = lli_table_alloc_addr;
2161
2162         if (dmatables_region) {
2163                 /* 2 for both in+out table */
2164                 if (sep_allocate_dmatables_region(sep,
2165                                         dmatables_region,
2166                                         dma_ctx,
2167                                         2*sep_in_lli_entries))
2168                         return -ENOMEM;
2169                 lli_table_alloc_addr = *dmatables_region;
2170         }
2171
2172         /* Loop till all the entries in in array are not processed */
2173         while (current_in_entry < sep_in_lli_entries) {
2174                 /* Set the new input and output tables */
2175                 in_lli_table_ptr =
2176                         (struct sep_lli_entry *)lli_table_alloc_addr;
2177                 dma_in_lli_table_ptr =
2178                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2179
2180                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2181                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2182                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2183                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2184
2185                 /* Set the first output tables */
2186                 out_lli_table_ptr =
2187                         (struct sep_lli_entry *)lli_table_alloc_addr;
2188                 dma_out_lli_table_ptr =
2189                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2190
2191                 /* Check if the DMA table area limit was overrun */
2192                 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2193                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2194                         ((void *)sep->shared_addr +
2195                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2196                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2197
2198                         dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2199                         return -ENOMEM;
2200                 }
2201
2202                 /* Update the number of the lli tables created */
2203                 dma_ctx->num_lli_tables_created += 2;
2204
2205                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2206                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2207                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2208                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2209
2210                 /* Calculate the maximum size of data for input table */
2211                 in_table_data_size =
2212                         sep_calculate_lli_table_max_size(sep,
2213                         &lli_in_array[current_in_entry],
2214                         (sep_in_lli_entries - current_in_entry),
2215                         &last_table_flag);
2216
2217                 /* Calculate the maximum size of data for output table */
2218                 out_table_data_size =
2219                         sep_calculate_lli_table_max_size(sep,
2220                         &lli_out_array[current_out_entry],
2221                         (sep_out_lli_entries - current_out_entry),
2222                         &last_table_flag);
2223
2224                 if (!last_table_flag) {
2225                         in_table_data_size = (in_table_data_size /
2226                                 block_size) * block_size;
2227                         out_table_data_size = (out_table_data_size /
2228                                 block_size) * block_size;
2229                 }
2230
2231                 table_data_size = in_table_data_size;
2232                 if (table_data_size > out_table_data_size)
2233                         table_data_size = out_table_data_size;
2234
2235                 dev_dbg(&sep->pdev->dev,
2236                         "[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
2237                         current->pid, in_table_data_size);
2238
2239                 dev_dbg(&sep->pdev->dev,
2240                         "[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
2241                         current->pid, out_table_data_size);
2242
2243                 /* Construct input lli table */
2244                 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2245                         in_lli_table_ptr,
2246                         &current_in_entry,
2247                         &num_entries_in_table,
2248                         table_data_size);
2249
2250                 /* Construct output lli table */
2251                 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2252                         out_lli_table_ptr,
2253                         &current_out_entry,
2254                         &num_entries_out_table,
2255                         table_data_size);
2256
2257                 /* If info entry is null - this is the first table built */
2258                 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2259                         /* Set the output parameters to physical addresses */
2260                         *lli_table_in_ptr =
2261                         sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2262
2263                         *in_num_entries_ptr = num_entries_in_table;
2264
2265                         *lli_table_out_ptr =
2266                                 sep_shared_area_virt_to_bus(sep,
2267                                 dma_out_lli_table_ptr);
2268
2269                         *out_num_entries_ptr = num_entries_out_table;
2270                         *table_data_size_ptr = table_data_size;
2271
2272                         dev_dbg(&sep->pdev->dev,
2273                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2274                                 current->pid,
2275                                 (unsigned long)*lli_table_in_ptr);
2276                         dev_dbg(&sep->pdev->dev,
2277                                 "[PID%d] output lli_table_out_ptr is %08lx\n",
2278                                 current->pid,
2279                                 (unsigned long)*lli_table_out_ptr);
2280                 } else {
2281                         /* Update the info entry of the previous in table */
2282                         info_in_entry_ptr->bus_address =
2283                                 sep_shared_area_virt_to_bus(sep,
2284                                 dma_in_lli_table_ptr);
2285
2286                         info_in_entry_ptr->block_size =
2287                                 ((num_entries_in_table) << 24) |
2288                                 (table_data_size);
2289
2290                         /* Update the info entry of the previous in table */
2291                         info_out_entry_ptr->bus_address =
2292                                 sep_shared_area_virt_to_bus(sep,
2293                                 dma_out_lli_table_ptr);
2294
2295                         info_out_entry_ptr->block_size =
2296                                 ((num_entries_out_table) << 24) |
2297                                 (table_data_size);
2298
2299                         dev_dbg(&sep->pdev->dev,
2300                                 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2301                                 current->pid,
2302                                 (unsigned long)info_in_entry_ptr->bus_address,
2303                                 info_in_entry_ptr->block_size);
2304
2305                         dev_dbg(&sep->pdev->dev,
2306                                 "[PID%d] output lli_table_out_ptr: %08lx  %08x\n",
2307                                 current->pid,
2308                                 (unsigned long)info_out_entry_ptr->bus_address,
2309                                 info_out_entry_ptr->block_size);
2310                 }
2311
2312                 /* Save the pointer to the info entry of the current tables */
2313                 info_in_entry_ptr = in_lli_table_ptr +
2314                         num_entries_in_table - 1;
2315                 info_out_entry_ptr = out_lli_table_ptr +
2316                         num_entries_out_table - 1;
2317
2318                 dev_dbg(&sep->pdev->dev,
2319                         "[PID%d] output num_entries_out_table is %x\n",
2320                         current->pid,
2321                         (u32)num_entries_out_table);
2322                 dev_dbg(&sep->pdev->dev,
2323                         "[PID%d] output info_in_entry_ptr is %lx\n",
2324                         current->pid,
2325                         (unsigned long)info_in_entry_ptr);
2326                 dev_dbg(&sep->pdev->dev,
2327                         "[PID%d] output info_out_entry_ptr is %lx\n",
2328                         current->pid,
2329                         (unsigned long)info_out_entry_ptr);
2330         }
2331
2332         /* Print input tables */
2333         if (!dmatables_region) {
2334                 sep_debug_print_lli_tables(
2335                         sep,
2336                         (struct sep_lli_entry *)
2337                         sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2338                         *in_num_entries_ptr,
2339                         *table_data_size_ptr);
2340         }
2341
2342         /* Print output tables */
2343         if (!dmatables_region) {
2344                 sep_debug_print_lli_tables(
2345                         sep,
2346                         (struct sep_lli_entry *)
2347                         sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2348                         *out_num_entries_ptr,
2349                         *table_data_size_ptr);
2350         }
2351
2352         return 0;
2353 }
2354
2355 /**
2356  * sep_prepare_input_output_dma_table - prepare DMA I/O table
2357  * @app_virt_in_addr:
2358  * @app_virt_out_addr:
2359  * @data_size:
2360  * @block_size:
2361  * @lli_table_in_ptr:
2362  * @lli_table_out_ptr:
2363  * @in_num_entries_ptr:
2364  * @out_num_entries_ptr:
2365  * @table_data_size_ptr:
2366  * @is_kva: set for kernel data; used only for kernel crypto module
2367  *
 * This function builds input and output DMA tables for synchronous
 * symmetric operations (AES, DES, HASH). It also checks that each table
 * is a multiple of the block size.
2371  * Note that all bus addresses that are passed to the SEP
2372  * are in 32 bit format; the SEP is a 32 bit device
2373  */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	/* LLI entry array describing the locked input pages */
	struct sep_lli_entry *lli_in_array;
	/* LLI entry array describing the locked output pages */
	struct sep_lli_entry *lli_out_array;

	/* A DMA context is mandatory: it tracks locked pages per DCB slot */
	if (!dma_ctx) {
		error = -EINVAL;
		goto end_function;
	}

	if (data_size == 0) {
		/*
		 * Zero-length operation: the SEP still expects valid
		 * (empty) LLI tables for both directions, so build them
		 * without locking any pages.
		 */
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(
					sep,
					dmatables_region,
					dma_ctx,
					2);
		  if (error)
			goto end_function;
		}
		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
			in_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
			out_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		/* No pages locked, but the DCB count still advances */
		goto update_dcb_counter;
	}

	/* Initialize the page-array slots for the current DCB */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;

	/* Lock the pages of the buffer and translate them to pages */
	if (is_kva) {
		/* Kernel-space buffers (kernel crypto module path) */
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
						current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
				current->pid);

			goto end_function;
		}

		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
						current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
				current->pid);

			/* Input side is already locked; unwind it */
			goto end_function_free_lli_in;
		}

	}

	else {
		/* User-space buffers */
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
						current->pid);
		error = sep_lock_user_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
				current->pid);

			goto end_function;
		}

		if (dma_ctx->secure_dma) {
			/* secure_dma requires use of non accessible memory */
			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
				current->pid);
			error = sep_lli_table_secure_dma(sep,
				app_virt_out_addr, data_size, &lli_out_array,
				SEP_DRIVER_OUT_FLAG, dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] secure dma table setup for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		} else {
			/* For normal, non-secure dma */
			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
				current->pid);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] Locking user output pages\n",
				current->pid);

			error = sep_lock_user_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		}
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
		current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* Call the function that creates table from the lli arrays */
	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
					current->pid);
	error = sep_construct_dma_tables_from_lli(
			sep, lli_in_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								in_num_pages,
			lli_out_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								out_num_pages,
			block_size, lli_table_in_ptr, lli_table_out_ptr,
			in_num_entries_ptr, out_num_entries_ptr,
			table_data_size_ptr, dmatables_region, dma_ctx);

	if (error) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
			current->pid);
		goto end_function_with_error;
	}

	/* Success: the LLI scratch arrays are no longer needed */
	kfree(lli_out_array);
	kfree(lli_in_array);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;

	goto end_function;

end_function_with_error:
	/*
	 * Table construction failed after both sides were locked:
	 * release the output mappings/pages first, then fall through
	 * to release the input side as well.
	 */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	kfree(lli_out_array);


end_function_free_lli_in:
	/* Release the input mappings/pages (reached directly when only
	 * the input buffer was locked before a failure) */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	kfree(lli_in_array);

end_function:

	return error;

}
2571
2572 /**
2573  * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2574  * @app_in_address: unsigned long; for data buffer in (user space)
2575  * @app_out_address: unsigned long; for data buffer out (user space)
2576  * @data_in_size: u32; for size of data
2577  * @block_size: u32; for block size
2578  * @tail_block_size: u32; for size of tail block
2579  * @isapplet: bool; to indicate external app
2580  * @is_kva: bool; kernel buffer; only used for kernel crypto module
 *  @secure_dma: indicates whether this is secure_dma using IMR
2582  *
 * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
 * block) the address of which is known by the SEP hardware
2586  * Note that all bus addresses that are passed to the SEP
2587  * are in 32 bit format; the SEP is a 32 bit device
2588  */
2589 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2590         unsigned long  app_in_address,
2591         unsigned long  app_out_address,
2592         u32  data_in_size,
2593         u32  block_size,
2594         u32  tail_block_size,
2595         bool isapplet,
2596         bool    is_kva,
2597         bool    secure_dma,
2598         struct sep_dcblock *dcb_region,
2599         void **dmatables_region,
2600         struct sep_dma_context **dma_ctx,
2601         struct scatterlist *src_sg,
2602         struct scatterlist *dst_sg)
2603 {
2604         int error = 0;
2605         /* Size of tail */
2606         u32 tail_size = 0;
2607         /* Address of the created DCB table */
2608         struct sep_dcblock *dcb_table_ptr = NULL;
2609         /* The physical address of the first input DMA table */
2610         dma_addr_t in_first_mlli_address = 0;
2611         /* Number of entries in the first input DMA table */
2612         u32  in_first_num_entries = 0;
2613         /* The physical address of the first output DMA table */
2614         dma_addr_t  out_first_mlli_address = 0;
2615         /* Number of entries in the first output DMA table */
2616         u32  out_first_num_entries = 0;
2617         /* Data in the first input/output table */
2618         u32  first_data_size = 0;
2619
2620         dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2621                 current->pid, app_in_address);
2622
2623         dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2624                 current->pid, app_out_address);
2625
2626         dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2627                 current->pid, data_in_size);
2628
2629         dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2630                 current->pid, block_size);
2631
2632         dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2633                 current->pid, tail_block_size);
2634
2635         dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2636                 current->pid, isapplet);
2637
2638         dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2639                 current->pid, is_kva);
2640
2641         dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2642                 current->pid, src_sg);
2643
2644         dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2645                 current->pid, dst_sg);
2646
2647         if (!dma_ctx) {
2648                 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2649                                                 current->pid);
2650                 error = -EINVAL;
2651                 goto end_function;
2652         }
2653
2654         if (*dma_ctx) {
2655                 /* In case there are multiple DCBs for this transaction */
2656                 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2657                                                 current->pid);
2658         } else {
2659                 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2660                 if (!(*dma_ctx)) {
2661                         dev_dbg(&sep->pdev->dev,
2662                                 "[PID%d] Not enough memory for DMA context\n",
2663                                 current->pid);
2664                   error = -ENOMEM;
2665                   goto end_function;
2666                 }
2667                 dev_dbg(&sep->pdev->dev,
2668                         "[PID%d] Created DMA context addr at 0x%p\n",
2669                         current->pid, *dma_ctx);
2670         }
2671
2672         (*dma_ctx)->secure_dma = secure_dma;
2673
2674         /* these are for kernel crypto only */
2675         (*dma_ctx)->src_sg = src_sg;
2676         (*dma_ctx)->dst_sg = dst_sg;
2677
2678         if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2679                 /* No more DCBs to allocate */
2680                 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2681                                                 current->pid);
2682                 error = -ENOSPC;
2683                 goto end_function_error;
2684         }
2685
2686         /* Allocate new DCB */
2687         if (dcb_region) {
2688                 dcb_table_ptr = dcb_region;
2689         } else {
2690                 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2691                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2692                         ((*dma_ctx)->nr_dcb_creat *
2693                                                 sizeof(struct sep_dcblock)));
2694         }
2695
2696         /* Set the default values in the DCB */
2697         dcb_table_ptr->input_mlli_address = 0;
2698         dcb_table_ptr->input_mlli_num_entries = 0;
2699         dcb_table_ptr->input_mlli_data_size = 0;
2700         dcb_table_ptr->output_mlli_address = 0;
2701         dcb_table_ptr->output_mlli_num_entries = 0;
2702         dcb_table_ptr->output_mlli_data_size = 0;
2703         dcb_table_ptr->tail_data_size = 0;
2704         dcb_table_ptr->out_vr_tail_pt = 0;
2705
2706         if (isapplet) {
2707
2708                 /* Check if there is enough data for DMA operation */
2709                 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2710                         if (is_kva) {
2711                                 error = -ENODEV;
2712                                 goto end_function_error;
2713                         } else {
2714                                 if (copy_from_user(dcb_table_ptr->tail_data,
2715                                         (void __user *)app_in_address,
2716                                         data_in_size)) {
2717                                         error = -EFAULT;
2718                                         goto end_function_error;
2719                                 }
2720                         }
2721
2722                         dcb_table_ptr->tail_data_size = data_in_size;
2723
2724                         /* Set the output user-space address for mem2mem op */
2725                         if (app_out_address)
2726                                 dcb_table_ptr->out_vr_tail_pt =
2727                                 (aligned_u64)app_out_address;
2728
2729                         /*
2730                          * Update both data length parameters in order to avoid
2731                          * second data copy and allow building of empty mlli
2732                          * tables
2733                          */
2734                         tail_size = 0x0;
2735                         data_in_size = 0x0;
2736
2737                 } else {
2738                         if (!app_out_address) {
2739                                 tail_size = data_in_size % block_size;
2740                                 if (!tail_size) {
2741                                         if (tail_block_size == block_size)
2742                                                 tail_size = block_size;
2743                                 }
2744                         } else {
2745                                 tail_size = 0;
2746                         }
2747                 }
2748                 if (tail_size) {
2749                         if (tail_size > sizeof(dcb_table_ptr->tail_data))
2750                                 return -EINVAL;
2751                         if (is_kva) {
2752                                 error = -ENODEV;
2753                                 goto end_function_error;
2754                         } else {
2755                                 /* We have tail data - copy it to DCB */
2756                                 if (copy_from_user(dcb_table_ptr->tail_data,
2757                                         (void __user *)(app_in_address +
2758                                         data_in_size - tail_size), tail_size)) {
2759                                         error = -EFAULT;
2760                                         goto end_function_error;
2761                                 }
2762                         }
2763                         if (app_out_address)
2764                                 /*
2765                                  * Calculate the output address
2766                                  * according to tail data size
2767                                  */
2768                                 dcb_table_ptr->out_vr_tail_pt =
2769                                         (aligned_u64)app_out_address +
2770                                         data_in_size - tail_size;
2771
2772                         /* Save the real tail data size */
2773                         dcb_table_ptr->tail_data_size = tail_size;
2774                         /*
2775                          * Update the data size without the tail
2776                          * data size AKA data for the dma
2777                          */
2778                         data_in_size = (data_in_size - tail_size);
2779                 }
2780         }
2781         /* Check if we need to build only input table or input/output */
2782         if (app_out_address) {
2783                 /* Prepare input/output tables */
2784                 error = sep_prepare_input_output_dma_table(sep,
2785                                 app_in_address,
2786                                 app_out_address,
2787                                 data_in_size,
2788                                 block_size,
2789                                 &in_first_mlli_address,
2790                                 &out_first_mlli_address,
2791                                 &in_first_num_entries,
2792                                 &out_first_num_entries,
2793                                 &first_data_size,
2794                                 is_kva,
2795                                 dmatables_region,
2796                                 *dma_ctx);
2797         } else {
2798                 /* Prepare input tables */
2799                 error = sep_prepare_input_dma_table(sep,
2800                                 app_in_address,
2801                                 data_in_size,
2802                                 block_size,
2803                                 &in_first_mlli_address,
2804                                 &in_first_num_entries,
2805                                 &first_data_size,
2806                                 is_kva,
2807                                 dmatables_region,
2808                                 *dma_ctx);
2809         }
2810
2811         if (error) {
2812                 dev_warn(&sep->pdev->dev,
2813                         "prepare DMA table call failed from prepare DCB call\n");
2814                 goto end_function_error;
2815         }
2816
2817         /* Set the DCB values */
2818         dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2819         dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2820         dcb_table_ptr->input_mlli_data_size = first_data_size;
2821         dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2822         dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2823         dcb_table_ptr->output_mlli_data_size = first_data_size;
2824
2825         goto end_function;
2826
2827 end_function_error:
2828         kfree(*dma_ctx);
2829         *dma_ctx = NULL;
2830
2831 end_function:
2832         return error;
2833
2834 }
2835
2836
2837 /**
2838  * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2839  * @sep: pointer to struct sep_device
2840  * @isapplet: indicates external application (used for kernel access)
2841  * @is_kva: indicates kernel addresses (only used for kernel crypto)
2842  *
2843  * This function frees the DMA tables and DCB
2844  */
2845 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2846         bool is_kva, struct sep_dma_context **dma_ctx)
2847 {
2848         struct sep_dcblock *dcb_table_ptr;
2849         unsigned long pt_hold;
2850         void *tail_pt;
2851
2852         int i = 0;
2853         int error = 0;
2854         int error_temp = 0;
2855
2856         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2857                                         current->pid);
2858         if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
2859                 return 0;
2860
2861         if (!(*dma_ctx)->secure_dma && isapplet) {
2862                 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2863                         current->pid);
2864
2865                 /* Tail stuff is only for non secure_dma */
2866                 /* Set pointer to first DCB table */
2867                 dcb_table_ptr = (struct sep_dcblock *)
2868                         (sep->shared_addr +
2869                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2870
2871                 /**
2872                  * Go over each DCB and see if
2873                  * tail pointer must be updated
2874                  */
2875                 for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
2876                      i++, dcb_table_ptr++) {
2877                         if (dcb_table_ptr->out_vr_tail_pt) {
2878                                 pt_hold = (unsigned long)dcb_table_ptr->
2879                                         out_vr_tail_pt;
2880                                 tail_pt = (void *)pt_hold;
2881                                 if (is_kva) {
2882                                         error = -ENODEV;
2883                                         break;
2884                                 } else {
2885                                         error_temp = copy_to_user(
2886                                                 (void __user *)tail_pt,
2887                                                 dcb_table_ptr->tail_data,
2888                                                 dcb_table_ptr->tail_data_size);
2889                                 }
2890                                 if (error_temp) {
2891                                         /* Release the DMA resource */
2892                                         error = -EFAULT;
2893                                         break;
2894                                 }
2895                         }
2896                 }
2897         }
2898
2899         /* Free the output pages, if any */
2900         sep_free_dma_table_data_handler(sep, dma_ctx);
2901
2902         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2903                                         current->pid);
2904
2905         return error;
2906 }
2907
2908 /**
2909  * sep_prepare_dcb_handler - prepare a control block
2910  * @sep: pointer to struct sep_device
2911  * @arg: pointer to user parameters
2912  * @secure_dma: indicate whether we are using secure_dma on IMR
2913  *
2914  * This function will retrieve the RAR buffer physical addresses, type
2915  * & size corresponding to the RAR handles provided in the buffers vector.
2916  */
2917 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2918                                    bool secure_dma,
2919                                    struct sep_dma_context **dma_ctx)
2920 {
2921         int error;
2922         /* Command arguments */
2923         static struct build_dcb_struct command_args;
2924
2925         /* Get the command arguments */
2926         if (copy_from_user(&command_args, (void __user *)arg,
2927                                         sizeof(struct build_dcb_struct))) {
2928                 error = -EFAULT;
2929                 goto end_function;
2930         }
2931
2932         dev_dbg(&sep->pdev->dev,
2933                 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2934                         current->pid, command_args.app_in_address);
2935         dev_dbg(&sep->pdev->dev,
2936                 "[PID%d] app_out_address is %08llx\n",
2937                         current->pid, command_args.app_out_address);
2938         dev_dbg(&sep->pdev->dev,
2939                 "[PID%d] data_size is %x\n",
2940                         current->pid, command_args.data_in_size);
2941         dev_dbg(&sep->pdev->dev,
2942                 "[PID%d] block_size is %x\n",
2943                         current->pid, command_args.block_size);
2944         dev_dbg(&sep->pdev->dev,
2945                 "[PID%d] tail block_size is %x\n",
2946                         current->pid, command_args.tail_block_size);
2947         dev_dbg(&sep->pdev->dev,
2948                 "[PID%d] is_applet is %x\n",
2949                         current->pid, command_args.is_applet);
2950
2951         if (!command_args.app_in_address) {
2952                 dev_warn(&sep->pdev->dev,
2953                         "[PID%d] null app_in_address\n", current->pid);
2954                 error = -EINVAL;
2955                 goto end_function;
2956         }
2957
2958         error = sep_prepare_input_output_dma_table_in_dcb(sep,
2959                         (unsigned long)command_args.app_in_address,
2960                         (unsigned long)command_args.app_out_address,
2961                         command_args.data_in_size, command_args.block_size,
2962                         command_args.tail_block_size,
2963                         command_args.is_applet, false,
2964                         secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2965
2966 end_function:
2967         return error;
2968
2969 }
2970
2971 /**
2972  * sep_free_dcb_handler - free control block resources
2973  * @sep: pointer to struct sep_device
2974  *
2975  * This function frees the DCB resources and updates the needed
2976  * user-space buffers.
2977  */
2978 static int sep_free_dcb_handler(struct sep_device *sep,
2979                                 struct sep_dma_context **dma_ctx)
2980 {
2981         if (!dma_ctx || !(*dma_ctx)) {
2982                 dev_dbg(&sep->pdev->dev,
2983                         "[PID%d] no dma context defined, nothing to free\n",
2984                         current->pid);
2985                 return -EINVAL;
2986         }
2987
2988         dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
2989                 current->pid,
2990                 (*dma_ctx)->nr_dcb_creat);
2991
2992         return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
2993 }
2994
2995 /**
2996  * sep_ioctl - ioctl handler for sep device
2997  * @filp: pointer to struct file
2998  * @cmd: command
2999  * @arg: pointer to argument structure
3000  *
3001  * Implement the ioctl methods available on the SEP device.
3002  */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		/* Only one send-message per transaction is allowed */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}
		/* Send command to SEP */
		error = sep_send_command_handler(sep);
		if (!error)
			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				&call_status->status);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
			current->pid);
		break;
	case SEP_IOCENDTRANSACTION:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION start\n",
			current->pid);
		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
						    my_queue_elem);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION end\n",
			current->pid);
		break;
	case SEP_IOCPREPAREDCB:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB start\n",
			current->pid);
		/* fall-through */
	case SEP_IOCPREPAREDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
			current->pid);
		/* DCBs must be prepared before the message is sent */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb prep needed before send msg\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}

		if (!arg) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb null arg\n", current->pid);
			error = -EINVAL;
			goto end_function;
		}

		/* The cmd value distinguishes secure-DMA from plain prepare */
		if (cmd == SEP_IOCPREPAREDCB) {
			/* No secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, false,
				dma_ctx);
		} else {
			/* Secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOC_POC (with secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, true,
				dma_ctx);
		}
		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
			current->pid);
		break;
	case SEP_IOCFREEDCB:
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
			current->pid);
		/* fall-through: both free variants share the same handler */
	case SEP_IOCFREEDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
			current->pid);
		error = sep_free_dcb_handler(sep, dma_ctx);
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
			current->pid);
		break;
	default:
		error = -ENOTTY;
		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
			current->pid);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);

	return error;
}
3140
3141 /**
3142  * sep_inthandler - interrupt handler for sep device
3143  * @irq: interrupt
3144  * @dev_id: device id
3145  */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	/* Spurious interrupt: no transaction holds the working lock */
	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	/* IRR bit 13 flags a SEP-originated interrupt here; other bits
	 * mean the line was shared with someone else (hardware-specific,
	 * see the CryptoCell integration guides referenced in the header) */
	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
					sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			/* Defer completion work to the finish tasklet;
			 * working lock is released there, not here */
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		/* User-space path: SEP is done working, release the lock */
		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		/* GPR2 top bits encode the request type:
		 * bit 30 = printf, bit 31 = daemon, neither = normal reply */
		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	/* Ack only interrupts we actually handled, by writing IRR
	 * contents back to the interrupt clear register */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
3214
3215 /**
3216  * sep_reconfig_shared_area - reconfigure shared area
3217  * @sep: pointer to struct sep_device
3218  *
3219  * Reconfig the shared area between HOST and SEP - needed in case
3220  * the DX_CC_Init function was called before OS loading.
3221  */
3222 static int sep_reconfig_shared_area(struct sep_device *sep)
3223 {
3224         int ret_val;
3225
3226         /* use to limit waiting for SEP */
3227         unsigned long end_time;
3228
3229         /* Send the new SHARED MESSAGE AREA to the SEP */
3230         dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3231                                 (unsigned long long)sep->shared_bus);
3232
3233         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3234
3235         /* Poll for SEP response */
3236         ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3237
3238         end_time = jiffies + (WAIT_TIME * HZ);
3239
3240         while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3241                 (ret_val != sep->shared_bus))
3242                 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3243
3244         /* Check the return value (register) */
3245         if (ret_val != sep->shared_bus) {
3246                 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3247                 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3248                 ret_val = -ENOMEM;
3249         } else
3250                 ret_val = 0;
3251
3252         dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3253
3254         return ret_val;
3255 }
3256
3257 /**
3258  *      sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3259  *                                              contexts into use
3260  *      @sep: SEP device
3261  *      @dcb_region: DCB region copy
3262  *      @dmatables_region: MLLI/DMA tables copy
3263  *      @dma_ctx: DMA context for current transaction
3264  */
ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
					struct sep_dcblock **dcb_region,
					void **dmatables_region,
					struct sep_dma_context *dma_ctx)
{
	void *dmaregion_free_start = NULL;
	void *dmaregion_free_end = NULL;
	void *dcbregion_free_start = NULL;
	void *dcbregion_free_end = NULL;
	ssize_t error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
		current->pid);

	/* At least one DCB must have been created for this transaction */
	if (1 > dma_ctx->nr_dcb_creat) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
			 current->pid, dma_ctx->nr_dcb_creat);
		error = -EINVAL;
		goto end_function;
	}

	/* Copy the thread's MLLI/DMA table copy into the shared area.
	 * The *_end pointers are inclusive (last valid byte, hence -1). */
	dmaregion_free_start = sep->shared_addr
				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
	dmaregion_free_end = dmaregion_free_start
				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	/* NOTE(review): with an inclusive end, this rejects a table that
	 * exactly fills the region (len == SIZE) — confirm intended */
	if (dmaregion_free_start
	     + dma_ctx->dmatables_len > dmaregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}
	memcpy(dmaregion_free_start,
	       *dmatables_region,
	       dma_ctx->dmatables_len);
	/* Free MLLI table copy */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	/* Copy thread's DCB  table copy to DCB table region */
	dcbregion_free_start = sep->shared_addr +
				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
	dcbregion_free_end = dcbregion_free_start +
				(SEP_MAX_NUM_SYNC_DMA_OPS *
					sizeof(struct sep_dcblock)) - 1;

	if (dcbregion_free_start
	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
	     > dcbregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}

	memcpy(dcbregion_free_start,
	       *dcb_region,
	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));

	/* Print the tables */
	dev_dbg(&sep->pdev->dev, "activate: input table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->input_mlli_address),
		(*dcb_region)->input_mlli_num_entries,
		(*dcb_region)->input_mlli_data_size);

	dev_dbg(&sep->pdev->dev, "activate: output table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->output_mlli_address),
		(*dcb_region)->output_mlli_num_entries,
		(*dcb_region)->output_mlli_data_size);

	dev_dbg(&sep->pdev->dev,
		 "[PID%d] printing activated tables\n", current->pid);

end_function:
	/* Always release the thread-local copies; kfree(NULL) is a no-op
	 * for *dmatables_region when it was already freed above */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	kfree(*dcb_region);
	*dcb_region = NULL;

	return error;
}
3349
3350 /**
3351  *      sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3352  *      @sep: SEP device
3353  *      @dcb_region: DCB region buf to create for current transaction
3354  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3355  *      @dma_ctx: DMA context buf to create for current transaction
3356  *      @user_dcb_args: User arguments for DCB/MLLI creation
3357  *      @num_dcbs: Number of DCBs to create
3358  *      @secure_dma: Indicate use of IMR restricted memory secure dma
3359  */
3360 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3361                         struct sep_dcblock **dcb_region,
3362                         void **dmatables_region,
3363                         struct sep_dma_context **dma_ctx,
3364                         const struct build_dcb_struct __user *user_dcb_args,
3365                         const u32 num_dcbs, bool secure_dma)
3366 {
3367         int error = 0;
3368         int i = 0;
3369         struct build_dcb_struct *dcb_args = NULL;
3370
3371         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3372                 current->pid);
3373
3374         if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3375                 error = -EINVAL;
3376                 goto end_function;
3377         }
3378
3379         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3380                 dev_warn(&sep->pdev->dev,
3381                          "[PID%d] invalid number of dcbs 0x%08X\n",
3382                          current->pid, num_dcbs);
3383                 error = -EINVAL;
3384                 goto end_function;
3385         }
3386
3387         dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3388                            GFP_KERNEL);
3389         if (!dcb_args) {
3390                 error = -ENOMEM;
3391                 goto end_function;
3392         }
3393
3394         if (copy_from_user(dcb_args,
3395                         user_dcb_args,
3396                         num_dcbs * sizeof(struct build_dcb_struct))) {
3397                 error = -EFAULT;
3398                 goto end_function;
3399         }
3400
3401         /* Allocate thread-specific memory for DCB */
3402         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3403                               GFP_KERNEL);
3404         if (!(*dcb_region)) {
3405                 error = -ENOMEM;
3406                 goto end_function;
3407         }
3408
3409         /* Prepare DCB and MLLI table into the allocated regions */
3410         for (i = 0; i < num_dcbs; i++) {
3411                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3412                                 (unsigned long)dcb_args[i].app_in_address,
3413                                 (unsigned long)dcb_args[i].app_out_address,
3414                                 dcb_args[i].data_in_size,
3415                                 dcb_args[i].block_size,
3416                                 dcb_args[i].tail_block_size,
3417                                 dcb_args[i].is_applet,
3418                                 false, secure_dma,
3419                                 *dcb_region, dmatables_region,
3420                                 dma_ctx,
3421                                 NULL,
3422                                 NULL);
3423                 if (error) {
3424                         dev_warn(&sep->pdev->dev,
3425                                  "[PID%d] dma table creation failed\n",
3426                                  current->pid);
3427                         goto end_function;
3428                 }
3429
3430                 if (dcb_args[i].app_in_address != 0)
3431                         (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3432         }
3433
3434 end_function:
3435         kfree(dcb_args);
3436         return error;
3437
3438 }
3439
3440 /**
3441  *      sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3442  *      for kernel crypto
3443  *      @sep: SEP device
3444  *      @dcb_region: DCB region buf to create for current transaction
3445  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3446  *      @dma_ctx: DMA context buf to create for current transaction
3447  *      @user_dcb_args: User arguments for DCB/MLLI creation
3448  *      @num_dcbs: Number of DCBs to create
3449  *      This does that same thing as sep_create_dcb_dmatables_context
3450  *      except that it is used only for the kernel crypto operation. It is
3451  *      separate because there is no user data involved; the dcb data structure
3452  *      is specific for kernel crypto (build_dcb_struct_kernel)
3453  */
3454 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3455                         struct sep_dcblock **dcb_region,
3456                         void **dmatables_region,
3457                         struct sep_dma_context **dma_ctx,
3458                         const struct build_dcb_struct_kernel *dcb_data,
3459                         const u32 num_dcbs)
3460 {
3461         int error = 0;
3462         int i = 0;
3463
3464         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3465                 current->pid);
3466
3467         if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3468                 error = -EINVAL;
3469                 goto end_function;
3470         }
3471
3472         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3473                 dev_warn(&sep->pdev->dev,
3474                          "[PID%d] invalid number of dcbs 0x%08X\n",
3475                          current->pid, num_dcbs);
3476                 error = -EINVAL;
3477                 goto end_function;
3478         }
3479
3480         dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3481                 current->pid, num_dcbs);
3482
3483         /* Allocate thread-specific memory for DCB */
3484         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3485                               GFP_KERNEL);
3486         if (!(*dcb_region)) {
3487                 error = -ENOMEM;
3488                 goto end_function;
3489         }
3490
3491         /* Prepare DCB and MLLI table into the allocated regions */
3492         for (i = 0; i < num_dcbs; i++) {
3493                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3494                                 (unsigned long)dcb_data->app_in_address,
3495                                 (unsigned long)dcb_data->app_out_address,
3496                                 dcb_data->data_in_size,
3497                                 dcb_data->block_size,
3498                                 dcb_data->tail_block_size,
3499                                 dcb_data->is_applet,
3500                                 true,
3501                                 false,
3502                                 *dcb_region, dmatables_region,
3503                                 dma_ctx,
3504                                 dcb_data->src_sg,
3505                                 dcb_data->dst_sg);
3506                 if (error) {
3507                         dev_warn(&sep->pdev->dev,
3508                                  "[PID%d] dma table creation failed\n",
3509                                  current->pid);
3510                         goto end_function;
3511                 }
3512         }
3513
3514 end_function:
3515         return error;
3516
3517 }
3518
3519 /**
3520  *      sep_activate_msgarea_context - Takes the message area context into use
3521  *      @sep: SEP device
3522  *      @msg_region: Message area context buf
3523  *      @msg_len: Message area context buffer size
3524  */
3525 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3526                                             void **msg_region,
3527                                             const size_t msg_len)
3528 {
3529         dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3530                 current->pid);
3531
3532         if (!msg_region || !(*msg_region) ||
3533             SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3534                 dev_warn(&sep->pdev->dev,
3535                          "[PID%d] invalid act msgarea len 0x%08zX\n",
3536                          current->pid, msg_len);
3537                 return -EINVAL;
3538         }
3539
3540         memcpy(sep->shared_addr, *msg_region, msg_len);
3541
3542         return 0;
3543 }
3544
3545 /**
3546  *      sep_create_msgarea_context - Creates message area context
3547  *      @sep: SEP device
3548  *      @msg_region: Msg area region buf to create for current transaction
3549  *      @msg_user: Content for msg area region from user
3550  *      @msg_len: Message area size
3551  */
3552 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3553                                           void **msg_region,
3554                                           const void __user *msg_user,
3555                                           const size_t msg_len)
3556 {
3557         int error = 0;
3558
3559         dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3560                 current->pid);
3561
3562         if (!msg_region ||
3563             !msg_user ||
3564             SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3565             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3566                 dev_warn(&sep->pdev->dev,
3567                          "[PID%d] invalid creat msgarea len 0x%08zX\n",
3568                          current->pid, msg_len);
3569                 error = -EINVAL;
3570                 goto end_function;
3571         }
3572
3573         /* Allocate thread-specific memory for message buffer */
3574         *msg_region = kzalloc(msg_len, GFP_KERNEL);
3575         if (!(*msg_region)) {
3576                 error = -ENOMEM;
3577                 goto end_function;
3578         }
3579
3580         /* Copy input data to write() to allocated message buffer */
3581         if (copy_from_user(*msg_region, msg_user, msg_len)) {
3582                 error = -EFAULT;
3583                 goto end_function;
3584         }
3585
3586 end_function:
3587         if (error && msg_region) {
3588                 kfree(*msg_region);
3589                 *msg_region = NULL;
3590         }
3591
3592         return error;
3593 }
3594
3595
3596 /**
3597  *      sep_read - Returns results of an operation for fastcall interface
3598  *      @filp: File pointer
3599  *      @buf_user: User buffer for storing results
3600  *      @count_user: User buffer size
3601  *      @offset: File offset, not supported
3602  *
3603  *      The implementation does not support reading in chunks, all data must be
3604  *      consumed during a single read system call.
3605  */
static ssize_t sep_read(struct file *filp,
			char __user *buf_user, size_t count_user,
			loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	ssize_t error = 0, error_tmp = 0;

	/* Am I the process that owns the transaction? */
	error = sep_check_transaction_owner(sep);
	if (error) {
		/* Not owner: return without touching the transaction state */
		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Checks that user has called necessary apis */
	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
			&call_status->status)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] fastcall write not called\n",
			 current->pid);
		error = -EPROTO;
		goto end_function_error;
	}

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function_error;
	}


	/* Wait for SEP to finish */
	wait_event(sep->event_interrupt,
		   test_bit(SEP_WORKING_LOCK_BIT,
			    &sep->in_use_flags) == 0);

	sep_dump_message(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
		current->pid, count_user);

	/* In case user has allocated bigger buffer */
	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;

	/* Hand the shared-area reply message back to user space */
	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
		error = -EFAULT;
		goto end_function_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
	error = count_user;

end_function_error:
	/* Copy possible tail data to user and free DCB and MLLIs */
	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
	if (error_tmp)
		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
			current->pid);

	/* End the transaction, wakeup pending ones */
	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);
	if (error_tmp)
		dev_warn(&sep->pdev->dev,
			 "[PID%d] ending transaction failed\n",
			 current->pid);

end_function:
	/* Cleanup failures are logged but never mask the primary result */
	return error;
}
3684
3685 /**
3686  *      sep_fastcall_args_get - Gets fastcall params from user
3687  *      sep: SEP device
3688  *      @args: Parameters buffer
3689  *      @buf_user: User buffer for operation parameters
3690  *      @count_user: User buffer size
3691  */
3692 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3693                                             struct sep_fastcall_hdr *args,
3694                                             const char __user *buf_user,
3695                                             const size_t count_user)
3696 {
3697         ssize_t error = 0;
3698         size_t actual_count = 0;
3699
3700         if (!buf_user) {
3701                 dev_warn(&sep->pdev->dev,
3702                          "[PID%d] null user buffer\n",
3703                          current->pid);
3704                 error = -EINVAL;
3705                 goto end_function;
3706         }
3707
3708         if (count_user < sizeof(struct sep_fastcall_hdr)) {
3709                 dev_warn(&sep->pdev->dev,
3710                          "[PID%d] too small message size 0x%08zX\n",
3711                          current->pid, count_user);
3712                 error = -EINVAL;
3713                 goto end_function;
3714         }
3715
3716
3717         if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3718                 error = -EFAULT;
3719                 goto end_function;
3720         }
3721
3722         if (SEP_FC_MAGIC != args->magic) {
3723                 dev_warn(&sep->pdev->dev,
3724                          "[PID%d] invalid fastcall magic 0x%08X\n",
3725                          current->pid, args->magic);
3726                 error = -EINVAL;
3727                 goto end_function;
3728         }
3729
3730         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3731                 current->pid, args->num_dcbs);
3732         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3733                 current->pid, args->msg_len);
3734
3735         if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3736             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3737                 dev_warn(&sep->pdev->dev,
3738                          "[PID%d] invalid message length\n",
3739                          current->pid);
3740                 error = -EINVAL;
3741                 goto end_function;
3742         }
3743
3744         actual_count = sizeof(struct sep_fastcall_hdr)
3745                         + args->msg_len
3746                         + (args->num_dcbs * sizeof(struct build_dcb_struct));
3747
3748         if (actual_count != count_user) {
3749                 dev_warn(&sep->pdev->dev,
3750                          "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
3751                          current->pid, actual_count, count_user);
3752                 error = -EMSGSIZE;
3753                 goto end_function;
3754         }
3755
3756 end_function:
3757         return error;
3758 }
3759
3760 /**
3761  *      sep_write - Starts an operation for fastcall interface
3762  *      @filp: File pointer
3763  *      @buf_user: User buffer for operation parameters
3764  *      @count_user: User buffer size
3765  *      @offset: File offset, not supported
3766  *
3767  *      The implementation does not support writing in chunks,
3768  *      all data must be given during a single write system call.
3769  */
3770 static ssize_t sep_write(struct file *filp,
3771                          const char __user *buf_user, size_t count_user,
3772                          loff_t *offset)
3773 {
3774         struct sep_private_data * const private_data = filp->private_data;
3775         struct sep_call_status *call_status = &private_data->call_status;
3776         struct sep_device *sep = private_data->device;
3777         struct sep_dma_context *dma_ctx = NULL;
3778         struct sep_fastcall_hdr call_hdr = {0};
3779         void *msg_region = NULL;
3780         void *dmatables_region = NULL;
3781         struct sep_dcblock *dcb_region = NULL;
3782         ssize_t error = 0;
3783         struct sep_queue_info *my_queue_elem = NULL;
3784         bool my_secure_dma; /* are we using secure_dma (IMR)? */
3785
3786         dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3787                 current->pid, sep);
3788         dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3789                 current->pid, private_data);
3790
3791         error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3792         if (error)
3793                 goto end_function;
3794
3795         buf_user += sizeof(struct sep_fastcall_hdr);
3796
3797         if (call_hdr.secure_dma == 0)
3798                 my_secure_dma = false;
3799         else
3800                 my_secure_dma = true;
3801
3802         /*
3803          * Controlling driver memory usage by limiting amount of
3804          * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3805          * of threads can progress further at a time
3806          */
3807         dev_dbg(&sep->pdev->dev,
3808                 "[PID%d] waiting for double buffering region access\n",
3809                 current->pid);
3810         error = down_interruptible(&sep->sep_doublebuf);
3811         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3812                                         current->pid);
3813         if (error) {
3814                 /* Signal received */
3815                 goto end_function_error;
3816         }
3817
3818
3819         /*
3820          * Prepare contents of the shared area regions for
3821          * the operation into temporary buffers
3822          */
3823         if (0 < call_hdr.num_dcbs) {
3824                 error = sep_create_dcb_dmatables_context(sep,
3825                                 &dcb_region,
3826                                 &dmatables_region,
3827                                 &dma_ctx,
3828                                 (const struct build_dcb_struct __user *)
3829                                         buf_user,
3830                                 call_hdr.num_dcbs, my_secure_dma);
3831                 if (error)
3832                         goto end_function_error_doublebuf;
3833
3834                 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3835         }
3836
3837         error = sep_create_msgarea_context(sep,
3838                                            &msg_region,
3839                                            buf_user,
3840                                            call_hdr.msg_len);
3841         if (error)
3842                 goto end_function_error_doublebuf;
3843
3844         dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3845                                                         current->pid);
3846         my_queue_elem = sep_queue_status_add(sep,
3847                                 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3848                                 (dma_ctx) ? dma_ctx->input_data_len : 0,
3849                                      current->pid,
3850                                      current->comm, sizeof(current->comm));
3851
3852         if (!my_queue_elem) {
3853                 dev_dbg(&sep->pdev->dev,
3854                         "[PID%d] updating queue status error\n", current->pid);
3855                 error = -ENOMEM;
3856                 goto end_function_error_doublebuf;
3857         }
3858
3859         /* Wait until current process gets the transaction */
3860         error = sep_wait_transaction(sep);
3861
3862         if (error) {
3863                 /* Interrupted by signal, don't clear transaction */
3864                 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3865                         current->pid);
3866                 sep_queue_status_remove(sep, &my_queue_elem);
3867                 goto end_function_error_doublebuf;
3868         }
3869
3870         dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3871                 current->pid);
3872         private_data->my_queue_elem = my_queue_elem;
3873
3874         /* Activate shared area regions for the transaction */
3875         error = sep_activate_msgarea_context(sep, &msg_region,
3876                                              call_hdr.msg_len);
3877         if (error)
3878                 goto end_function_error_clear_transact;
3879
3880         sep_dump_message(sep);
3881
3882         if (0 < call_hdr.num_dcbs) {
3883                 error = sep_activate_dcb_dmatables_context(sep,
3884                                 &dcb_region,
3885                                 &dmatables_region,
3886                                 dma_ctx);
3887                 if (error)
3888                         goto end_function_error_clear_transact;
3889         }
3890
3891         /* Send command to SEP */
3892         error = sep_send_command_handler(sep);
3893         if (error)
3894                 goto end_function_error_clear_transact;
3895
3896         /* Store DMA context for the transaction */
3897         private_data->dma_ctx = dma_ctx;
3898         /* Update call status */
3899         set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3900         error = count_user;
3901
3902         up(&sep->sep_doublebuf);
3903         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3904                 current->pid);
3905
3906         goto end_function;
3907
3908 end_function_error_clear_transact:
3909         sep_end_transaction_handler(sep, &dma_ctx, call_status,
3910                                                 &private_data->my_queue_elem);
3911
3912 end_function_error_doublebuf:
3913         up(&sep->sep_doublebuf);
3914         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3915                 current->pid);
3916
3917 end_function_error:
3918         if (dma_ctx)
3919                 sep_free_dma_table_data_handler(sep, &dma_ctx);
3920
3921 end_function:
3922         kfree(dcb_region);
3923         kfree(dmatables_region);
3924         kfree(msg_region);
3925
3926         return error;
3927 }
3928 /**
3929  *      sep_seek - Handler for seek system call
3930  *      @filp: File pointer
3931  *      @offset: File offset
3932  *      @origin: Options for offset
3933  *
3934  *      Fastcall interface does not support seeking, all reads
3935  *      and writes are from/to offset zero
3936  */
3937 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3938 {
3939         return -ENOSYS;
3940 }
3941
3942
3943
3944 /**
3945  * sep_file_operations - file operation on sep device
3946  * @sep_ioctl:  ioctl handler from user space call
3947  * @sep_poll:   poll handler
3948  * @sep_open:   handles sep device open request
3949  * @sep_release:handles sep device release request
3950  * @sep_mmap:   handles memory mapping requests
3951  * @sep_read:   handles read request on sep device
3952  * @sep_write:  handles write request on sep device
3953  * @sep_seek:   handles seek request on sep device
3954  */
3955 static const struct file_operations sep_file_operations = {
3956         .owner = THIS_MODULE,
3957         .unlocked_ioctl = sep_ioctl,
3958         .poll = sep_poll,
3959         .open = sep_open,
3960         .release = sep_release,
3961         .mmap = sep_mmap,
3962         .read = sep_read,
3963         .write = sep_write,
3964         .llseek = sep_seek,
3965 };
3966
3967 /**
3968  * sep_sysfs_read - read sysfs entry per gives arguments
3969  * @filp: file pointer
3970  * @kobj: kobject pointer
3971  * @attr: binary file attributes
3972  * @buf: read to this buffer
3973  * @pos: offset to read
3974  * @count: amount of data to read
3975  *
3976  * This function is to read sysfs entries for sep driver per given arguments.
3977  */
3978 static ssize_t
3979 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3980                 struct bin_attribute *attr,
3981                 char *buf, loff_t pos, size_t count)
3982 {
3983         unsigned long lck_flags;
3984         size_t nleft = count;
3985         struct sep_device *sep = sep_dev;
3986         struct sep_queue_info *queue_elem = NULL;
3987         u32 queue_num = 0;
3988         u32 i = 1;
3989
3990         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
3991
3992         queue_num = sep->sep_queue_num;
3993         if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
3994                 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
3995
3996
3997         if (count < sizeof(queue_num)
3998                         + (queue_num * sizeof(struct sep_queue_data))) {
3999                 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4000                 return -EINVAL;
4001         }
4002
4003         memcpy(buf, &queue_num, sizeof(queue_num));
4004         buf += sizeof(queue_num);
4005         nleft -= sizeof(queue_num);
4006
4007         list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4008                 if (i++ > queue_num)
4009                         break;
4010
4011                 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4012                 nleft -= sizeof(queue_elem->data);
4013                 buf += sizeof(queue_elem->data);
4014         }
4015         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4016
4017         return count - nleft;
4018 }
4019
4020 /**
4021  * bin_attributes - defines attributes for queue_status
4022  * @attr: attributes (name & permissions)
4023  * @read: function pointer to read this file
4024  * @size: maxinum size of binary attribute
4025  */
4026 static const struct bin_attribute queue_status = {
4027         .attr = {.name = "queue_status", .mode = 0444},
4028         .read = sep_sysfs_read,
4029         .size = sizeof(u32)
4030                 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4031 };
4032
4033 /**
4034  * sep_register_driver_with_fs - register misc devices
4035  * @sep: pointer to struct sep_device
4036  *
4037  * This function registers the driver with the file system
4038  */
4039 static int sep_register_driver_with_fs(struct sep_device *sep)
4040 {
4041         int ret_val;
4042
4043         sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4044         sep->miscdev_sep.name = SEP_DEV_NAME;
4045         sep->miscdev_sep.fops = &sep_file_operations;
4046
4047         ret_val = misc_register(&sep->miscdev_sep);
4048         if (ret_val) {
4049                 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4050                         ret_val);
4051                 return ret_val;
4052         }
4053
4054         ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4055                                                                 &queue_status);
4056         if (ret_val) {
4057                 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4058                         ret_val);
4059                 misc_deregister(&sep->miscdev_sep);
4060                 return ret_val;
4061         }
4062
4063         return ret_val;
4064 }
4065
4066
4067 /**
4068  *sep_probe - probe a matching PCI device
4069  *@pdev:        pci_device
4070  *@ent: pci_device_id
4071  *
4072  *Attempt to set up and configure a SEP device that has been
4073  *discovered by the PCI layer. Allocates all required resources.
4074  */
4075 static int sep_probe(struct pci_dev *pdev,
4076         const struct pci_device_id *ent)
4077 {
4078         int error = 0;
4079         struct sep_device *sep = NULL;
4080
4081         if (sep_dev != NULL) {
4082                 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4083                 return -EBUSY;
4084         }
4085
4086         /* Enable the device */
4087         error = pci_enable_device(pdev);
4088         if (error) {
4089                 dev_warn(&pdev->dev, "error enabling pci device\n");
4090                 goto end_function;
4091         }
4092
4093         /* Allocate the sep_device structure for this device */
4094         sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4095         if (sep_dev == NULL) {
4096                 error = -ENOMEM;
4097                 goto end_function_disable_device;
4098         }
4099
4100         /*
4101          * We're going to use another variable for actually
4102          * working with the device; this way, if we have
4103          * multiple devices in the future, it would be easier
4104          * to make appropriate changes
4105          */
4106         sep = sep_dev;
4107
4108         sep->pdev = pci_dev_get(pdev);
4109
4110         init_waitqueue_head(&sep->event_transactions);
4111         init_waitqueue_head(&sep->event_interrupt);
4112         spin_lock_init(&sep->snd_rply_lck);
4113         spin_lock_init(&sep->sep_queue_lock);
4114         sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4115
4116         INIT_LIST_HEAD(&sep->sep_queue_status);
4117
4118         dev_dbg(&sep->pdev->dev,
4119                 "sep probe: PCI obtained, device being prepared\n");
4120
4121         /* Set up our register area */
4122         sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4123         if (!sep->reg_physical_addr) {
4124                 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4125                 error = -ENODEV;
4126                 goto end_function_free_sep_dev;
4127         }
4128
4129         sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4130         if (!sep->reg_physical_end) {
4131                 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4132                 error = -ENODEV;
4133                 goto end_function_free_sep_dev;
4134         }
4135
4136         sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4137                 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4138         if (!sep->reg_addr) {
4139                 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4140                 error = -ENODEV;
4141                 goto end_function_free_sep_dev;
4142         }
4143
4144         dev_dbg(&sep->pdev->dev,
4145                 "Register area start %llx end %llx virtual %p\n",
4146                 (unsigned long long)sep->reg_physical_addr,
4147                 (unsigned long long)sep->reg_physical_end,
4148                 sep->reg_addr);
4149
4150         /* Allocate the shared area */
4151         sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4152                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4153                 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4154                 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4155                 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4156
4157         if (sep_map_and_alloc_shared_area(sep)) {
4158                 error = -ENOMEM;
4159                 /* Allocation failed */
4160                 goto end_function_error;
4161         }
4162
4163         /* Clear ICR register */
4164         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4165
4166         /* Set the IMR register - open only GPR 2 */
4167         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4168
4169         /* Read send/receive counters from SEP */
4170         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4171         sep->reply_ct &= 0x3FFFFFFF;
4172         sep->send_ct = sep->reply_ct;
4173
4174         /* Get the interrupt line */
4175         error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4176                 "sep_driver", sep);
4177
4178         if (error)
4179                 goto end_function_deallocate_sep_shared_area;
4180
4181         /* The new chip requires a shared area reconfigure */
4182         error = sep_reconfig_shared_area(sep);
4183         if (error)
4184                 goto end_function_free_irq;
4185
4186         sep->in_use = 1;
4187
4188         /* Finally magic up the device nodes */
4189         /* Register driver with the fs */
4190         error = sep_register_driver_with_fs(sep);
4191
4192         if (error) {
4193                 dev_err(&sep->pdev->dev, "error registering dev file\n");
4194                 goto end_function_free_irq;
4195         }
4196
4197         sep->in_use = 0; /* through touching the device */
4198 #ifdef SEP_ENABLE_RUNTIME_PM
4199         pm_runtime_put_noidle(&sep->pdev->dev);
4200         pm_runtime_allow(&sep->pdev->dev);
4201         pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4202                 SUSPEND_DELAY);
4203         pm_runtime_use_autosuspend(&sep->pdev->dev);
4204         pm_runtime_mark_last_busy(&sep->pdev->dev);
4205         sep->power_save_setup = 1;
4206 #endif
4207         /* register kernel crypto driver */
4208 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4209         error = sep_crypto_setup();
4210         if (error) {
4211                 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4212                 goto end_function_free_irq;
4213         }
4214 #endif
4215         goto end_function;
4216
4217 end_function_free_irq:
4218         free_irq(pdev->irq, sep);
4219
4220 end_function_deallocate_sep_shared_area:
4221         /* De-allocate shared area */
4222         sep_unmap_and_free_shared_area(sep);
4223
4224 end_function_error:
4225         iounmap(sep->reg_addr);
4226
4227 end_function_free_sep_dev:
4228         pci_dev_put(sep_dev->pdev);
4229         kfree(sep_dev);
4230         sep_dev = NULL;
4231
4232 end_function_disable_device:
4233         pci_disable_device(pdev);
4234
4235 end_function:
4236         return error;
4237 }
4238
4239 /**
4240  * sep_remove - handles removing device from pci subsystem
4241  * @pdev:       pointer to pci device
4242  *
4243  * This function will handle removing our sep device from pci subsystem on exit
4244  * or unloading this module. It should free up all used resources, and unmap if
4245  * any memory regions mapped.
4246  */
4247 static void sep_remove(struct pci_dev *pdev)
4248 {
4249         struct sep_device *sep = sep_dev;
4250
4251         /* Unregister from fs */
4252         misc_deregister(&sep->miscdev_sep);
4253
4254         /* Unregister from kernel crypto */
4255 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4256         sep_crypto_takedown();
4257 #endif
4258         /* Free the irq */
4259         free_irq(sep->pdev->irq, sep);
4260
4261         /* Free the shared area  */
4262         sep_unmap_and_free_shared_area(sep_dev);
4263         iounmap(sep_dev->reg_addr);
4264
4265 #ifdef SEP_ENABLE_RUNTIME_PM
4266         if (sep->in_use) {
4267                 sep->in_use = 0;
4268                 pm_runtime_forbid(&sep->pdev->dev);
4269                 pm_runtime_get_noresume(&sep->pdev->dev);
4270         }
4271 #endif
4272         pci_dev_put(sep_dev->pdev);
4273         kfree(sep_dev);
4274         sep_dev = NULL;
4275 }
4276
/* Initialize struct pci_device_id for our driver */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)}, /* presumably Moorestown SEP - TODO confirm */
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)}, /* presumably Medfield SEP - TODO confirm */
	{0}
};
4283
4284 /* Export our pci_device_id structure to user space */
4285 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4286
4287 #ifdef SEP_ENABLE_RUNTIME_PM
4288
4289 /**
4290  * sep_pm_resume - rsume routine while waking up from S3 state
4291  * @dev:        pointer to sep device
4292  *
4293  * This function is to be used to wake up sep driver while system awakes from S3
4294  * state i.e. suspend to ram. The RAM in intact.
4295  * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4296  */
4297 static int sep_pci_resume(struct device *dev)
4298 {
4299         struct sep_device *sep = sep_dev;
4300
4301         dev_dbg(&sep->pdev->dev, "pci resume called\n");
4302
4303         if (sep->power_state == SEP_DRIVER_POWERON)
4304                 return 0;
4305
4306         /* Clear ICR register */
4307         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4308
4309         /* Set the IMR register - open only GPR 2 */
4310         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4311
4312         /* Read send/receive counters from SEP */
4313         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4314         sep->reply_ct &= 0x3FFFFFFF;
4315         sep->send_ct = sep->reply_ct;
4316
4317         sep->power_state = SEP_DRIVER_POWERON;
4318
4319         return 0;
4320 }
4321
4322 /**
4323  * sep_pm_suspend - suspend routine while going to S3 state
4324  * @dev:        pointer to sep device
4325  *
4326  * This function is to be used to suspend sep driver while system goes to S3
4327  * state i.e. suspend to ram. The RAM in intact and ON during this suspend.
4328  * Notes - revisit with more understanding of pm, ICR/IMR
4329  */
4330 static int sep_pci_suspend(struct device *dev)
4331 {
4332         struct sep_device *sep = sep_dev;
4333
4334         dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4335         if (sep->in_use == 1)
4336                 return -EAGAIN;
4337
4338         sep->power_state = SEP_DRIVER_POWEROFF;
4339
4340         /* Clear ICR register */
4341         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4342
4343         /* Set the IMR to block all */
4344         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4345
4346         return 0;
4347 }
4348
4349 /**
4350  * sep_pm_runtime_resume - runtime resume routine
4351  * @dev:        pointer to sep device
4352  *
4353  * Notes - revisit with more understanding of pm, ICR/IMR & counters
4354  */
4355 static int sep_pm_runtime_resume(struct device *dev)
4356 {
4357
4358         u32 retval2;
4359         u32 delay_count;
4360         struct sep_device *sep = sep_dev;
4361
4362         dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4363
4364         /**
4365          * Wait until the SCU boot is ready
4366          * This is done by iterating SCU_DELAY_ITERATION (10
4367          * microseconds each) up to SCU_DELAY_MAX (50) times.
4368          * This bit can be set in a random time that is less
4369          * than 500 microseconds after each power resume
4370          */
4371         retval2 = 0;
4372         delay_count = 0;
4373         while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4374                 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4375                 retval2 &= 0x00000008;
4376                 if (!retval2) {
4377                         udelay(SCU_DELAY_ITERATION);
4378                         delay_count += 1;
4379                 }
4380         }
4381
4382         if (!retval2) {
4383                 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4384                 return -EINVAL;
4385         }
4386
4387         /* Clear ICR register */
4388         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4389
4390         /* Set the IMR register - open only GPR 2 */
4391         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4392
4393         /* Read send/receive counters from SEP */
4394         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4395         sep->reply_ct &= 0x3FFFFFFF;
4396         sep->send_ct = sep->reply_ct;
4397
4398         return 0;
4399 }
4400
4401 /**
4402  * sep_pm_runtime_suspend - runtime suspend routine
4403  * @dev:        pointer to sep device
4404  *
4405  * Notes - revisit with more understanding of pm
4406  */
4407 static int sep_pm_runtime_suspend(struct device *dev)
4408 {
4409         struct sep_device *sep = sep_dev;
4410
4411         dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4412
4413         /* Clear ICR register */
4414         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4415         return 0;
4416 }
4417
4418 /**
4419  * sep_pm - power management for sep driver
4420  * @sep_pm_runtime_resume:      resume- no communication with cpu & main memory
4421  * @sep_pm_runtime_suspend:     suspend- no communication with cpu & main memory
4422  * @sep_pci_suspend:            suspend - main memory is still ON
4423  * @sep_pci_resume:             resume - main memory is still ON
4424  */
4425 static const struct dev_pm_ops sep_pm = {
4426         .runtime_resume = sep_pm_runtime_resume,
4427         .runtime_suspend = sep_pm_runtime_suspend,
4428         .resume = sep_pci_resume,
4429         .suspend = sep_pci_suspend,
4430 };
4431 #endif /* SEP_ENABLE_RUNTIME_PM */
4432
4433 /**
4434  * sep_pci_driver - registers this device with pci subsystem
4435  * @name:       name identifier for this driver
4436  * @sep_pci_id_tbl:     pointer to struct pci_device_id table
4437  * @sep_probe:  pointer to probe function in PCI driver
4438  * @sep_remove: pointer to remove function in PCI driver
4439  */
4440 static struct pci_driver sep_pci_driver = {
4441 #ifdef SEP_ENABLE_RUNTIME_PM
4442         .driver = {
4443                 .pm = &sep_pm,
4444         },
4445 #endif
4446         .name = "sep_sec_driver",
4447         .id_table = sep_pci_id_tbl,
4448         .probe = sep_probe,
4449         .remove = sep_remove
4450 };
4451
4452 module_pci_driver(sep_pci_driver);
4453 MODULE_LICENSE("GPL");