1 /*
2  *
3  *  sep_main.c - Security Processor Driver main group of functions
4  *
5  *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  *  Contributions(c) 2009-2011 Discretix. All rights reserved.
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms of the GNU General Public License as published by the Free
10  *  Software Foundation; version 2 of the License.
11  *
12  *  This program is distributed in the hope that it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *
17  *  You should have received a copy of the GNU General Public License along with
18  *  this program; if not, write to the Free Software Foundation, Inc., 59
19  *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20  *
21  *  CONTACTS:
22  *
23  *  Mark Allyn          mark.a.allyn@intel.com
24  *  Jayant Mangalampalli jayant.mangalampalli@intel.com
25  *
26  *  CHANGES:
27  *
28  *  2009.06.26  Initial publish
29  *  2010.09.14  Upgrade to Medfield
30  *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
31  *  2011.02.22  Enable kernel crypto operation
32  *
33  *  Please note that this driver is based on information in the Discretix
34  *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35  *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36  *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37  *  Overview and Integration Guide.
38  */
39 /* #define DEBUG */
40 /* #define SEP_PERF_DEBUG */
41
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/miscdevice.h>
45 #include <linux/fs.h>
46 #include <linux/cdev.h>
47 #include <linux/kdev_t.h>
48 #include <linux/mutex.h>
49 #include <linux/sched.h>
50 #include <linux/mm.h>
51 #include <linux/poll.h>
52 #include <linux/wait.h>
53 #include <linux/pci.h>
54 #include <linux/pm_runtime.h>
55 #include <linux/slab.h>
56 #include <linux/ioctl.h>
57 #include <asm/current.h>
58 #include <linux/ioport.h>
59 #include <linux/io.h>
60 #include <linux/interrupt.h>
61 #include <linux/pagemap.h>
62 #include <asm/cacheflush.h>
63 #include <linux/delay.h>
64 #include <linux/jiffies.h>
65 #include <linux/async.h>
66 #include <linux/crypto.h>
67 #include <crypto/internal/hash.h>
68 #include <crypto/scatterwalk.h>
69 #include <crypto/sha.h>
70 #include <crypto/md5.h>
71 #include <crypto/aes.h>
72 #include <crypto/des.h>
73 #include <crypto/hash.h>
74
75 #include "sep_driver_hw_defs.h"
76 #include "sep_driver_config.h"
77 #include "sep_driver_api.h"
78 #include "sep_dev.h"
79 #include "sep_crypto.h"
80
81 #define CREATE_TRACE_POINTS
82 #include "sep_trace_events.h"
83
84 /*
85  * Let's not spend cycles iterating over message
86  * area contents if debugging is not enabled
87  */
88 #ifdef DEBUG
89 #define sep_dump_message(sep)   _sep_dump_message(sep)
90 #else
91 #define sep_dump_message(sep)
92 #endif
93
94 /*
95  * Currently, there is only one SEP device per platform.
96  * If future platforms have more than one SEP device, this
97  * will become a linked list.
98  */
99
100 struct sep_device *sep_dev;
101
102 /**
103  * sep_queue_status_remove - Removes transaction from status queue
104  * @sep: SEP device
105  * @queue_elem: pointer to the queue element to remove
106  *
107  * This function removes information about a transaction from the queue.
108  */
109 void sep_queue_status_remove(struct sep_device *sep,
110                                       struct sep_queue_info **queue_elem)
111 {
112         unsigned long lck_flags;
113
114         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
115                 current->pid);
116
117         if (!queue_elem || !(*queue_elem)) {
118                 dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
119                                         current->pid, __func__);
120                 return;
121         }
122
123         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
124         list_del(&(*queue_elem)->list);
125         sep->sep_queue_num--;
126         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
127
128         kfree(*queue_elem);
129         *queue_elem = NULL;
130
131         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
132                 current->pid);
133         return;
134 }
135
136 /**
137  * sep_queue_status_add - Adds transaction to status queue
138  * @sep: SEP device
139  * @opcode: transaction opcode
140  * @size: input data size
141  * @pid: pid of current process
142  * @name: current process name
143  * @name_len: length of name (current process)
144  *
145  * This function adds information about a started transaction to the status
146  * queue.
147  */
148 struct sep_queue_info *sep_queue_status_add(
149                                                 struct sep_device *sep,
150                                                 u32 opcode,
151                                                 u32 size,
152                                                 u32 pid,
153                                                 u8 *name, size_t name_len)
154 {
155         unsigned long lck_flags;
156         struct sep_queue_info *my_elem = NULL;
157
158         my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
159
160         if (!my_elem)
161                 return NULL;
162
163         dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
164
165         my_elem->data.opcode = opcode;
166         my_elem->data.size = size;
167         my_elem->data.pid = pid;
168
169         if (name_len > TASK_COMM_LEN)
170                 name_len = TASK_COMM_LEN;
171
172         memcpy(&my_elem->data.name, name, name_len);
173
174         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
175
176         list_add_tail(&my_elem->list, &sep->sep_queue_status);
177         sep->sep_queue_num++;
178
179         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
180
181         return my_elem;
182 }
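/*
 * A minimal usage sketch of the two status queue helpers above,
 * assuming a caller that keeps the returned element (the ioctl paths
 * in this driver do so via their private data):
 *
 *	struct sep_queue_info *elem;
 *
 *	elem = sep_queue_status_add(sep, opcode, size, current->pid,
 *				    current->comm, strlen(current->comm));
 *	if (!elem)
 *		return -ENOMEM;
 *	... run the transaction ...
 *	sep_queue_status_remove(sep, &elem);	 (also NULLs the pointer)
 */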
183
184 /**
185  *      sep_allocate_dmatables_region - Allocates a buffer for the MLLI/DMA tables
186  *      @sep: SEP device
187  *      @dmatables_region: Destination pointer for the buffer
188  *      @dma_ctx: DMA context for the transaction
189  *      @table_count: Number of MLLI/DMA tables to create
190  *      The buffer created will not work as-is for DMA operations;
191  *      it needs to be copied over to the appropriate place in the
192  *      shared area.
193  */
194 static int sep_allocate_dmatables_region(struct sep_device *sep,
195                                          void **dmatables_region,
196                                          struct sep_dma_context *dma_ctx,
197                                          const u32 table_count)
198 {
199         const size_t new_len =
200                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
201
202         void *tmp_region = NULL;
203
204         dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
205                                 current->pid, dma_ctx);
206         dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
207                                 current->pid, dmatables_region);
208
209         if (!dma_ctx || !dmatables_region) {
210                 dev_warn(&sep->pdev->dev,
211                         "[PID%d] dma context/region uninitialized\n",
212                         current->pid);
213                 return -EINVAL;
214         }
215
216         dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
217                                 current->pid, new_len);
218         dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
219                                 dma_ctx->dmatables_len);
220         tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
221         if (!tmp_region)
222                 return -ENOMEM;
223
224         /* Were there any previous tables that need to be preserved ? */
225         if (*dmatables_region) {
226                 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
227                 kfree(*dmatables_region);
228                 *dmatables_region = NULL;
229         }
230
231         *dmatables_region = tmp_region;
232
233         dma_ctx->dmatables_len += new_len;
234
235         return 0;
236 }
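/*
 * The grow-and-copy sequence above is essentially an open-coded
 * krealloc(). A sketch of the equivalent (a simplification; note that
 * krealloc() would not zero the newly extended tail the way the
 * kzalloc() above does):
 *
 *	void *tmp = krealloc(*dmatables_region,
 *			     dma_ctx->dmatables_len + new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	*dmatables_region = tmp;
 *	dma_ctx->dmatables_len += new_len;
 */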
237
238 /**
239  *      sep_wait_transaction - Used for synchronizing transactions
240  *      @sep: SEP device
241  */
242 int sep_wait_transaction(struct sep_device *sep)
243 {
244         int error = 0;
245         DEFINE_WAIT(wait);
246
247         if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
248                                 &sep->in_use_flags)) {
249                 dev_dbg(&sep->pdev->dev,
250                         "[PID%d] no transactions, returning\n",
251                                 current->pid);
252                 goto end_function_setpid;
253         }
254
255         /*
256          * Looping needed even for exclusive waitq entries
257          * due to process wakeup latencies, previous process
258          * might have already created another transaction.
259          */
260         for (;;) {
261                 /*
262                  * Exclusive waitq entry, so that only one process is
263                  * woken up from the queue at a time.
264                  */
265                 prepare_to_wait_exclusive(&sep->event_transactions,
266                                           &wait,
267                                           TASK_INTERRUPTIBLE);
268                 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
269                                           &sep->in_use_flags)) {
270                         dev_dbg(&sep->pdev->dev,
271                                 "[PID%d] no transactions, breaking\n",
272                                         current->pid);
273                         break;
274                 }
275                 dev_dbg(&sep->pdev->dev,
276                         "[PID%d] transactions ongoing, sleeping\n",
277                                 current->pid);
278                 schedule();
279                 dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
280
281                 if (signal_pending(current)) {
282                         dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
283                                                         current->pid);
284                         error = -EINTR;
285                         goto end_function;
286                 }
287         }
288 end_function_setpid:
289         /*
290          * The pid_doing_transaction indicates that this process
291          * now owns the facilities to perform a transaction with
292          * the SEP. While this process is performing a transaction,
293          * no other process who has the SEP device open can perform
294          * any transactions. This method allows more than one process
295          * to have the device open at any given time, which provides
296          * finer granularity for device utilization by multiple
297          * processes.
298          */
299         /* Only one process is able to progress here at a time */
300         sep->pid_doing_transaction = current->pid;
301
302 end_function:
303         finish_wait(&sep->event_transactions, &wait);
304
305         return error;
306 }
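/*
 * The loop above is the classic exclusive-waitqueue handoff: the lock
 * is SEP_TRANSACTION_STARTED_LOCK_BIT, sleepers are queued exclusively
 * so that wake_up() rouses only one task, and the bit is re-tested
 * after every wakeup because another process may have taken it first.
 * The matching release side is in sep_end_transaction_handler():
 *
 *	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
 *	wake_up(&sep->event_transactions);
 */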
307
308 /**
309  * sep_check_transaction_owner - Checks if current process owns transaction
310  * @sep: SEP device
311  */
312 static inline int sep_check_transaction_owner(struct sep_device *sep)
313 {
314         dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
315                 current->pid,
316                 sep->pid_doing_transaction);
317
318         if ((sep->pid_doing_transaction == 0) ||
319                 (current->pid != sep->pid_doing_transaction)) {
320                 return -EACCES;
321         }
322
323         /* We own the transaction */
324         return 0;
325 }
326
327 #ifdef DEBUG
328
329 /**
330  * _sep_dump_message - dump the message that is pending
331  * @sep: SEP device
332  * This only prints the dump if DEBUG is set; it also
333  * follows the kernel debug print enabling.
334  */
335 static void _sep_dump_message(struct sep_device *sep)
336 {
337         int count;
338
339         u32 *p = sep->shared_addr;
340
341         for (count = 0; count < 10 * 4; count += 4)
342                 dev_dbg(&sep->pdev->dev,
343                         "[PID%d] Word %d of the message is %x\n",
344                                 current->pid, count/4, *p++);
345 }
346
347 #endif
348
349 /**
350  * sep_map_and_alloc_shared_area - allocate the shared block
351  * @sep: security processor
352  * The size allocated is taken from sep->shared_size.
353  */
354 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
355 {
356         sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
357                 sep->shared_size,
358                 &sep->shared_bus, GFP_KERNEL);
359
360         if (!sep->shared_addr) {
361                 dev_dbg(&sep->pdev->dev,
362                         "[PID%d] shared memory dma_alloc_coherent failed\n",
363                                 current->pid);
364                 return -ENOMEM;
365         }
366         dev_dbg(&sep->pdev->dev,
367                 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
368                                 current->pid,
369                                 sep->shared_size, sep->shared_addr,
370                                 (unsigned long long)sep->shared_bus);
371         return 0;
372 }
373
374 /**
375  * sep_unmap_and_free_shared_area - free shared block
376  * @sep: security processor
377  */
378 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
379 {
380         dma_free_coherent(&sep->pdev->dev, sep->shared_size,
381                                 sep->shared_addr, sep->shared_bus);
382 }
383
384 #ifdef DEBUG
385
386 /**
387  * sep_shared_bus_to_virt - convert bus/virt addresses
388  * @sep: pointer to struct sep_device
389  * @bus_address: address to convert
390  *
391  * Returns virtual address inside the shared area according
392  * to the bus address.
393  */
394 static void *sep_shared_bus_to_virt(struct sep_device *sep,
395                                                 dma_addr_t bus_address)
396 {
397         return sep->shared_addr + (bus_address - sep->shared_bus);
398 }
399
400 #endif
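/*
 * A worked example of the conversion above, with made-up addresses:
 * if shared_addr is 0xffffc90000100000 and shared_bus is 0x1f400000,
 * then bus address 0x1f400840 converts to
 * 0xffffc90000100000 + (0x1f400840 - 0x1f400000) = 0xffffc90000100840,
 * i.e. the same byte offset (0x840) into the shared area.
 */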
401
402 /**
403  * sep_open - device open method
404  * @inode: inode of SEP device
405  * @filp: file handle to SEP device
406  *
407  * Open method for the SEP device. Called when userspace opens
408  * the SEP device node.
409  *
410  * Returns zero on success otherwise an error code.
411  */
412 static int sep_open(struct inode *inode, struct file *filp)
413 {
414         struct sep_device *sep;
415         struct sep_private_data *priv;
416
417         dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
418
419         if (filp->f_flags & O_NONBLOCK)
420                 return -ENOTSUPP;
421
422         /*
423          * Get the SEP device structure and use it for the
424          * private_data field in filp for other methods
425          */
426
427         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
428         if (!priv)
429                 return -ENOMEM;
430
431         sep = sep_dev;
432         priv->device = sep;
433         filp->private_data = priv;
434
435         dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
436                                         current->pid, priv);
437
438         /* Anyone can open; locking takes place at transaction level */
439         return 0;
440 }
441
442 /**
443  * sep_free_dma_table_data_handler - free DMA table
444  * @sep: pointer to struct sep_device
445  * @dma_ctx: dma context
446  *
447  * Handles the request to free the DMA table for synchronous actions
448  */
449 int sep_free_dma_table_data_handler(struct sep_device *sep,
450                                            struct sep_dma_context **dma_ctx)
451 {
452         int count;
453         int dcb_counter;
454         /* Pointer to the current dma_resource struct */
455         struct sep_dma_resource *dma;
456
457         dev_dbg(&sep->pdev->dev,
458                 "[PID%d] sep_free_dma_table_data_handler\n",
459                         current->pid);
460
461         if (!dma_ctx || !(*dma_ctx)) {
462                 /* No context or context already freed */
463                 dev_dbg(&sep->pdev->dev,
464                         "[PID%d] no DMA context or context already freed\n",
465                                 current->pid);
466
467                 return 0;
468         }
469
470         dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
471                                         current->pid,
472                                         (*dma_ctx)->nr_dcb_creat);
473
474         for (dcb_counter = 0;
475              dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
476                 dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
477
478                 /* Unmap and free input map array */
479                 if (dma->in_map_array) {
480                         for (count = 0; count < dma->in_num_pages; count++) {
481                                 dma_unmap_page(&sep->pdev->dev,
482                                         dma->in_map_array[count].dma_addr,
483                                         dma->in_map_array[count].size,
484                                         DMA_TO_DEVICE);
485                         }
486                         kfree(dma->in_map_array);
487                 }
488
489                 /*
490                  * Output is handled differently. If
491                  * this was a secure DMA into restricted memory,
492                  * then we skip this step altogether, as restricted
493                  * memory is not available to the OS at all.
494                  */
495                 if (!(*dma_ctx)->secure_dma && dma->out_map_array) {
496
497                         for (count = 0; count < dma->out_num_pages; count++) {
498                                 dma_unmap_page(&sep->pdev->dev,
499                                         dma->out_map_array[count].dma_addr,
500                                         dma->out_map_array[count].size,
501                                         DMA_FROM_DEVICE);
502                         }
503                         kfree(dma->out_map_array);
504                 }
505
506                 /* Release the page cache for the input pages */
507                 if (dma->in_page_array) {
508                         for (count = 0; count < dma->in_num_pages; count++) {
509                                 flush_dcache_page(dma->in_page_array[count]);
510                                 page_cache_release(dma->in_page_array[count]);
511                         }
512                         kfree(dma->in_page_array);
513                 }
514
515                 /* Again, we do this only for non secure dma */
516                 if (!(*dma_ctx)->secure_dma && dma->out_page_array) {
517
518                         for (count = 0; count < dma->out_num_pages; count++) {
519                                 if (!PageReserved(dma->out_page_array[count]))
520                                         SetPageDirty(
521                                                 dma->out_page_array[count]);
522
523                                 /* Flush the cache and release the page */
524                                 flush_dcache_page(dma->out_page_array[count]);
525                                 page_cache_release(dma->out_page_array[count]);
526                         }
527                         kfree(dma->out_page_array);
528                 }
529
530                 /*
531                  * Note that here we use in_map_num_entries because we
532                  * don't have a page array; the page array is generated
533                  * only by sep_lock_user_pages, which is not called
534                  * for kernel crypto. The scatter-gather (sg) lists are
535                  * used exclusively for kernel crypto.
536                  */
537                 if (dma->src_sg) {
538                         dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
539                                 dma->in_map_num_entries, DMA_TO_DEVICE);
540                         dma->src_sg = NULL;
541                 }
542
543                 if (dma->dst_sg) {
544                         dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
545                                 dma->out_map_num_entries, DMA_FROM_DEVICE);
546                         dma->dst_sg = NULL;
547                 }
548
549                 /* Reset all the values */
550                 dma->in_page_array = NULL;
551                 dma->out_page_array = NULL;
552                 dma->in_num_pages = 0;
553                 dma->out_num_pages = 0;
554                 dma->in_map_array = NULL;
555                 dma->out_map_array = NULL;
556                 dma->in_map_num_entries = 0;
557                 dma->out_map_num_entries = 0;
558         }
559
560         (*dma_ctx)->nr_dcb_creat = 0;
561         (*dma_ctx)->num_lli_tables_created = 0;
562
563         kfree(*dma_ctx);
564         *dma_ctx = NULL;
565
566         dev_dbg(&sep->pdev->dev,
567                 "[PID%d] sep_free_dma_table_data_handler end\n",
568                         current->pid);
569
570         return 0;
571 }
572
573 /**
574  * sep_end_transaction_handler - end transaction
575  * @sep: pointer to struct sep_device
576  * @dma_ctx: DMA context
577  * @call_status: Call status
    * @my_queue_elem: pointer to the transaction's status queue element
578  *
579  * This API handles the end transaction request.
580  */
581 static int sep_end_transaction_handler(struct sep_device *sep,
582                                        struct sep_dma_context **dma_ctx,
583                                        struct sep_call_status *call_status,
584                                        struct sep_queue_info **my_queue_elem)
585 {
586         dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
587
588         /*
589          * Extraneous transaction clearing would mess up PM
590          * device usage counters and SEP would get suspended
591          * just before we send a command to SEP in the next
592          * transaction.
593          */
594         if (sep_check_transaction_owner(sep)) {
595                 dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
596                                                 current->pid);
597                 return 0;
598         }
599
600         /* Update queue status */
601         sep_queue_status_remove(sep, my_queue_elem);
602
603         /* Check that all the DMA resources were freed */
604         if (dma_ctx)
605                 sep_free_dma_table_data_handler(sep, dma_ctx);
606
607         /* Reset call status for next transaction */
608         if (call_status)
609                 call_status->status = 0;
610
611         /* Clear the message area to avoid next transaction reading
612          * sensitive results from previous transaction */
613         memset(sep->shared_addr, 0,
614                SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
615
616         /* start suspend delay */
617 #ifdef SEP_ENABLE_RUNTIME_PM
618         if (sep->in_use) {
619                 sep->in_use = 0;
620                 pm_runtime_mark_last_busy(&sep->pdev->dev);
621                 pm_runtime_put_autosuspend(&sep->pdev->dev);
622         }
623 #endif
624
625         clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
626         sep->pid_doing_transaction = 0;
627
628         /* Now it's safe for next process to proceed */
629         dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
630                                         current->pid);
631         clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
632         wake_up(&sep->event_transactions);
633
634         return 0;
635 }
636
637
638 /**
639  * sep_release - close a SEP device
640  * @inode: inode of SEP device
641  * @filp: file handle being closed
642  *
643  * Called on the final close of a SEP device.
644  */
645 static int sep_release(struct inode *inode, struct file *filp)
646 {
647         struct sep_private_data * const private_data = filp->private_data;
648         struct sep_call_status *call_status = &private_data->call_status;
649         struct sep_device *sep = private_data->device;
650         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
651         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
652
653         dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
654
655         sep_end_transaction_handler(sep, dma_ctx, call_status,
656                 my_queue_elem);
657
658         kfree(filp->private_data);
659
660         return 0;
661 }
662
663 /**
664  * sep_mmap - maps the shared area to user space
665  * @filp: pointer to struct file
666  * @vma: pointer to vm_area_struct
667  *
668  * Called on an mmap of our space via the normal SEP device
669  */
670 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
671 {
672         struct sep_private_data * const private_data = filp->private_data;
673         struct sep_call_status *call_status = &private_data->call_status;
674         struct sep_device *sep = private_data->device;
675         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
676         dma_addr_t bus_addr;
677         unsigned long error = 0;
678
679         dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
680
681         /* Set the transaction busy (own the device) */
682         /*
683          * Problem for multithreaded applications is that here we're
684          * possibly going to sleep while holding a write lock on
685          * current->mm->mmap_sem, which will cause deadlock for ongoing
686          * transaction trying to create DMA tables
687          */
688         error = sep_wait_transaction(sep);
689         if (error)
690                 /* Interrupted by signal, don't clear transaction */
691                 goto end_function;
692
693         /* Clear the message area to avoid next transaction reading
694          * sensitive results from previous transaction */
695         memset(sep->shared_addr, 0,
696                SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
697
698         /*
699          * Check that the size of the mapped range does not exceed the
700          * size of the shared message area
701          */
702         if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
703                 error = -EINVAL;
704                 goto end_function_with_error;
705         }
706
707         dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
708                                         current->pid, sep->shared_addr);
709
710         /* Get bus address */
711         bus_addr = sep->shared_bus;
712
713         if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
714                 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
715                 dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
716                                                 current->pid);
717                 error = -EAGAIN;
718                 goto end_function_with_error;
719         }
720
721         /* Update call status */
722         set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
723
724         goto end_function;
725
726 end_function_with_error:
727         /* Clear our transaction */
728         sep_end_transaction_handler(sep, NULL, call_status,
729                 my_queue_elem);
730
731 end_function:
732         return error;
733 }
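/*
 * Userspace view of the mapping above, as a sketch (the device node
 * name and the size constant are illustrative, not taken from this
 * file):
 *
 *	int fd = open("/dev/sep_driver", O_RDWR);
 *	void *shared = mmap(NULL, MMAP_AREA_SIZE,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mmap() must come first in a transaction: it is what calls
 * sep_wait_transaction() and makes the caller the transaction owner.
 */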
734
735 /**
736  * sep_poll - poll handler
737  * @filp:       pointer to struct file
738  * @wait:       pointer to poll_table
739  *
740  * Called by the OS when the kernel is asked to do a poll on
741  * a SEP file handle.
742  */
743 static unsigned int sep_poll(struct file *filp, poll_table *wait)
744 {
745         struct sep_private_data * const private_data = filp->private_data;
746         struct sep_call_status *call_status = &private_data->call_status;
747         struct sep_device *sep = private_data->device;
748         u32 mask = 0;
749         u32 retval = 0;
750         u32 retval2 = 0;
751         unsigned long lock_irq_flag;
752
753         /* Am I the process that owns the transaction? */
754         if (sep_check_transaction_owner(sep)) {
755                 dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
756                                                 current->pid);
757                 mask = POLLERR;
758                 goto end_function;
759         }
760
761         /* Check if send command or send_reply were activated previously */
762         if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
763                           &call_status->status)) {
764                 dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
765                                                 current->pid);
766                 mask = POLLERR;
767                 goto end_function;
768         }
769
770
771         /* Add the event to the polling wait table */
772         dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
773                                         current->pid);
774
775         poll_wait(filp, &sep->event_interrupt, wait);
776
777         dev_dbg(&sep->pdev->dev,
778                 "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
779                         current->pid, sep->send_ct, sep->reply_ct);
780
781         /* Check if error occurred during poll */
782         retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
783         if ((retval2 != 0x0) && (retval2 != 0x8)) {
784                 dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
785                                                 current->pid, retval2);
786                 mask |= POLLERR;
787                 goto end_function;
788         }
789
790         spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
791
792         if (sep->send_ct == sep->reply_ct) {
793                 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
794                 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
795                 dev_dbg(&sep->pdev->dev,
796                         "[PID%d] poll: data ready check (GPR2)  %x\n",
797                                 current->pid, retval);
798
799                 /* Check if this is a printf request */
800                 if ((retval >> 30) & 0x1) {
801                         dev_dbg(&sep->pdev->dev,
802                                 "[PID%d] poll: SEP printf request\n",
803                                         current->pid);
804                         goto end_function;
805                 }
806
807                 /* Check if this is a SEP reply or a SEP request */
808                 if (retval >> 31) {
809                         dev_dbg(&sep->pdev->dev,
810                                 "[PID%d] poll: SEP request\n",
811                                         current->pid);
812                 } else {
813                         dev_dbg(&sep->pdev->dev,
814                                 "[PID%d] poll: normal return\n",
815                                         current->pid);
816                         sep_dump_message(sep);
817                         dev_dbg(&sep->pdev->dev,
818                                 "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
819                                         current->pid);
820                         mask |= POLLIN | POLLRDNORM;
821                 }
822                 set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
823         } else {
824                 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
825                 dev_dbg(&sep->pdev->dev,
826                         "[PID%d] poll; no reply; returning mask of 0\n",
827                                 current->pid);
828                 mask = 0;
829         }
830
831 end_function:
832         return mask;
833 }
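/*
 * A sketch of the matching userspace wait, reusing the fd from the
 * earlier mmap() example; POLLIN | POLLRDNORM signals a SEP reply:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		parse_reply(shared);
 *
 * where parse_reply() is a placeholder for reading the reply out of
 * the mmap()ed message area.
 */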
834
835 /**
836  * sep_time_address - address in SEP memory of time
837  * @sep: SEP device we want the address from
838  *
839  * Return the address of the two dwords in memory used for time
840  * setting.
841  */
842 static u32 *sep_time_address(struct sep_device *sep)
843 {
844         return sep->shared_addr +
845                 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
846 }
847
848 /**
849  * sep_set_time - set the SEP time
850  * @sep: the SEP we are setting the time for
851  *
852  * Calculates time and sets it at the predefined address.
853  * Called with the SEP mutex held.
854  */
855 static unsigned long sep_set_time(struct sep_device *sep)
856 {
857         struct timeval time;
858         u32 *time_addr; /* Address of time as seen by the kernel */
859
860
861         do_gettimeofday(&time);
862
863         /* Set value in the SYSTEM MEMORY offset */
864         time_addr = sep_time_address(sep);
865
866         time_addr[0] = SEP_TIME_VAL_TOKEN;
867         time_addr[1] = time.tv_sec;
868
869         dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
870                                         current->pid, time.tv_sec);
871         dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
872                                         current->pid, time_addr);
873         dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
874                                         current->pid, sep->shared_addr);
875
876         return time.tv_sec;
877 }
878
879 /**
880  * sep_send_command_handler - kick off a command
881  * @sep: SEP being signalled
882  *
883  * This function raises an interrupt to SEP, signalling that it has a new
884  * command from the host.
885  *
886  * Note that this function does fall under the ioctl lock
887  */
888 int sep_send_command_handler(struct sep_device *sep)
889 {
890         unsigned long lock_irq_flag;
891         u32 *msg_pool;
892         int error = 0;
893
894         /* Basic sanity check; set msg pool to start of shared area */
895         msg_pool = (u32 *)sep->shared_addr;
896         msg_pool += 2;
897
898         /* Look for start msg token */
899         if (*msg_pool != SEP_START_MSG_TOKEN) {
900                 dev_warn(&sep->pdev->dev, "start message token not present\n");
901                 error = -EPROTO;
902                 goto end_function;
903         }
904
905         /* Do we have a reasonable size? */
906         msg_pool += 1;
907         if ((*msg_pool < 2) ||
908                 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
909
910                 dev_warn(&sep->pdev->dev, "invalid message size\n");
911                 error = -EPROTO;
912                 goto end_function;
913         }
914
915         /* Does the command look reasonable? */
916         msg_pool += 1;
917         if (*msg_pool < 2) {
918                 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
919                 error = -EPROTO;
920                 goto end_function;
921         }
922
923 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
924         dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
925                                         current->pid,
926                                         sep->pdev->dev.power.runtime_status);
927         sep->in_use = 1; /* device is about to be used */
928         pm_runtime_get_sync(&sep->pdev->dev);
929 #endif
930
931         if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
932                 error = -EPROTO;
933                 goto end_function;
934         }
935         sep->in_use = 1; /* device is about to be used */
936         sep_set_time(sep);
937
938         sep_dump_message(sep);
939
940         /* Update counter */
941         spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
942         sep->send_ct++;
943         spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
944
945         dev_dbg(&sep->pdev->dev,
946                 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
947                         current->pid, sep->send_ct, sep->reply_ct);
948
949         /* Send interrupt to SEP */
950         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
951
952 end_function:
953         return error;
954 }
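/*
 * The checks above imply this layout for the head of the shared
 * message area, in 32-bit words, as read by this function:
 *
 *	words 0-1: not examined here
 *	word 2:    SEP_START_MSG_TOKEN
 *	word 3:    message size in bytes (must be >= 2 and within the
 *	           maximum message size)
 *	word 4:    opcode (must be >= 2)
 */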
955
956 /**
957  *      sep_crypto_dma - DMA-map a scatterlist for SEP
958  *      @sep: pointer to struct sep_device
959  *      @sg: pointer to struct scatterlist
960  *      @direction: DMA direction of the mapping
961  *      @dma_maps: pointer to place a pointer to array of dma maps
962  *       This is filled in; anything previously there will be lost
963  *       The structure for dma maps is sep_dma_map
964  *      @returns number of dma maps on success; negative on error
965  *
966  *      This creates the dma table from the scatterlist.
967  *      It is used only for kernel crypto, as it works with the scatterlist
968  *      representation of data buffers.
969  *
970  */
971 static int sep_crypto_dma(
972         struct sep_device *sep,
973         struct scatterlist *sg,
974         struct sep_dma_map **dma_maps,
975         enum dma_data_direction direction)
976 {
977         struct scatterlist *temp_sg;
978
979         u32 count_segment;
980         u32 count_mapped;
981         struct sep_dma_map *sep_dma;
982         int ct1;
983
984         if (sg->length == 0)
985                 return 0;
986
987         /* Count the segments */
988         temp_sg = sg;
989         count_segment = 0;
990         while (temp_sg) {
991                 count_segment += 1;
992                 temp_sg = scatterwalk_sg_next(temp_sg);
993         }
994         dev_dbg(&sep->pdev->dev,
995                 "There are (hex) %x segments in sg\n", count_segment);
996
997         /* DMA map segments */
998         count_mapped = dma_map_sg(&sep->pdev->dev, sg,
999                 count_segment, direction);
1000
1001         dev_dbg(&sep->pdev->dev,
1002                 "There are (hex) %x maps in sg\n", count_mapped);
1003
1004         if (count_mapped == 0) {
1005                 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1006                 return -ENOMEM;
1007         }
1008
1009         sep_dma = kmalloc_array(count_mapped, sizeof(*sep_dma),
1010                 GFP_ATOMIC);
1011
1012         if (sep_dma == NULL) {
1013                 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1014                 return -ENOMEM;
1015         }
1016
1017         for_each_sg(sg, temp_sg, count_mapped, ct1) {
1018                 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1019                 sep_dma[ct1].size = sg_dma_len(temp_sg);
1020                 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1021                         ct1, (unsigned long)sep_dma[ct1].dma_addr,
1022                         (unsigned long)sep_dma[ct1].size);
1023         }
1024
1025         *dma_maps = sep_dma;
1026         return count_mapped;
1027
1028 }
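/*
 * Note that dma_map_sg() may coalesce entries (for instance, an IOMMU
 * can merge physically discontiguous segments), so count_mapped can be
 * smaller than count_segment. That is why the map array above is sized
 * and walked by count_mapped, using the sg_dma_address()/sg_dma_len()
 * accessors rather than the original page addresses.
 */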
1029
1030 /**
1031  *      sep_crypto_lli - build an LLI table from a scatterlist
1032  *      @sep: pointer to struct sep_device
1033  *      @sg: pointer to struct scatterlist
1034  *      @data_size: total data size
1035  *      @direction: DMA direction of the mapping
1036  *      @maps: pointer to place a pointer to array of dma maps
1037  *       This is filled in; anything previously there will be lost
1038  *       The structure for dma maps is sep_dma_map
1039  *      @llis: pointer to place a pointer to array of lli entries
1040  *       This is filled in; anything previously there will be lost
1041  *       The structure for lli entries is sep_lli_entry
1042  *      @returns number of dma maps on success; negative on error
1043  *
1044  *      This creates the LLI table from the scatterlist.
1045  *      It is only used for kernel crypto, as it works exclusively
1046  *      with the scatterlist (struct scatterlist) representation of
1047  *      data buffers.
1048  */
1049 static int sep_crypto_lli(
1050         struct sep_device *sep,
1051         struct scatterlist *sg,
1052         struct sep_dma_map **maps,
1053         struct sep_lli_entry **llis,
1054         u32 data_size,
1055         enum dma_data_direction direction)
1056 {
1057
1058         int ct1;
1059         struct sep_lli_entry *sep_lli;
1060         struct sep_dma_map *sep_map;
1061
1062         int nbr_ents;
1063
1064         nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1065         if (nbr_ents <= 0) {
1066                 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1067                         nbr_ents);
1068                 return nbr_ents;
1069         }
1070
1071         sep_map = *maps;
1072
1073         sep_lli = kmalloc_array(nbr_ents, sizeof(*sep_lli), GFP_ATOMIC);
1074
1075         if (sep_lli == NULL) {
1076                 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1077
1078                 kfree(*maps);
1079                 *maps = NULL;
1080                 return -ENOMEM;
1081         }
1082
1083         for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1084                 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1085
1086                 /* Maximum for page is total data size */
1087                 if (sep_map[ct1].size > data_size)
1088                         sep_map[ct1].size = data_size;
1089
1090                 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1091         }
1092
1093         *llis = sep_lli;
1094         return nbr_ents;
1095 }
1096
1097 /**
1098  *      sep_lock_kernel_pages - map kernel pages for DMA
1099  *      @sep: pointer to struct sep_device
1100  *      @kernel_virt_addr: address of data buffer in kernel
1101  *      @data_size: size of data
1102  *      @lli_array_ptr: lli array
1103  *      @in_out_flag: input into device or output from device
      *      @dma_ctx: pointer to struct sep_dma_context
1104  *
1105  *      This function maps the physical pages of the kernel scatterlist
1106  *      buffer for DMA and constructs a basic lli array, where each entry
1107  *      holds the bus address and the size of the data in that segment.
1108  *      This function is used only during kernel crypto module calls from
1109  *      within the kernel (when ioctl is not used).
1110  *
1111  *      This is used only for kernel crypto. Kernel pages
1112  *      are handled differently, as they are passed in via
1113  *      scatter-gather lists (struct scatterlist).
1114  */
1115 static int sep_lock_kernel_pages(struct sep_device *sep,
1116         unsigned long kernel_virt_addr,
1117         u32 data_size,
1118         struct sep_lli_entry **lli_array_ptr,
1119         int in_out_flag,
1120         struct sep_dma_context *dma_ctx)
1121
1122 {
1123         int num_pages;  /* signed: sep_crypto_lli() can return an error */
1124         struct scatterlist *sg;
1125
1126         /* Array of lli */
1127         struct sep_lli_entry *lli_array;
1128         /* Map array */
1129         struct sep_dma_map *map_array;
1130
1131         enum dma_data_direction direction;
1132
1133         lli_array = NULL;
1134         map_array = NULL;
1135
1136         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1137                 direction = DMA_TO_DEVICE;
1138                 sg = dma_ctx->src_sg;
1139         } else {
1140                 direction = DMA_FROM_DEVICE;
1141                 sg = dma_ctx->dst_sg;
1142         }
1143
1144         num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1145                 data_size, direction);
1146
1147         if (num_pages <= 0) {
1148                 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1149                         num_pages);
1150                 return -ENOMEM;
1151         }
1152
1153         /* Put mapped kernel sg into kernel resource array */
1154
1155         /* Set output params according to the in_out flag */
1156         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1157                 *lli_array_ptr = lli_array;
1158                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1159                                                                 num_pages;
1160                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1161                                                                 NULL;
1162                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1163                                                                 map_array;
1164                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1165                                                                 num_pages;
1166                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1167                         dma_ctx->src_sg;
1168         } else {
1169                 *lli_array_ptr = lli_array;
1170                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1171                                                                 num_pages;
1172                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1173                                                                 NULL;
1174                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1175                                                                 map_array;
1176                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1177                                         out_map_num_entries = num_pages;
1178                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1179                         dma_ctx->dst_sg;
1180         }
1181
1182         return 0;
1183 }
1184
1185 /**
1186  * sep_lock_user_pages - lock and map user pages for DMA
1187  * @sep: pointer to struct sep_device
1188  * @app_virt_addr: user memory data buffer
1189  * @data_size: size of data buffer
1190  * @lli_array_ptr: lli array
1191  * @in_out_flag: input or output to device
      * @dma_ctx: pointer to struct sep_dma_context
1192  *
1193  * This function locks all the physical pages of the application
1194  * virtual buffer and constructs a basic lli array, where each entry
1195  * holds the physical page address and the size that the application
1196  * data holds in these physical pages
1197  */
1198 static int sep_lock_user_pages(struct sep_device *sep,
1199         u32 app_virt_addr,
1200         u32 data_size,
1201         struct sep_lli_entry **lli_array_ptr,
1202         int in_out_flag,
1203         struct sep_dma_context *dma_ctx)
1204
1205 {
1206         int error = 0;
1207         u32 count;
1208         int result;
1209         /* The page of the end address of the user space buffer */
1210         u32 end_page;
1211         /* The page of the start address of the user space buffer */
1212         u32 start_page;
1213         /* The range in pages */
1214         u32 num_pages;
1215         /* Array of pointers to page */
1216         struct page **page_array;
1217         /* Array of lli */
1218         struct sep_lli_entry *lli_array;
1219         /* Map array */
1220         struct sep_dma_map *map_array;
1221
1222         /* Set start and end pages and num pages */
1223         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1224         start_page = app_virt_addr >> PAGE_SHIFT;
1225         num_pages = end_page - start_page + 1;
1226
1227         dev_dbg(&sep->pdev->dev,
1228                 "[PID%d] lock user pages app_virt_addr is %x\n",
1229                         current->pid, app_virt_addr);
1230
1231         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1232                                         current->pid, data_size);
1233         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1234                                         current->pid, start_page);
1235         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1236                                         current->pid, end_page);
1237         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1238                                         current->pid, num_pages);
1239
1240         /* Allocate array of pages structure pointers */
1241         page_array = kmalloc_array(num_pages, sizeof(struct page *),
1242                                    GFP_ATOMIC);
1243         if (!page_array) {
1244                 error = -ENOMEM;
1245                 goto end_function;
1246         }
1247
1248         map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
1249                                   GFP_ATOMIC);
1250         if (!map_array) {
1251                 error = -ENOMEM;
1252                 goto end_function_with_error1;
1253         }
1254
1255         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1256                                   GFP_ATOMIC);
1257         if (!lli_array) {
1258                 error = -ENOMEM;
1259                 goto end_function_with_error2;
1260         }
1261
1262         /* Convert the application virtual address into a set of physical */
1263         result = get_user_pages_fast(app_virt_addr, num_pages,
1264                 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);
1265
1266         /* Check the number of pages locked - if not all then exit with error */
1267         if (result != num_pages) {
1268                 dev_warn(&sep->pdev->dev,
1269                         "[PID%d] not all pages locked by get_user_pages, "
1270                         "result 0x%X, num_pages 0x%X\n",
1271                                 current->pid, result, num_pages);
1272                 /* Release any pages that were pinned before failing */
                     while (result > 0)
                             page_cache_release(page_array[--result]);
                     error = -ENOMEM;
1273                 goto end_function_with_error3;
1274         }
1275
1276         dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1277                                         current->pid);
1278
1279         /*
1280          * Fill the array using page array data and
1281          * map the pages - this action will also flush the cache as needed
1282          */
1283         for (count = 0; count < num_pages; count++) {
1284                 /* Fill the map array */
1285                 map_array[count].dma_addr =
1286                         dma_map_page(&sep->pdev->dev, page_array[count],
1287                         0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1288
1289                 map_array[count].size = PAGE_SIZE;
1290
1291                 /* Fill the lli array entry */
1292                 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1293                 lli_array[count].block_size = PAGE_SIZE;
1294
1295                 dev_dbg(&sep->pdev->dev,
1296                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1297                         "lli_array[%x].block_size is (hex) %x\n", current->pid,
1298                         count, (unsigned long)lli_array[count].bus_address,
1299                         count, lli_array[count].block_size);
1300         }
1301
1302         /* Check the offset for the first page */
1303         lli_array[0].bus_address =
1304                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1305
1306         /* Check that not all the data is in the first page only */
1307         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1308                 lli_array[0].block_size = data_size;
1309         else
1310                 lli_array[0].block_size =
1311                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1312
1313         dev_dbg(&sep->pdev->dev,
1314                 "[PID%d] After check if page 0 has all data\n",
1315                 current->pid);
1316         dev_dbg(&sep->pdev->dev,
1317                 "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1318                 "lli_array[0].block_size is (hex) %x\n",
1319                 current->pid,
1320                 (unsigned long)lli_array[0].bus_address,
1321                 lli_array[0].block_size);
1322
1323
1324         /* Check the size of the last page */
1325         if (num_pages > 1) {
1326                 lli_array[num_pages - 1].block_size =
1327                         (app_virt_addr + data_size) & (~PAGE_MASK);
1328                 if (lli_array[num_pages - 1].block_size == 0)
1329                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1330
1331                 dev_dbg(&sep->pdev->dev,
1332                         "[PID%d] After last page size adjustment\n",
1333                         current->pid);
1334                 dev_dbg(&sep->pdev->dev,
1335                         "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1336                         "lli_array[%x].block_size is (hex) %x\n",
1337                         current->pid,
1338                         num_pages - 1,
1339                         (unsigned long)lli_array[num_pages - 1].bus_address,
1340                         num_pages - 1,
1341                         lli_array[num_pages - 1].block_size);
1342         }
1343
1344         /* Set output params according to the in_out flag */
1345         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1346                 *lli_array_ptr = lli_array;
1347                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1348                                                                 num_pages;
1349                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1350                                                                 page_array;
1351                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1352                                                                 map_array;
1353                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1354                                                                 num_pages;
1355                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1356         } else {
1357                 *lli_array_ptr = lli_array;
1358                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1359                                                                 num_pages;
1360                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1361                                                                 page_array;
1362                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1363                                                                 map_array;
1364                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1365                                         out_map_num_entries = num_pages;
1366                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1367         }
1368         goto end_function;
1369
1370 end_function_with_error3:
1371         /* Free lli array */
1372         kfree(lli_array);
1373
1374 end_function_with_error2:
1375         kfree(map_array);
1376
1377 end_function_with_error1:
1378         /* Free page array */
1379         kfree(page_array);
1380
1381 end_function:
1382         return error;
1383 }
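/*
 * A worked example of the first/last page adjustments above, with
 * made-up numbers: app_virt_addr = 0x1234, data_size = 0x3000 and
 * PAGE_SIZE = 0x1000 give start_page = 1, end_page = 4 and
 * num_pages = 4. lli_array[0] then covers 0xdcc bytes
 * (0x1000 - 0x234) starting at offset 0x234 into the first page,
 * the two middle pages cover 0x1000 bytes each, and lli_array[3]
 * covers (0x1234 + 0x3000) & ~PAGE_MASK = 0x234 bytes; the block
 * sizes sum to 0xdcc + 0x2000 + 0x234 = 0x3000 = data_size.
 */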
1384
1385 /**
1386  *      sep_lli_table_secure_dma - get lli array for IMR addresses
1387  *      @sep: pointer to struct sep_device
1388  *      @app_virt_addr: user memory data buffer
1389  *      @data_size: size of data buffer
1390  *      @lli_array_ptr: lli array
1391  *      @in_out_flag: not used
1392  *      @dma_ctx: pointer to struct sep_dma_context
1393  *
1394  *      This function creates lli tables for outputting data to
1396  *      IMR memory, which is memory that cannot be accessed by
1397  *      the x86 processor.
1397  */
1398 static int sep_lli_table_secure_dma(struct sep_device *sep,
1399         u32 app_virt_addr,
1400         u32 data_size,
1401         struct sep_lli_entry **lli_array_ptr,
1402         int in_out_flag,
1403         struct sep_dma_context *dma_ctx)
1404
1405 {
1406         int error = 0;
1407         u32 count;
1408         /* The page of the end address of the user space buffer */
1409         u32 end_page;
1410         /* The page of the start address of the user space buffer */
1411         u32 start_page;
1412         /* The range in pages */
1413         u32 num_pages;
1414         /* Array of lli */
1415         struct sep_lli_entry *lli_array;
1416
1417         /* Set start and end pages and num pages */
1418         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1419         start_page = app_virt_addr >> PAGE_SHIFT;
1420         num_pages = end_page - start_page + 1;
1421
1422         dev_dbg(&sep->pdev->dev,
1423                 "[PID%d] lock user pages  app_virt_addr is %x\n",
1424                 current->pid, app_virt_addr);
1425
1426         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1427                 current->pid, data_size);
1428         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1429                 current->pid, start_page);
1430         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1431                 current->pid, end_page);
1432         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1433                 current->pid, num_pages);
1434
1435         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1436                                   GFP_ATOMIC);
1437         if (!lli_array)
1438                 return -ENOMEM;
1439
1440         /*
1441          * Fill the lli_array
1442          */
1443         start_page = start_page << PAGE_SHIFT;
1444         for (count = 0; count < num_pages; count++) {
1445                 /* Fill the lli array entry */
1446                 lli_array[count].bus_address = start_page;
1447                 lli_array[count].block_size = PAGE_SIZE;
1448
1449                 start_page += PAGE_SIZE;
1450
1451                 dev_dbg(&sep->pdev->dev,
1452                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1453                         "lli_array[%x].block_size is (hex) %x\n",
1454                         current->pid,
1455                         count, (unsigned long)lli_array[count].bus_address,
1456                         count, lli_array[count].block_size);
1457         }
1458
1459         /* Check the offset for the first page */
1460         lli_array[0].bus_address =
1461                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1462
1463         /* Check whether all the data fits in the first page */
1464         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1465                 lli_array[0].block_size = data_size;
1466         else
1467                 lli_array[0].block_size =
1468                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1469
1470         dev_dbg(&sep->pdev->dev,
1471                 "[PID%d] After check if page 0 has all data\n"
1472                 "lli_array[0].bus_address is (hex) %08lx, "
1473                 "lli_array[0].block_size is (hex) %x\n",
1474                 current->pid,
1475                 (unsigned long)lli_array[0].bus_address,
1476                 lli_array[0].block_size);
1477
1478         /* Check the size of the last page */
1479         if (num_pages > 1) {
1480                 lli_array[num_pages - 1].block_size =
1481                         (app_virt_addr + data_size) & (~PAGE_MASK);
1482                 if (lli_array[num_pages - 1].block_size == 0)
1483                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1484
1485                 dev_dbg(&sep->pdev->dev,
1486                         "[PID%d] After last page size adjustment\n"
1487                         "lli_array[%x].bus_address is (hex) %08lx, "
1488                         "lli_array[%x].block_size is (hex) %x\n",
1489                         current->pid, num_pages - 1,
1490                         (unsigned long)lli_array[num_pages - 1].bus_address,
1491                         num_pages - 1,
1492                         lli_array[num_pages - 1].block_size);
1493         }
1494         *lli_array_ptr = lli_array;
1495         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1496         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1497         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1498         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1499
1500         return error;
1501 }
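
/*
 * Editor's sketch (not driver code): the page-span arithmetic used above,
 * shown on hypothetical values, assuming 4 KiB pages (PAGE_SHIFT == 12).
 */
#if 0
static void example_page_span(void)
{
	u32 app_virt_addr = 0x1ff0;	/* hypothetical buffer address */
	u32 data_size = 0x30;		/* hypothetical buffer length */

	u32 start_page = app_virt_addr >> 12;			/* 0x1 */
	u32 end_page = (app_virt_addr + data_size - 1) >> 12;	/* 0x2 */
	u32 num_pages = end_page - start_page + 1;		/* 2 */

	/* First-page chunk: PAGE_SIZE - offset = 0x1000 - 0xff0 = 0x10 */
	/* Last-page chunk: (addr + size) & ~PAGE_MASK = 0x2020 & 0xfff = 0x20 */
}
#endif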
1502
1503 /**
1504  * sep_calculate_lli_table_max_size - size the LLI table
1505  * @sep: pointer to struct sep_device
1506  * @lli_in_array_ptr: lli array to size the table from
1507  * @num_array_entries: number of entries in the lli array
1508  * @last_table_flag: set when the last table is being built
1509  *
1510  * This function calculates the size of data that can be inserted into
1511  * the lli table from this array, such that either the table is full
1512  * (all entries are entered), or there are no more entries in the
1513  * lli array
1514  */
1515 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1516         struct sep_lli_entry *lli_in_array_ptr,
1517         u32 num_array_entries,
1518         u32 *last_table_flag)
1519 {
1520         u32 counter;
1521         /* Table data size */
1522         u32 table_data_size = 0;
1523         /* Data size for the next table */
1524         u32 next_table_data_size;
1525
1526         *last_table_flag = 0;
1527
1528         /*
1529          * Calculate the data in the lli table till we fill the whole
1530          * table or till the data has ended
1531          */
1532         for (counter = 0;
1533                 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1534                         (counter < num_array_entries); counter++)
1535                 table_data_size += lli_in_array_ptr[counter].block_size;
1536
1537         /*
1538          * Check if we reached the last entry,
1539          * meaning this is the last table to build,
1540          * and no need to check the block alignment
1541          */
1542         if (counter == num_array_entries) {
1543                 /* Set the last table flag */
1544                 *last_table_flag = 1;
1545                 goto end_function;
1546         }
1547
1548         /*
1549          * Calculate the data size of the next table.
1550          * Stop if no entries are left or if data size exceeds the DMA restriction
1551          */
1552         next_table_data_size = 0;
1553         for (; counter < num_array_entries; counter++) {
1554                 next_table_data_size += lli_in_array_ptr[counter].block_size;
1555                 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1556                         break;
1557         }
1558
1559         /*
1560          * Check if the next table data size is less than the DMA restriction.
1561          * If it is, recalculate the current table size so that the next
1562          * table data size will be adequate for DMA
1563          */
1564         if (next_table_data_size &&
1565                 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1566
1567                 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1568                         next_table_data_size);
1569
1570 end_function:
1571         return table_data_size;
1572 }
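
/*
 * Editor's worked example (hypothetical numbers): suppose a full table
 * sums to table_data_size = 0x8000 but the entries left over for the
 * next table sum to only next_table_data_size = 0x20, with
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE assumed to be 0x80. The function
 * then returns 0x8000 - (0x80 - 0x20) = 0x7fa0, handing 0x60 bytes back
 * so the next table meets the DMA minimum.
 */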
1573
1574 /**
1575  * sep_build_lli_table - build an lli table from the given lli array
1576  * @sep: pointer to struct sep_device
1577  * @lli_array_ptr: pointer to lli array
1578  * @lli_table_ptr: pointer to lli table
1579  * @num_processed_entries_ptr: pointer to number of lli array entries consumed
1580  * @num_table_entries_ptr: pointer to number of entries in the built table
1581  * @table_data_size: total data size
1582  *
1583  * Builds an lli table from the lli_array according to
1584  * the given size of data
1585  */
1586 static void sep_build_lli_table(struct sep_device *sep,
1587         struct sep_lli_entry    *lli_array_ptr,
1588         struct sep_lli_entry    *lli_table_ptr,
1589         u32 *num_processed_entries_ptr,
1590         u32 *num_table_entries_ptr,
1591         u32 table_data_size)
1592 {
1593         /* Current table data size */
1594         u32 curr_table_data_size;
1595         /* Counter of lli array entry */
1596         u32 array_counter;
1597
1598         /* Init current table data size and lli array entry counter */
1599         curr_table_data_size = 0;
1600         array_counter = 0;
1601         *num_table_entries_ptr = 1;
1602
1603         dev_dbg(&sep->pdev->dev,
1604                 "[PID%d] build lli table table_data_size: (hex) %x\n",
1605                         current->pid, table_data_size);
1606
1607         /* Fill the table till table size reaches the needed amount */
1608         while (curr_table_data_size < table_data_size) {
1609                 /* Update the number of entries in table */
1610                 (*num_table_entries_ptr)++;
1611
1612                 lli_table_ptr->bus_address =
1613                         cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1614
1615                 lli_table_ptr->block_size =
1616                         cpu_to_le32(lli_array_ptr[array_counter].block_size);
1617
1618                 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1619
1620                 dev_dbg(&sep->pdev->dev,
1621                         "[PID%d] lli_table_ptr is %p\n",
1622                                 current->pid, lli_table_ptr);
1623                 dev_dbg(&sep->pdev->dev,
1624                         "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1625                                 current->pid,
1626                                 (unsigned long)lli_table_ptr->bus_address);
1627
1628                 dev_dbg(&sep->pdev->dev,
1629                         "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1630                                 current->pid, lli_table_ptr->block_size);
1631
1632                 /* Check for overflow of the table data */
1633                 if (curr_table_data_size > table_data_size) {
1634                         dev_dbg(&sep->pdev->dev,
1635                                 "[PID%d] curr_table_data_size too large\n",
1636                                         current->pid);
1637
1638                         /* Update the size of block in the table */
1639                         lli_table_ptr->block_size =
1640                                 cpu_to_le32(lli_table_ptr->block_size) -
1641                                 (curr_table_data_size - table_data_size);
1642
1643                         /* Update the physical address in the lli array */
1644                         lli_array_ptr[array_counter].bus_address +=
1645                                 cpu_to_le32(lli_table_ptr->block_size);
1646
1647                         /* Update the block size left in the lli array */
1648                         lli_array_ptr[array_counter].block_size =
1649                                 (curr_table_data_size - table_data_size);
1650                 } else
1651                         /* Advance to the next entry in the lli_array */
1652                         array_counter++;
1653
1654                 dev_dbg(&sep->pdev->dev,
1655                         "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1656                                 current->pid,
1657                                 (unsigned long)lli_table_ptr->bus_address);
1658                 dev_dbg(&sep->pdev->dev,
1659                         "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1660                                 current->pid,
1661                                 lli_table_ptr->block_size);
1662
1663                 /* Move to the next entry in table */
1664                 lli_table_ptr++;
1665         }
1666
1667         /* Set the info entry to default */
1668         lli_table_ptr->bus_address = 0xffffffff;
1669         lli_table_ptr->block_size = 0;
1670
1671         /* Set the output parameter */
1672         *num_processed_entries_ptr += array_counter;
1673
1674 }
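
/*
 * Editor's note (illustrative): when the running total overshoots
 * table_data_size, the current lli array entry is split rather than
 * skipped - the table entry keeps only the bytes still needed, the
 * array entry's bus_address is advanced past them, and its block_size
 * is left holding the overshoot, so the next table resumes at exactly
 * the right byte.
 */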
1675
1676 /**
1677  * sep_shared_area_virt_to_bus - map shared area to bus address
1678  * @sep: pointer to struct sep_device
1679  * @virt_address: virtual address to convert
1680  *
1681  * This function returns the physical address inside the shared area according
1682  * to the virtual address. It can be either on the external RAM device
1683  * (ioremapped), or on the system RAM
1684  * This implementation is for the external RAM
1685  */
1686 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1687         void *virt_address)
1688 {
1689         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1690                                         current->pid, virt_address);
1691         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1692                 current->pid,
1693                 (unsigned long)
1694                 sep->shared_bus + (virt_address - sep->shared_addr));
1695
1696         return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1697 }
1698
1699 /**
1700  * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1701  * @sep: pointer to struct sep_device
1702  * @bus_address: bus address to convert
1703  *
1704  * This function returns the virtual address inside the shared area
1705  * according to the physical address. It can be either on the
1706  * external RAM device (ioremapped), or on the system RAM
1707  * This implementation is for the external RAM
1708  */
1709 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1710         dma_addr_t bus_address)
1711 {
1712         dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1713                 current->pid,
1714                 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1715                         (size_t)(bus_address - sep->shared_bus)));
1716
1717         return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1718 }
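
/*
 * Editor's sketch (not driver code): the two helpers above are mirror
 * offsets from the same (shared_addr, shared_bus) base pair, so for any
 * pointer inside the shared area the round trip is the identity.
 */
#if 0
static void example_shared_roundtrip(struct sep_device *sep, void *v)
{
	dma_addr_t b = sep_shared_area_virt_to_bus(sep, v);

	/* Holds for any v within the shared area */
	WARN_ON(sep_shared_area_bus_to_virt(sep, b) != v);
}
#endif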
1719
1720 /**
1721  * sep_debug_print_lli_tables - dump LLI table
1722  * @sep: pointer to struct sep_device
1723  * @lli_table_ptr: pointer to sep_lli_entry
1724  * @num_table_entries: number of entries
1725  * @table_data_size: total data size
1726  *
1727  * Walk the list of the created tables and print all the data
1728  */
1729 static void sep_debug_print_lli_tables(struct sep_device *sep,
1730         struct sep_lli_entry *lli_table_ptr,
1731         unsigned long num_table_entries,
1732         unsigned long table_data_size)
1733 {
1734 #ifdef DEBUG
1735         unsigned long table_count = 1;
1736         unsigned long entries_count = 0;
1737
1738         dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1739                                         current->pid);
1740         if (num_table_entries == 0) {
1741                 dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1742                         current->pid);
1743                 return;
1744         }
1745
1746         while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1747                 dev_dbg(&sep->pdev->dev,
1748                         "[PID%d] lli table %08lx, "
1749                         "table_data_size is (hex) %lx\n",
1750                                 current->pid, table_count, table_data_size);
1751                 dev_dbg(&sep->pdev->dev,
1752                         "[PID%d] num_table_entries is (hex) %lx\n",
1753                                 current->pid, num_table_entries);
1754
1755                 /* Print entries of the table (without info entry) */
1756                 for (entries_count = 0; entries_count < num_table_entries;
1757                         entries_count++, lli_table_ptr++) {
1758
1759                         dev_dbg(&sep->pdev->dev,
1760                                 "[PID%d] lli_table_ptr address is %08lx\n",
1761                                 current->pid,
1762                                 (unsigned long) lli_table_ptr);
1763
1764                         dev_dbg(&sep->pdev->dev,
1765                                 "[PID%d] phys address is %08lx "
1766                                 "block size is (hex) %x\n", current->pid,
1767                                 (unsigned long)lli_table_ptr->bus_address,
1768                                 lli_table_ptr->block_size);
1769                 }
1770
1771                 /* Point to the info entry */
1772                 lli_table_ptr--;
1773
1774                 dev_dbg(&sep->pdev->dev,
1775                         "[PID%d] phys lli_table_ptr->block_size "
1776                         "is (hex) %x\n",
1777                         current->pid,
1778                         lli_table_ptr->block_size);
1779
1780                 dev_dbg(&sep->pdev->dev,
1781                         "[PID%d] phys lli_table_ptr->physical_address "
1782                         "is %08lx\n",
1783                         current->pid,
1784                         (unsigned long)lli_table_ptr->bus_address);
1785
1786
1787                 table_data_size = lli_table_ptr->block_size & 0xffffff;
1788                 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1789
1790                 dev_dbg(&sep->pdev->dev,
1791                         "[PID%d] phys table_data_size is "
1792                         "(hex) %lx num_table_entries is"
1793                         " %lx bus_address is %lx\n",
1794                                 current->pid,
1795                                 table_data_size,
1796                                 num_table_entries,
1797                                 (unsigned long)lli_table_ptr->bus_address);
1798
1799                 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1800                         lli_table_ptr = (struct sep_lli_entry *)
1801                                 sep_shared_bus_to_virt(sep,
1802                                 (unsigned long)lli_table_ptr->bus_address);
1803
1804                 table_count++;
1805         }
1806         dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1807                                         current->pid);
1808 #endif
1809 }
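
/*
 * Editor's sketch (not driver code): the "info" entry format decoded in
 * the debug dump above. The last entry of each table either terminates
 * the chain (bus_address == 0xffffffff) or points at the next table,
 * with block_size packing the next table's entry count and data size.
 */
#if 0
static inline u32 example_info_pack(u32 num_entries, u32 data_size)
{
	return (num_entries << 24) | (data_size & 0xffffff);
}

static inline u32 example_info_entries(u32 block_size)
{
	return (block_size >> 24) & 0xff;
}

static inline u32 example_info_data_size(u32 block_size)
{
	return block_size & 0xffffff;
}
#endif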
1810
1811
1812 /**
1813  * sep_prepare_empty_lli_table - create a blank LLI table
1814  * @sep: pointer to struct sep_device
1815  * @lli_table_addr_ptr: pointer to lli table
1816  * @num_entries_ptr: pointer to number of entries
1817  * @table_data_size_ptr: point to table data size
1818  * @dmatables_region: Optional buffer for DMA tables
1819  * @dma_ctx: DMA context
1820  *
1821  * This function creates empty lli tables when there is no data
1822  */
1823 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1824                 dma_addr_t *lli_table_addr_ptr,
1825                 u32 *num_entries_ptr,
1826                 u32 *table_data_size_ptr,
1827                 void **dmatables_region,
1828                 struct sep_dma_context *dma_ctx)
1829 {
1830         struct sep_lli_entry *lli_table_ptr;
1831
1832         /* Find the area for new table */
1833         lli_table_ptr =
1834                 (struct sep_lli_entry *)(sep->shared_addr +
1835                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1836                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1837                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1838
1839         if (dmatables_region && *dmatables_region)
1840                 lli_table_ptr = *dmatables_region;
1841
1842         lli_table_ptr->bus_address = 0;
1843         lli_table_ptr->block_size = 0;
1844
1845         lli_table_ptr++;
1846         lli_table_ptr->bus_address = 0xFFFFFFFF;
1847         lli_table_ptr->block_size = 0;
1848
1849         /* Set the output parameter value */
1850         *lli_table_addr_ptr = sep->shared_bus +
1851                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1852                 dma_ctx->num_lli_tables_created *
1853                 sizeof(struct sep_lli_entry) *
1854                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1855
1856         /* Set the num of entries and table data size for empty table */
1857         *num_entries_ptr = 2;
1858         *table_data_size_ptr = 0;
1859
1860         /* Update the number of created tables */
1861         dma_ctx->num_lli_tables_created++;
1862 }
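
/*
 * Editor's note (illustrative): the resulting empty table is exactly two
 * entries long -
 *	entry 0: bus_address = 0,          block_size = 0  (no data)
 *	entry 1: bus_address = 0xffffffff, block_size = 0  (info/terminator)
 * which is why *num_entries_ptr is set to 2 and *table_data_size_ptr to 0.
 */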
1863
1864 /**
1865  * sep_prepare_input_dma_table - prepare input DMA mappings
1866  * @sep: pointer to struct sep_device
1867  * @data_size: size of the data buffer in bytes
1868  * @block_size: block size of the symmetric operation
1869  * @lli_table_ptr: returned bus address of the first input lli table
1870  * @num_entries_ptr: returned number of entries in the first table
1871  * @table_data_size_ptr: returned data size of the first table
1872  * @is_kva: set for kernel data (kernel crypto call)
1873  *
1874  * This function prepares only the input DMA table for synchronous symmetric
1875  * operations (HASH)
1876  * Note that all bus addresses that are passed to the SEP
1877  * are in 32 bit format; the SEP is a 32 bit device
1878  */
1879 static int sep_prepare_input_dma_table(struct sep_device *sep,
1880         unsigned long app_virt_addr,
1881         u32 data_size,
1882         u32 block_size,
1883         dma_addr_t *lli_table_ptr,
1884         u32 *num_entries_ptr,
1885         u32 *table_data_size_ptr,
1886         bool is_kva,
1887         void **dmatables_region,
1888         struct sep_dma_context *dma_ctx
1889 )
1890 {
1891         int error = 0;
1892         /* Pointer to the info entry of the table - the last entry */
1893         struct sep_lli_entry *info_entry_ptr;
1894         /* Array of pointers to page */
1895         struct sep_lli_entry *lli_array_ptr;
1896         /* Points to the first entry to be processed in the lli_in_array */
1897         u32 current_entry = 0;
1898         /* Num entries in the virtual buffer */
1899         u32 sep_lli_entries = 0;
1900         /* Lli table pointer */
1901         struct sep_lli_entry *in_lli_table_ptr;
1902         /* The total data in one table */
1903         u32 table_data_size = 0;
1904         /* Flag for last table */
1905         u32 last_table_flag = 0;
1906         /* Number of entries in lli table */
1907         u32 num_entries_in_table = 0;
1908         /* Next table address */
1909         void *lli_table_alloc_addr = NULL;
1910         void *dma_lli_table_alloc_addr = NULL;
1911         void *dma_in_lli_table_ptr = NULL;
1912
1913         dev_dbg(&sep->pdev->dev,
1914                 "[PID%d] prepare input dma tbl data size: (hex) %x\n",
1915                 current->pid, data_size);
1916
1917         dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1918                                         current->pid, block_size);
1919
1920         /* Initialize the pages pointers */
1921         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1922         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1923
1924         /* Set the kernel address for first table to be allocated */
1925         lli_table_alloc_addr = (void *)(sep->shared_addr +
1926                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1927                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1928                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1929
1930         if (data_size == 0) {
1931                 if (dmatables_region) {
1932                         error = sep_allocate_dmatables_region(sep,
1933                                                 dmatables_region,
1934                                                 dma_ctx,
1935                                                 1);
1936                         if (error)
1937                                 return error;
1938                 }
1939                 /* Special case - create empty table - 2 entries, zero data */
1940                 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1941                                 num_entries_ptr, table_data_size_ptr,
1942                                 dmatables_region, dma_ctx);
1943                 goto update_dcb_counter;
1944         }
1945
1946         /* Check if the pages are in Kernel Virtual Address layout */
1947         if (is_kva)
1948                 error = sep_lock_kernel_pages(sep, app_virt_addr,
1949                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1950                         dma_ctx);
1951         else
1952                 /*
1953                  * Lock the pages of the user buffer
1954                  * and translate them to pages
1955                  */
1956                 error = sep_lock_user_pages(sep, app_virt_addr,
1957                         data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1958                         dma_ctx);
1959
1960         if (error)
1961                 goto end_function;
1962
1963         dev_dbg(&sep->pdev->dev,
1964                 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1965                 current->pid,
1966                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1967
1968         current_entry = 0;
1969         info_entry_ptr = NULL;
1970
1971         sep_lli_entries =
1972                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1973
1974         dma_lli_table_alloc_addr = lli_table_alloc_addr;
1975         if (dmatables_region) {
1976                 error = sep_allocate_dmatables_region(sep,
1977                                         dmatables_region,
1978                                         dma_ctx,
1979                                         sep_lli_entries);
1980                 if (error)
1981                         goto end_function_error;
1982                 lli_table_alloc_addr = *dmatables_region;
1983         }
1984
1985         /* Loop till all the entries in the input array are processed */
1986         while (current_entry < sep_lli_entries) {
1987
1988                 /* Set the new input and output tables */
1989                 in_lli_table_ptr =
1990                         (struct sep_lli_entry *)lli_table_alloc_addr;
1991                 dma_in_lli_table_ptr =
1992                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
1993
1994                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1995                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1996                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1997                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1998
1999                 if (dma_lli_table_alloc_addr >
2000                         ((void *)sep->shared_addr +
2001                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2002                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2003
2004                         error = -ENOMEM;
2005                         goto end_function_error;
2006
2007                 }
2008
2009                 /* Update the number of created tables */
2010                 dma_ctx->num_lli_tables_created++;
2011
2012                 /* Calculate the maximum size of data for input table */
2013                 table_data_size = sep_calculate_lli_table_max_size(sep,
2014                         &lli_array_ptr[current_entry],
2015                         (sep_lli_entries - current_entry),
2016                         &last_table_flag);
2017
2018                 /*
2019                  * If this is not the last table -
2020                  * then align it to the block size
2021                  */
2022                 if (!last_table_flag)
2023                         table_data_size =
2024                                 (table_data_size / block_size) * block_size;
2025
2026                 dev_dbg(&sep->pdev->dev,
2027                         "[PID%d] output table_data_size is (hex) %x\n",
2028                                 current->pid,
2029                                 table_data_size);
2030
2031                 /* Construct input lli table */
2032                 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2033                         in_lli_table_ptr,
2034                         &current_entry, &num_entries_in_table, table_data_size);
2035
2036                 if (info_entry_ptr == NULL) {
2037
2038                         /* Set the output parameters to physical addresses */
2039                         *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2040                                 dma_in_lli_table_ptr);
2041                         *num_entries_ptr = num_entries_in_table;
2042                         *table_data_size_ptr = table_data_size;
2043
2044                         dev_dbg(&sep->pdev->dev,
2045                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2046                                 current->pid,
2047                                 (unsigned long)*lli_table_ptr);
2048
2049                 } else {
2050                         /* Update the info entry of the previous in table */
2051                         info_entry_ptr->bus_address =
2052                                 sep_shared_area_virt_to_bus(sep,
2053                                                         dma_in_lli_table_ptr);
2054                         info_entry_ptr->block_size =
2055                                 ((num_entries_in_table) << 24) |
2056                                 (table_data_size);
2057                 }
2058                 /* Save the pointer to the info entry of the current tables */
2059                 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2060         }
2061         /* Print input tables */
2062         if (!dmatables_region) {
2063                 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2064                         sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2065                         *num_entries_ptr, *table_data_size_ptr);
2066         }
2067
2068         /* Free the array of the pages */
2069         kfree(lli_array_ptr);
2070
2071 update_dcb_counter:
2072         /* Update DCB counter */
2073         dma_ctx->nr_dcb_creat++;
2074         goto end_function;
2075
2076 end_function_error:
2077         /* Free all the allocated resources */
2078         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2079         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2080         kfree(lli_array_ptr);
2081         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2082         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2083
2084 end_function:
2085         return error;
2086
2087 }
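
/*
 * Editor's note (illustrative): for a buffer spanning many pages the
 * function emits a chain of tables in the shared area, each table's info
 * entry pointing at the next:
 *
 *	table0 [entries | info] -> table1 [entries | info] -> ... -> last
 *
 * Only table0's bus address, entry count and data size are returned to
 * the caller; the SEP walks the rest of the chain via the info entries.
 */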
2088
2089 /**
2090  * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2091  * @sep: pointer to struct sep_device
2092  * @lli_in_array: lli array for the input buffer
2093  * @sep_in_lli_entries: number of entries in the input lli array
2094  * @lli_out_array: lli array for the output buffer
2095  * @sep_out_lli_entries: number of entries in the output lli array
2096  * @block_size: block size of the symmetric operation
2097  * @lli_table_in_ptr: returned bus address of the first input table
2098  * @lli_table_out_ptr: returned bus address of the first output table
2099  * @in_num_entries_ptr: returned number of entries in the first input table
2100  * @out_num_entries_ptr: returned number of entries in the first output table
2101  * @table_data_size_ptr: returned data size of the first table pair
2102  *
2103  * This function creates the input and output DMA tables for
2104  * symmetric operations (AES/DES) according to the block
2105  * size from LLI arrays
2106  * Note that all bus addresses that are passed to the SEP
2107  * are in 32 bit format; the SEP is a 32 bit device
2108  */
2109 static int sep_construct_dma_tables_from_lli(
2110         struct sep_device *sep,
2111         struct sep_lli_entry *lli_in_array,
2112         u32     sep_in_lli_entries,
2113         struct sep_lli_entry *lli_out_array,
2114         u32     sep_out_lli_entries,
2115         u32     block_size,
2116         dma_addr_t *lli_table_in_ptr,
2117         dma_addr_t *lli_table_out_ptr,
2118         u32     *in_num_entries_ptr,
2119         u32     *out_num_entries_ptr,
2120         u32     *table_data_size_ptr,
2121         void    **dmatables_region,
2122         struct sep_dma_context *dma_ctx)
2123 {
2124         /* Points to the area where next lli table can be allocated */
2125         void *lli_table_alloc_addr = NULL;
2126         /*
2127          * Points to the area in shared region where next lli table
2128          * can be allocated
2129          */
2130         void *dma_lli_table_alloc_addr = NULL;
2131         /* Input lli table in dmatables_region or shared region */
2132         struct sep_lli_entry *in_lli_table_ptr = NULL;
2133         /* Input lli table location in the shared region */
2134         struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2135         /* Output lli table in dmatables_region or shared region */
2136         struct sep_lli_entry *out_lli_table_ptr = NULL;
2137         /* Output lli table location in the shared region */
2138         struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2139         /* Pointer to the info entry of the table - the last entry */
2140         struct sep_lli_entry *info_in_entry_ptr = NULL;
2141         /* Pointer to the info entry of the table - the last entry */
2142         struct sep_lli_entry *info_out_entry_ptr = NULL;
2143         /* Points to the first entry to be processed in the lli_in_array */
2144         u32 current_in_entry = 0;
2145         /* Points to the first entry to be processed in the lli_out_array */
2146         u32 current_out_entry = 0;
2147         /* Max size of the input table */
2148         u32 in_table_data_size = 0;
2149         /* Max size of the output table */
2150         u32 out_table_data_size = 0;
2151         /* Flag that signifies if this is the last table built */
2152         u32 last_table_flag = 0;
2153         /* The data size that should be in table */
2154         u32 table_data_size = 0;
2155         /* Number of entries in the input table */
2156         u32 num_entries_in_table = 0;
2157         /* Number of entries in the output table */
2158         u32 num_entries_out_table = 0;
2159
2160         if (!dma_ctx) {
2161                 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2162                 return -EINVAL;
2163         }
2164
2165         /* Initiate to point after the message area */
2166         lli_table_alloc_addr = (void *)(sep->shared_addr +
2167                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2168                 (dma_ctx->num_lli_tables_created *
2169                 (sizeof(struct sep_lli_entry) *
2170                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2171         dma_lli_table_alloc_addr = lli_table_alloc_addr;
2172
2173         if (dmatables_region) {
2174                 /* 2 for both in+out table */
2175                 if (sep_allocate_dmatables_region(sep,
2176                                         dmatables_region,
2177                                         dma_ctx,
2178                                         2*sep_in_lli_entries))
2179                         return -ENOMEM;
2180                 lli_table_alloc_addr = *dmatables_region;
2181         }
2182
2183         /* Loop till all the entries in the input array are processed */
2184         while (current_in_entry < sep_in_lli_entries) {
2185                 /* Set the new input and output tables */
2186                 in_lli_table_ptr =
2187                         (struct sep_lli_entry *)lli_table_alloc_addr;
2188                 dma_in_lli_table_ptr =
2189                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2190
2191                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2192                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2193                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2194                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2195
2196                 /* Set the first output tables */
2197                 out_lli_table_ptr =
2198                         (struct sep_lli_entry *)lli_table_alloc_addr;
2199                 dma_out_lli_table_ptr =
2200                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2201
2202                 /* Check if the DMA table area limit was overrun */
2203                 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2204                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2205                         ((void *)sep->shared_addr +
2206                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2207                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2208
2209                         dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2210                         return -ENOMEM;
2211                 }
2212
2213                 /* Update the number of the lli tables created */
2214                 dma_ctx->num_lli_tables_created += 2;
2215
2216                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2217                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2218                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2219                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2220
2221                 /* Calculate the maximum size of data for input table */
2222                 in_table_data_size =
2223                         sep_calculate_lli_table_max_size(sep,
2224                         &lli_in_array[current_in_entry],
2225                         (sep_in_lli_entries - current_in_entry),
2226                         &last_table_flag);
2227
2228                 /* Calculate the maximum size of data for output table */
2229                 out_table_data_size =
2230                         sep_calculate_lli_table_max_size(sep,
2231                         &lli_out_array[current_out_entry],
2232                         (sep_out_lli_entries - current_out_entry),
2233                         &last_table_flag);
2234
2235                 if (!last_table_flag) {
2236                         in_table_data_size = (in_table_data_size /
2237                                 block_size) * block_size;
2238                         out_table_data_size = (out_table_data_size /
2239                                 block_size) * block_size;
2240                 }
2241
2242                 table_data_size = in_table_data_size;
2243                 if (table_data_size > out_table_data_size)
2244                         table_data_size = out_table_data_size;
2245
2246                 dev_dbg(&sep->pdev->dev,
2247                         "[PID%d] construct tables from lli"
2248                         " in_table_data_size is (hex) %x\n", current->pid,
2249                         in_table_data_size);
2250
2251                 dev_dbg(&sep->pdev->dev,
2252                         "[PID%d] construct tables from lli"
2253                         " out_table_data_size is (hex) %x\n", current->pid,
2254                         out_table_data_size);
2255
2256                 /* Construct input lli table */
2257                 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2258                         in_lli_table_ptr,
2259                         &current_in_entry,
2260                         &num_entries_in_table,
2261                         table_data_size);
2262
2263                 /* Construct output lli table */
2264                 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2265                         out_lli_table_ptr,
2266                         &current_out_entry,
2267                         &num_entries_out_table,
2268                         table_data_size);
2269
2270                 /* If info entry is null - this is the first table built */
2271                 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2272                         /* Set the output parameters to physical addresses */
2273                         *lli_table_in_ptr =
2274                         sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2275
2276                         *in_num_entries_ptr = num_entries_in_table;
2277
2278                         *lli_table_out_ptr =
2279                                 sep_shared_area_virt_to_bus(sep,
2280                                 dma_out_lli_table_ptr);
2281
2282                         *out_num_entries_ptr = num_entries_out_table;
2283                         *table_data_size_ptr = table_data_size;
2284
2285                         dev_dbg(&sep->pdev->dev,
2286                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2287                                 current->pid,
2288                                 (unsigned long)*lli_table_in_ptr);
2289                         dev_dbg(&sep->pdev->dev,
2290                                 "[PID%d] output lli_table_out_ptr is %08lx\n",
2291                                 current->pid,
2292                                 (unsigned long)*lli_table_out_ptr);
2293                 } else {
2294                         /* Update the info entry of the previous in table */
2295                         info_in_entry_ptr->bus_address =
2296                                 sep_shared_area_virt_to_bus(sep,
2297                                 dma_in_lli_table_ptr);
2298
2299                         info_in_entry_ptr->block_size =
2300                                 ((num_entries_in_table) << 24) |
2301                                 (table_data_size);
2302
2303                         /* Update the info entry of the previous out table */
2304                         info_out_entry_ptr->bus_address =
2305                                 sep_shared_area_virt_to_bus(sep,
2306                                 dma_out_lli_table_ptr);
2307
2308                         info_out_entry_ptr->block_size =
2309                                 ((num_entries_out_table) << 24) |
2310                                 (table_data_size);
2311
2312                         dev_dbg(&sep->pdev->dev,
2313                                 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2314                                 current->pid,
2315                                 (unsigned long)info_in_entry_ptr->bus_address,
2316                                 info_in_entry_ptr->block_size);
2317
2318                         dev_dbg(&sep->pdev->dev,
2319                                 "[PID%d] output lli_table_out_ptr:"
2320                                 "%08lx  %08x\n",
2321                                 current->pid,
2322                                 (unsigned long)info_out_entry_ptr->bus_address,
2323                                 info_out_entry_ptr->block_size);
2324                 }
2325
2326                 /* Save the pointer to the info entry of the current tables */
2327                 info_in_entry_ptr = in_lli_table_ptr +
2328                         num_entries_in_table - 1;
2329                 info_out_entry_ptr = out_lli_table_ptr +
2330                         num_entries_out_table - 1;
2331
2332                 dev_dbg(&sep->pdev->dev,
2333                         "[PID%d] output num_entries_out_table is %x\n",
2334                         current->pid,
2335                         (u32)num_entries_out_table);
2336                 dev_dbg(&sep->pdev->dev,
2337                         "[PID%d] output info_in_entry_ptr is %lx\n",
2338                         current->pid,
2339                         (unsigned long)info_in_entry_ptr);
2340                 dev_dbg(&sep->pdev->dev,
2341                         "[PID%d] output info_out_entry_ptr is %lx\n",
2342                         current->pid,
2343                         (unsigned long)info_out_entry_ptr);
2344         }
2345
2346         /* Print input tables */
2347         if (!dmatables_region) {
2348                 sep_debug_print_lli_tables(
2349                         sep,
2350                         (struct sep_lli_entry *)
2351                         sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2352                         *in_num_entries_ptr,
2353                         *table_data_size_ptr);
2354         }
2355
2356         /* Print output tables */
2357         if (!dmatables_region) {
2358                 sep_debug_print_lli_tables(
2359                         sep,
2360                         (struct sep_lli_entry *)
2361                         sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2362                         *out_num_entries_ptr,
2363                         *table_data_size_ptr);
2364         }
2365
2366         return 0;
2367 }
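
/*
 * Editor's note (illustrative): each in/out table pair carries the same
 * table_data_size = min(in_table_data_size, out_table_data_size), both
 * first rounded down to a block_size multiple unless this is the last
 * pair, so AES/DES always sees whole blocks per table pair.
 */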
2368
2369 /**
2370  * sep_prepare_input_output_dma_table - prepare DMA I/O table
2371  * @app_virt_in_addr: virtual address of the input buffer
2372  * @app_virt_out_addr: virtual address of the output buffer
2373  * @data_size: size of the data in bytes
2374  * @block_size: block size of the symmetric operation
2375  * @lli_table_in_ptr: returned bus address of the first input table
2376  * @lli_table_out_ptr: returned bus address of the first output table
2377  * @in_num_entries_ptr: returned number of entries in the first input table
2378  * @out_num_entries_ptr: returned number of entries in the first output table
2379  * @table_data_size_ptr: returned data size of the first table pair
2380  * @is_kva: set for kernel data; used only for kernel crypto module
2381  *
2382  * This function builds input and output DMA tables for synchronous
2383  * symmetric operations (AES, DES, HASH). It also ensures that each table
2384  * holds a whole multiple of the block size
2385  * Note that all bus addresses that are passed to the SEP
2386  * are in 32 bit format; the SEP is a 32 bit device
2387  */
2388 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2389         unsigned long app_virt_in_addr,
2390         unsigned long app_virt_out_addr,
2391         u32 data_size,
2392         u32 block_size,
2393         dma_addr_t *lli_table_in_ptr,
2394         dma_addr_t *lli_table_out_ptr,
2395         u32 *in_num_entries_ptr,
2396         u32 *out_num_entries_ptr,
2397         u32 *table_data_size_ptr,
2398         bool is_kva,
2399         void **dmatables_region,
2400         struct sep_dma_context *dma_ctx)
2401
2402 {
2403         int error = 0;
2404         /* Array of pointers of page */
2405         struct sep_lli_entry *lli_in_array;
2406         /* Array of pointers of page */
2407         struct sep_lli_entry *lli_out_array;
2408
2409         if (!dma_ctx) {
2410                 error = -EINVAL;
2411                 goto end_function;
2412         }
2413
2414         if (data_size == 0) {
2415                 /* Prepare empty table for input and output */
2416                 if (dmatables_region) {
2417                         error = sep_allocate_dmatables_region(
2418                                         sep,
2419                                         dmatables_region,
2420                                         dma_ctx,
2421                                         2);
2422                         if (error)
2423                                 goto end_function;
2424                 }
2425                 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2426                         in_num_entries_ptr, table_data_size_ptr,
2427                         dmatables_region, dma_ctx);
2428
2429                 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2430                         out_num_entries_ptr, table_data_size_ptr,
2431                         dmatables_region, dma_ctx);
2432
2433                 goto update_dcb_counter;
2434         }
2435
2436         /* Initialize the pages pointers */
2437         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2438         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2439
2440         /* Lock the pages of the buffer and translate them to pages */
2441         if (is_kva) {
2442                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2443                                                 current->pid);
2444                 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2445                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2446                                 dma_ctx);
2447                 if (error) {
2448                         dev_warn(&sep->pdev->dev,
2449                                 "[PID%d] sep_lock_kernel_pages for input "
2450                                 "virtual buffer failed\n", current->pid);
2451
2452                         goto end_function;
2453                 }
2454
2455                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2456                                                 current->pid);
2457                 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2458                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2459                                 dma_ctx);
2460
2461                 if (error) {
2462                         dev_warn(&sep->pdev->dev,
2463                                 "[PID%d] sep_lock_kernel_pages for output "
2464                                 "virtual buffer failed\n", current->pid);
2465
2466                         goto end_function_free_lli_in;
2467                 }
2468
2469         }
2470
2471         else {
2472                 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2473                                                 current->pid);
2474                 error = sep_lock_user_pages(sep, app_virt_in_addr,
2475                                 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2476                                 dma_ctx);
2477                 if (error) {
2478                         dev_warn(&sep->pdev->dev,
2479                                 "[PID%d] sep_lock_user_pages for input "
2480                                 "virtual buffer failed\n", current->pid);
2481
2482                         goto end_function;
2483                 }
2484
2485                 if (dma_ctx->secure_dma) {
2486                         /* secure_dma requires use of inaccessible memory */
2487                         dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2488                                 current->pid);
2489                         error = sep_lli_table_secure_dma(sep,
2490                                 app_virt_out_addr, data_size, &lli_out_array,
2491                                 SEP_DRIVER_OUT_FLAG, dma_ctx);
2492                         if (error) {
2493                                 dev_warn(&sep->pdev->dev,
2494                                         "[PID%d] secure dma table setup "
2495                                         "for output virtual buffer failed\n",
2496                                         current->pid);
2497
2498                                 goto end_function_free_lli_in;
2499                         }
2500                 } else {
2501                         /* For normal, non-secure dma */
2502                         dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2503                                 current->pid);
2504
2505                         dev_dbg(&sep->pdev->dev,
2506                                 "[PID%d] Locking user output pages\n",
2507                                 current->pid);
2508
2509                         error = sep_lock_user_pages(sep, app_virt_out_addr,
2510                                 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2511                                 dma_ctx);
2512
2513                         if (error) {
2514                                 dev_warn(&sep->pdev->dev,
2515                                         "[PID%d] sep_lock_user_pages"
2516                                         " for output virtual buffer failed\n",
2517                                         current->pid);
2518
2519                                 goto end_function_free_lli_in;
2520                         }
2521                 }
2522         }
2523
2524         dev_dbg(&sep->pdev->dev,
2525                 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2526                 current->pid,
2527                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2528
2529         dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2530                 current->pid,
2531                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2532
2533         dev_dbg(&sep->pdev->dev,
2534                 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2535                 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2536
2537         /* Call the function that creates table from the lli arrays */
2538         dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2539                                         current->pid);
2540         error = sep_construct_dma_tables_from_lli(
2541                         sep, lli_in_array,
2542                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2543                                                                 in_num_pages,
2544                         lli_out_array,
2545                         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2546                                                                 out_num_pages,
2547                         block_size, lli_table_in_ptr, lli_table_out_ptr,
2548                         in_num_entries_ptr, out_num_entries_ptr,
2549                         table_data_size_ptr, dmatables_region, dma_ctx);
2550
2551         if (error) {
2552                 dev_warn(&sep->pdev->dev,
2553                         "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2554                         current->pid);
2555                 goto end_function_with_error;
2556         }
2557
2558         kfree(lli_out_array);
2559         kfree(lli_in_array);
2560
2561 update_dcb_counter:
2562         /* Update DCB counter */
2563         dma_ctx->nr_dcb_creat++;
2564
2565         goto end_function;
2566
2567 end_function_with_error:
2568         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2569         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2570         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2571         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2572         kfree(lli_out_array);
2573
2574
2575 end_function_free_lli_in:
2576         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2577         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2578         kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2579         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2580         kfree(lli_in_array);
2581
2582 end_function:
2583
2584         return error;
2585
2586 }
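
/*
 * Editor's note (illustrative): mind the unwind order above -
 * end_function_with_error releases the output-side map and page arrays
 * and then falls through into end_function_free_lli_in, which releases
 * the input side, so a failure after both buffers are locked unwinds
 * out-side first, then in-side.
 */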
2587
2588 /**
2589  * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2590  * @app_in_address: unsigned long; for data buffer in (user space)
2591  * @app_out_address: unsigned long; for data buffer out (user space)
2592  * @data_in_size: u32; for size of data
2593  * @block_size: u32; for block size
2594  * @tail_block_size: u32; for size of tail block
2595  * @isapplet: bool; to indicate external app
2596  * @is_kva: bool; kernel buffer; only used for kernel crypto module
2597  * @secure_dma: indicates whether this is secure_dma using IMR
2598  *
2599  * This function prepares the linked DMA tables and puts the
2600  * address for the linked list of tables into a DCB (data control
2601  * block) the address of which is known by the SEP hardware
2602  * Note that all bus addresses that are passed to the SEP
2603  * are in 32 bit format; the SEP is a 32 bit device
2604  */
2605 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2606         unsigned long  app_in_address,
2607         unsigned long  app_out_address,
2608         u32  data_in_size,
2609         u32  block_size,
2610         u32  tail_block_size,
2611         bool isapplet,
2612         bool    is_kva,
2613         bool    secure_dma,
2614         struct sep_dcblock *dcb_region,
2615         void **dmatables_region,
2616         struct sep_dma_context **dma_ctx,
2617         struct scatterlist *src_sg,
2618         struct scatterlist *dst_sg)
2619 {
2620         int error = 0;
2621         /* Size of tail */
2622         u32 tail_size = 0;
2623         /* Address of the created DCB table */
2624         struct sep_dcblock *dcb_table_ptr = NULL;
2625         /* The physical address of the first input DMA table */
2626         dma_addr_t in_first_mlli_address = 0;
2627         /* Number of entries in the first input DMA table */
2628         u32  in_first_num_entries = 0;
2629         /* The physical address of the first output DMA table */
2630         dma_addr_t  out_first_mlli_address = 0;
2631         /* Number of entries in the first output DMA table */
2632         u32  out_first_num_entries = 0;
2633         /* Data in the first input/output table */
2634         u32  first_data_size = 0;
2635
2636         dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2637                 current->pid, app_in_address);
2638
2639         dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2640                 current->pid, app_out_address);
2641
2642         dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2643                 current->pid, data_in_size);
2644
2645         dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2646                 current->pid, block_size);
2647
2648         dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2649                 current->pid, tail_block_size);
2650
2651         dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2652                 current->pid, isapplet);
2653
2654         dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2655                 current->pid, is_kva);
2656
2657         dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2658                 current->pid, src_sg);
2659
2660         dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2661                 current->pid, dst_sg);
2662
2663         if (!dma_ctx) {
2664                 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2665                                                 current->pid);
2666                 error = -EINVAL;
2667                 goto end_function;
2668         }
2669
2670         if (*dma_ctx) {
2671                 /* In case there are multiple DCBs for this transaction */
2672                 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2673                                                 current->pid);
2674         } else {
2675                 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2676                 if (!(*dma_ctx)) {
2677                         dev_dbg(&sep->pdev->dev,
2678                                 "[PID%d] Not enough memory for DMA context\n",
2679                                 current->pid);
2680                         error = -ENOMEM;
2681                         goto end_function;
2682                 }
2683                 dev_dbg(&sep->pdev->dev,
2684                         "[PID%d] Created DMA context addr at 0x%p\n",
2685                         current->pid, *dma_ctx);
2686         }
2687
2688         (*dma_ctx)->secure_dma = secure_dma;
2689
2690         /* these are for kernel crypto only */
2691         (*dma_ctx)->src_sg = src_sg;
2692         (*dma_ctx)->dst_sg = dst_sg;
2693
2694         if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2695                 /* No more DCBs to allocate */
2696                 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2697                                                 current->pid);
2698                 error = -ENOSPC;
2699                 goto end_function_error;
2700         }
2701
2702         /* Allocate new DCB */
2703         if (dcb_region) {
2704                 dcb_table_ptr = dcb_region;
2705         } else {
2706                 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2707                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2708                         ((*dma_ctx)->nr_dcb_creat *
2709                                                 sizeof(struct sep_dcblock)));
2710         }
2711
2712         /* Set the default values in the DCB */
2713         dcb_table_ptr->input_mlli_address = 0;
2714         dcb_table_ptr->input_mlli_num_entries = 0;
2715         dcb_table_ptr->input_mlli_data_size = 0;
2716         dcb_table_ptr->output_mlli_address = 0;
2717         dcb_table_ptr->output_mlli_num_entries = 0;
2718         dcb_table_ptr->output_mlli_data_size = 0;
2719         dcb_table_ptr->tail_data_size = 0;
2720         dcb_table_ptr->out_vr_tail_pt = 0;
2721
2722         if (isapplet) {
2723
2724                 /* Check if there is enough data for DMA operation */
2725                 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2726                         if (is_kva) {
2727                                 error = -ENODEV;
2728                                 goto end_function_error;
2729                         } else {
2730                                 if (copy_from_user(dcb_table_ptr->tail_data,
2731                                         (void __user *)app_in_address,
2732                                         data_in_size)) {
2733                                         error = -EFAULT;
2734                                         goto end_function_error;
2735                                 }
2736                         }
2737
2738                         dcb_table_ptr->tail_data_size = data_in_size;
2739
2740                         /* Set the output user-space address for mem2mem op */
2741                         if (app_out_address)
2742                                 dcb_table_ptr->out_vr_tail_pt =
2743                                 (aligned_u64)app_out_address;
2744
2745                         /*
2746                          * Update both data length parameters in order to avoid
2747                          * second data copy and allow building of empty mlli
2748                          * tables
2749                          */
2750                         tail_size = 0x0;
2751                         data_in_size = 0x0;
2752
2753                 } else {
2754                         if (!app_out_address) {
2755                                 tail_size = data_in_size % block_size;
2756                                 if (!tail_size) {
2757                                         if (tail_block_size == block_size)
2758                                                 tail_size = block_size;
2759                                 }
2760                         } else {
2761                                 tail_size = 0;
2762                         }
2763                 }
2764                 if (tail_size) {
2765                         if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
                                     error = -EINVAL;
                                     goto end_function_error;
2766                         }
2767                         if (is_kva) {
2768                                 error = -ENODEV;
2769                                 goto end_function_error;
2770                         } else {
2771                                 /* We have tail data - copy it to DCB */
2772                                 if (copy_from_user(dcb_table_ptr->tail_data,
2773                                         (void __user *)(app_in_address +
2774                                         data_in_size - tail_size), tail_size)) {
2775                                         error = -EFAULT;
2776                                         goto end_function_error;
2777                                 }
2778                         }
2779                         if (app_out_address)
2780                                 /*
2781                                  * Calculate the output address
2782                                  * according to tail data size
2783                                  */
2784                                 dcb_table_ptr->out_vr_tail_pt =
2785                                         (aligned_u64)app_out_address +
2786                                         data_in_size - tail_size;
2787
2788                         /* Save the real tail data size */
2789                         dcb_table_ptr->tail_data_size = tail_size;
2790                         /*
2791                          * Update the data size without the tail
2792                          * data size AKA data for the dma
2793                          */
2794                         data_in_size = (data_in_size - tail_size);
2795                 }
2796         }
2797         /* Check if we need to build only input table or input/output */
2798         if (app_out_address) {
2799                 /* Prepare input/output tables */
2800                 error = sep_prepare_input_output_dma_table(sep,
2801                                 app_in_address,
2802                                 app_out_address,
2803                                 data_in_size,
2804                                 block_size,
2805                                 &in_first_mlli_address,
2806                                 &out_first_mlli_address,
2807                                 &in_first_num_entries,
2808                                 &out_first_num_entries,
2809                                 &first_data_size,
2810                                 is_kva,
2811                                 dmatables_region,
2812                                 *dma_ctx);
2813         } else {
2814                 /* Prepare input tables */
2815                 error = sep_prepare_input_dma_table(sep,
2816                                 app_in_address,
2817                                 data_in_size,
2818                                 block_size,
2819                                 &in_first_mlli_address,
2820                                 &in_first_num_entries,
2821                                 &first_data_size,
2822                                 is_kva,
2823                                 dmatables_region,
2824                                 *dma_ctx);
2825         }
2826
2827         if (error) {
2828                 dev_warn(&sep->pdev->dev,
2829                         "prepare DMA table call failed "
2830                         "from prepare DCB call\n");
2831                 goto end_function_error;
2832         }
2833
2834         /* Set the DCB values */
2835         dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2836         dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2837         dcb_table_ptr->input_mlli_data_size = first_data_size;
2838         dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2839         dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2840         dcb_table_ptr->output_mlli_data_size = first_data_size;
2841
2842         goto end_function;
2843
2844 end_function_error:
2845         kfree(*dma_ctx);
2846         *dma_ctx = NULL;
2847
2848 end_function:
2849         return error;
2850
2851 }
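
/*
 * A minimal usage sketch (not compiled; all numeric values are
 * hypothetical): roughly how sep_prepare_dcb_handler() below invokes
 * the function for a plain user-space mem2mem operation, using the
 * shared-area DCB slot and no thread-local table copies.
 */
#if 0
static int example_prepare_one_dcb(struct sep_device *sep,
                                   struct sep_dma_context **dma_ctx)
{
        return sep_prepare_input_output_dma_table_in_dcb(sep,
                        0x10000000UL,   /* app_in_address (user VA) */
                        0x20000000UL,   /* app_out_address (user VA) */
                        4096,           /* data_in_size */
                        16,             /* block_size */
                        16,             /* tail_block_size */
                        false,          /* isapplet */
                        false,          /* is_kva */
                        false,          /* secure_dma */
                        NULL,           /* dcb_region: use shared area */
                        NULL,           /* dmatables_region: build in place */
                        dma_ctx,
                        NULL, NULL);    /* no scatterlists (not kernel crypto) */
}
#endif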
2852
2853
2854 /**
2855  * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2856  * @sep: pointer to struct sep_device
2857  * @isapplet: indicates external application (used for kernel access)
2858  * @is_kva: indicates kernel addresses (only used for kernel crypto)
2859  *
2860  * This function frees the DMA tables and DCB
2861  */
2862 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2863         bool is_kva, struct sep_dma_context **dma_ctx)
2864 {
2865         struct sep_dcblock *dcb_table_ptr;
2866         unsigned long pt_hold;
2867         void *tail_pt;
2868
2869         int i = 0;
2870         int error = 0;
2871         int error_temp = 0;
2872
2873         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2874                                         current->pid);
2875         if (!dma_ctx || !*dma_ctx) /* nothing to be done here */
2876                 return 0;
2877
2878         if (!(*dma_ctx)->secure_dma && isapplet) {
2879                 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2880                         current->pid);
2881
2882                 /* Tail stuff is only for non secure_dma */
2883                 /* Set pointer to first DCB table */
2884                 dcb_table_ptr = (struct sep_dcblock *)
2885                         (sep->shared_addr +
2886                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2887
2888                 /*
2889                  * Go over each DCB and see if
2890                  * tail pointer must be updated
2891                  */
2892                 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2893                         if (dcb_table_ptr->out_vr_tail_pt) {
2894                                 pt_hold = (unsigned long)dcb_table_ptr->
2895                                         out_vr_tail_pt;
2896                                 tail_pt = (void *)pt_hold;
2897                                 if (is_kva) {
2898                                         error = -ENODEV;
2899                                         break;
2900                                 } else {
2901                                         error_temp = copy_to_user(
2902                                                 (void __user *)tail_pt,
2903                                                 dcb_table_ptr->tail_data,
2904                                                 dcb_table_ptr->tail_data_size);
2905                                 }
2906                                 if (error_temp) {
2907                                         /* Release the DMA resource */
2908                                         error = -EFAULT;
2909                                         break;
2910                                 }
2911                         }
2912                 }
2913         }
2914
2915         /* Free the output pages, if any */
2916         sep_free_dma_table_data_handler(sep, dma_ctx);
2917
2918         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2919                                         current->pid);
2920
2921         return error;
2922 }
2923
2924 /**
2925  * sep_prepare_dcb_handler - prepare a control block
2926  * @sep: pointer to struct sep_device
2927  * @arg: pointer to user parameters
2928  * @secure_dma: indicates whether we are using secure_dma on IMR
      * @dma_ctx: DMA context for the transaction
2929  *
2930  * This function copies the DCB build parameters from user space and
2931  * prepares the DMA tables and DCB for the transaction.
2932  */
2933 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2934                                    bool secure_dma,
2935                                    struct sep_dma_context **dma_ctx)
2936 {
2937         int error;
2938         /* Command arguments */
2939         struct build_dcb_struct command_args;
2940
2941         /* Get the command arguments */
2942         if (copy_from_user(&command_args, (void __user *)arg,
2943                                         sizeof(struct build_dcb_struct))) {
2944                 error = -EFAULT;
2945                 goto end_function;
2946         }
2947
2948         dev_dbg(&sep->pdev->dev,
2949                 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2950                         current->pid, command_args.app_in_address);
2951         dev_dbg(&sep->pdev->dev,
2952                 "[PID%d] app_out_address is %08llx\n",
2953                         current->pid, command_args.app_out_address);
2954         dev_dbg(&sep->pdev->dev,
2955                 "[PID%d] data_size is %x\n",
2956                         current->pid, command_args.data_in_size);
2957         dev_dbg(&sep->pdev->dev,
2958                 "[PID%d] block_size is %x\n",
2959                         current->pid, command_args.block_size);
2960         dev_dbg(&sep->pdev->dev,
2961                 "[PID%d] tail block_size is %x\n",
2962                         current->pid, command_args.tail_block_size);
2963         dev_dbg(&sep->pdev->dev,
2964                 "[PID%d] is_applet is %x\n",
2965                         current->pid, command_args.is_applet);
2966
2967         if (!command_args.app_in_address) {
2968                 dev_warn(&sep->pdev->dev,
2969                         "[PID%d] null app_in_address\n", current->pid);
2970                 error = -EINVAL;
2971                 goto end_function;
2972         }
2973
2974         error = sep_prepare_input_output_dma_table_in_dcb(sep,
2975                         (unsigned long)command_args.app_in_address,
2976                         (unsigned long)command_args.app_out_address,
2977                         command_args.data_in_size, command_args.block_size,
2978                         command_args.tail_block_size,
2979                         command_args.is_applet, false,
2980                         secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2981
2982 end_function:
2983         return error;
2984
2985 }
2986
2987 /**
2988  * sep_free_dcb_handler - free control block resources
2989  * @sep: pointer to struct sep_device
2990  *
2991  * This function frees the DCB resources and updates the needed
2992  * user-space buffers.
2993  */
2994 static int sep_free_dcb_handler(struct sep_device *sep,
2995                                 struct sep_dma_context **dma_ctx)
2996 {
2997         if (!dma_ctx || !(*dma_ctx)) {
2998                 dev_dbg(&sep->pdev->dev,
2999                         "[PID%d] no dma context defined, nothing to free\n",
3000                         current->pid);
3001                 return -EINVAL;
3002         }
3003
3004         dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3005                 current->pid,
3006                 (*dma_ctx)->nr_dcb_creat);
3007
3008         return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3009 }
3010
3011 /**
3012  * sep_ioctl - ioctl handler for sep device
3013  * @filp: pointer to struct file
3014  * @cmd: command
3015  * @arg: pointer to argument structure
3016  *
3017  * Implement the ioctl methods available on the SEP device.
3018  */
3019 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3020 {
3021         struct sep_private_data * const private_data = filp->private_data;
3022         struct sep_call_status *call_status = &private_data->call_status;
3023         struct sep_device *sep = private_data->device;
3024         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3025         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3026         int error = 0;
3027
3028         dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3029                 current->pid, cmd);
3030         dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3031                 current->pid, *dma_ctx);
3032
3033         /* Make sure we own this device */
3034         error = sep_check_transaction_owner(sep);
3035         if (error) {
3036                 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3037                         current->pid);
3038                 goto end_function;
3039         }
3040
3041         /* Check that sep_mmap has been called before */
3042         if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3043                                 &call_status->status)) {
3044                 dev_dbg(&sep->pdev->dev,
3045                         "[PID%d] mmap not called\n", current->pid);
3046                 error = -EPROTO;
3047                 goto end_function;
3048         }
3049
3050         /* Check that the command is for SEP device */
3051         if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3052                 error = -ENOTTY;
3053                 goto end_function;
3054         }
3055
3056         switch (cmd) {
3057         case SEP_IOCSENDSEPCOMMAND:
3058                 dev_dbg(&sep->pdev->dev,
3059                         "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3060                         current->pid);
3061                 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3062                                   &call_status->status)) {
3063                         dev_warn(&sep->pdev->dev,
3064                                 "[PID%d] send msg already done\n",
3065                                 current->pid);
3066                         error = -EPROTO;
3067                         goto end_function;
3068                 }
3069                 /* Send command to SEP */
3070                 error = sep_send_command_handler(sep);
3071                 if (!error)
3072                         set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3073                                 &call_status->status);
3074                 dev_dbg(&sep->pdev->dev,
3075                         "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3076                         current->pid);
3077                 break;
3078         case SEP_IOCENDTRANSACTION:
3079                 dev_dbg(&sep->pdev->dev,
3080                         "[PID%d] SEP_IOCENDTRANSACTION start\n",
3081                         current->pid);
3082                 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3083                                                     my_queue_elem);
3084                 dev_dbg(&sep->pdev->dev,
3085                         "[PID%d] SEP_IOCENDTRANSACTION end\n",
3086                         current->pid);
3087                 break;
3088         case SEP_IOCPREPAREDCB:
3089                 dev_dbg(&sep->pdev->dev,
3090                         "[PID%d] SEP_IOCPREPAREDCB start\n",
3091                         current->pid);
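                     /* fall through - the handler below serves both commands */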
3092         case SEP_IOCPREPAREDCB_SECURE_DMA:
3093                 dev_dbg(&sep->pdev->dev,
3094                         "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3095                         current->pid);
3096                 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3097                                   &call_status->status)) {
3098                         dev_dbg(&sep->pdev->dev,
3099                                 "[PID%d] dcb prep needed before send msg\n",
3100                                 current->pid);
3101                         error = -EPROTO;
3102                         goto end_function;
3103                 }
3104
3105                 if (!arg) {
3106                         dev_dbg(&sep->pdev->dev,
3107                                 "[PID%d] dcb null arg\n", current->pid);
3108                         error = -EINVAL;
3109                         goto end_function;
3110                 }
3111
3112                 if (cmd == SEP_IOCPREPAREDCB) {
3113                         /* No secure dma */
3114                         dev_dbg(&sep->pdev->dev,
3115                                 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3116                                 current->pid);
3117
3118                         error = sep_prepare_dcb_handler(sep, arg, false,
3119                                 dma_ctx);
3120                 } else {
3121                         /* Secure dma */
3122                         dev_dbg(&sep->pdev->dev,
3123                                 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3124                                 current->pid);
3125
3126                         error = sep_prepare_dcb_handler(sep, arg, true,
3127                                 dma_ctx);
3128                 }
3129                 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3130                         current->pid);
3131                 break;
3132         case SEP_IOCFREEDCB:
3133                 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3134                         current->pid);
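                     /* fall through - the handler below serves both commands */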
3135         case SEP_IOCFREEDCB_SECURE_DMA:
3136                 dev_dbg(&sep->pdev->dev,
3137                         "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3138                         current->pid);
3139                 error = sep_free_dcb_handler(sep, dma_ctx);
3140                 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3141                         current->pid);
3142                 break;
3143         default:
3144                 error = -ENOTTY;
3145                 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3146                         current->pid);
3147                 break;
3148         }
3149
3150 end_function:
3151         dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3152
3153         return error;
3154 }
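
/*
 * Illustrative user-space call sequence for the ioctl interface above.
 * This is a sketch only; the device node name and the dcb_args contents
 * are assumptions, and error handling is omitted:
 *
 *	fd = open("/dev/sep", O_RDWR);
 *	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *					(mmap is required before any ioctl)
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);   build DMA tables + DCB
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);       start the SEP operation
 *	... wait for completion (poll) ...
 *	ioctl(fd, SEP_IOCFREEDCB, 0);              copy tail data, free DCBs
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);       release device ownership
 */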
3155
3156 /**
3157  * sep_inthandler - interrupt handler for sep device
3158  * @irq: interrupt
3159  * @dev_id: device id
3160  */
3161 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3162 {
3163         unsigned long lock_irq_flag;
3164         u32 reg_val, reg_val2 = 0;
3165         struct sep_device *sep = dev_id;
3166         irqreturn_t int_error = IRQ_HANDLED;
3167
3168         /* Are we in power save? */
3169 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3170         if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3171                 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3172                 return IRQ_NONE;
3173         }
3174 #endif
3175
3176         if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3177                 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3178                 return IRQ_NONE;
3179         }
3180
3181         /* Read the IRR register to check if this is SEP interrupt */
3182         reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3183
3184         dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3185
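             /* IRR bit 13 flags an interrupt originating from the SEP */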
3186         if (reg_val & (0x1 << 13)) {
3187
3188                 /* Lock and update the counter of reply messages */
3189                 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3190                 sep->reply_ct++;
3191                 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3192
3193                 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3194                                         sep->send_ct, sep->reply_ct);
3195
3196                 /* Is this a kernel client request */
3197                 if (sep->in_kernel) {
3198                         tasklet_schedule(&sep->finish_tasklet);
3199                         goto finished_interrupt;
3200                 }
3201
3202                 /* Is this printf or daemon request? */
3203                 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3204                 dev_dbg(&sep->pdev->dev,
3205                         "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3206
3207                 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3208
3209                 if ((reg_val2 >> 30) & 0x1) {
3210                         dev_dbg(&sep->pdev->dev, "int: printf request\n");
3211                 } else if (reg_val2 >> 31) {
3212                         dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3213                 } else {
3214                         dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3215                         wake_up(&sep->event_interrupt);
3216                 }
3217         } else {
3218                 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3219                 int_error = IRQ_NONE;
3220         }
3221
3222 finished_interrupt:
3223
3224         if (int_error == IRQ_HANDLED)
3225                 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3226
3227         return int_error;
3228 }
3229
3230 /**
3231  * sep_reconfig_shared_area - reconfigure shared area
3232  * @sep: pointer to struct sep_device
3233  *
3234  * Reconfig the shared area between HOST and SEP - needed in case
3235  * the DX_CC_Init function was called before OS loading.
3236  */
3237 static int sep_reconfig_shared_area(struct sep_device *sep)
3238 {
3239         int ret_val;
3240
3241         /* use to limit waiting for SEP */
3242         unsigned long end_time;
3243
3244         /* Send the new SHARED MESSAGE AREA to the SEP */
3245         dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3246                                 (unsigned long long)sep->shared_bus);
3247
3248         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3249
3250         /* Poll for SEP response */
3251         ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3252
3253         end_time = jiffies + (WAIT_TIME * HZ);
3254
3255         while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3256                 (ret_val != sep->shared_bus))
3257                 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3258
3259         /* Check the return value (register) */
3260         if (ret_val != sep->shared_bus) {
3261                 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3262                 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3263                 ret_val = -ENOMEM;
3264         } else {
3265                 ret_val = 0;
             }
3266
3267         dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3268
3269         return ret_val;
3270 }
3271
3272 /**
3273  *      sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3274  *                                              contexts into use
3275  *      @sep: SEP device
3276  *      @dcb_region: DCB region copy
3277  *      @dmatables_region: MLLI/DMA tables copy
3278  *      @dma_ctx: DMA context for current transaction
3279  */
3280 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3281                                         struct sep_dcblock **dcb_region,
3282                                         void **dmatables_region,
3283                                         struct sep_dma_context *dma_ctx)
3284 {
3285         void *dmaregion_free_start = NULL;
3286         void *dmaregion_free_end = NULL;
3287         void *dcbregion_free_start = NULL;
3288         void *dcbregion_free_end = NULL;
3289         ssize_t error = 0;
3290
3291         dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3292                 current->pid);
3293
3294         if (1 > dma_ctx->nr_dcb_creat) {
3295                 dev_warn(&sep->pdev->dev,
3296                          "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3297                          current->pid, dma_ctx->nr_dcb_creat);
3298                 error = -EINVAL;
3299                 goto end_function;
3300         }
3301
3302         dmaregion_free_start = sep->shared_addr
3303                                 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3304         dmaregion_free_end = dmaregion_free_start
3305                                 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3306
3307         if (dmaregion_free_start
3308              + dma_ctx->dmatables_len > dmaregion_free_end) {
3309                 error = -ENOMEM;
3310                 goto end_function;
3311         }
3312         memcpy(dmaregion_free_start,
3313                *dmatables_region,
3314                dma_ctx->dmatables_len);
3315         /* Free MLLI table copy */
3316         kfree(*dmatables_region);
3317         *dmatables_region = NULL;
3318
3319         /* Copy the thread's DCB table copy to the DCB table region */
3320         dcbregion_free_start = sep->shared_addr +
3321                                 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3322         dcbregion_free_end = dcbregion_free_start +
3323                                 (SEP_MAX_NUM_SYNC_DMA_OPS *
3324                                         sizeof(struct sep_dcblock)) - 1;
3325
3326         if (dcbregion_free_start
3327              + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3328              > dcbregion_free_end) {
3329                 error = -ENOMEM;
3330                 goto end_function;
3331         }
3332
3333         memcpy(dcbregion_free_start,
3334                *dcb_region,
3335                dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3336
3337         /* Print the tables */
3338         dev_dbg(&sep->pdev->dev, "activate: input table\n");
3339         sep_debug_print_lli_tables(sep,
3340                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3341                 (*dcb_region)->input_mlli_address),
3342                 (*dcb_region)->input_mlli_num_entries,
3343                 (*dcb_region)->input_mlli_data_size);
3344
3345         dev_dbg(&sep->pdev->dev, "activate: output table\n");
3346         sep_debug_print_lli_tables(sep,
3347                 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3348                 (*dcb_region)->output_mlli_address),
3349                 (*dcb_region)->output_mlli_num_entries,
3350                 (*dcb_region)->output_mlli_data_size);
3351
3352         dev_dbg(&sep->pdev->dev,
3353                  "[PID%d] printing activated tables\n", current->pid);
3354
3355 end_function:
3356         kfree(*dmatables_region);
3357         *dmatables_region = NULL;
3358
3359         kfree(*dcb_region);
3360         *dcb_region = NULL;
3361
3362         return error;
3363 }
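
/*
 * Design note: DCBs and MLLI/DMA tables are first built into
 * thread-local buffers (see sep_create_dcb_dmatables_context below)
 * and only copied into the shared area by the activate function above,
 * once the caller holds the transaction; this keeps concurrent writers
 * from trampling the shared area while another transaction is in flight.
 */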
3364
3365 /**
3366  *      sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3367  *      @sep: SEP device
3368  *      @dcb_region: DCB region buf to create for current transaction
3369  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3370  *      @dma_ctx: DMA context buf to create for current transaction
3371  *      @user_dcb_args: User arguments for DCB/MLLI creation
3372  *      @num_dcbs: Number of DCBs to create
3373  *      @secure_dma: Indicate use of IMR restricted memory secure dma
3374  */
3375 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3376                         struct sep_dcblock **dcb_region,
3377                         void **dmatables_region,
3378                         struct sep_dma_context **dma_ctx,
3379                         const struct build_dcb_struct __user *user_dcb_args,
3380                         const u32 num_dcbs, bool secure_dma)
3381 {
3382         int error = 0;
3383         int i = 0;
3384         struct build_dcb_struct *dcb_args = NULL;
3385
3386         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3387                 current->pid);
3388
3389         if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3390                 error = -EINVAL;
3391                 goto end_function;
3392         }
3393
3394         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3395                 dev_warn(&sep->pdev->dev,
3396                          "[PID%d] invalid number of dcbs 0x%08X\n",
3397                          current->pid, num_dcbs);
3398                 error = -EINVAL;
3399                 goto end_function;
3400         }
3401
3402         dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3403                            GFP_KERNEL);
3404         if (!dcb_args) {
3405                 error = -ENOMEM;
3406                 goto end_function;
3407         }
3408
3409         if (copy_from_user(dcb_args,
3410                         user_dcb_args,
3411                         num_dcbs * sizeof(struct build_dcb_struct))) {
3412                 error = -EFAULT;
3413                 goto end_function;
3414         }
3415
3416         /* Allocate thread-specific memory for DCB */
3417         *dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
3418                               GFP_KERNEL);
3419         if (!(*dcb_region)) {
3420                 error = -ENOMEM;
3421                 goto end_function;
3422         }
3423
3424         /* Prepare DCB and MLLI table into the allocated regions */
3425         for (i = 0; i < num_dcbs; i++) {
3426                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3427                                 (unsigned long)dcb_args[i].app_in_address,
3428                                 (unsigned long)dcb_args[i].app_out_address,
3429                                 dcb_args[i].data_in_size,
3430                                 dcb_args[i].block_size,
3431                                 dcb_args[i].tail_block_size,
3432                                 dcb_args[i].is_applet,
3433                                 false, secure_dma,
3434                                 *dcb_region, dmatables_region,
3435                                 dma_ctx,
3436                                 NULL,
3437                                 NULL);
3438                 if (error) {
3439                         dev_warn(&sep->pdev->dev,
3440                                  "[PID%d] dma table creation failed\n",
3441                                  current->pid);
3442                         goto end_function;
3443                 }
3444
3445                 if (dcb_args[i].app_in_address != 0)
3446                         (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3447         }
3448
3449 end_function:
3450         kfree(dcb_args);
3451         return error;
3452
3453 }
3454
3455 /**
3456  *      sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3457  *      for kernel crypto
3458  *      @sep: SEP device
3459  *      @dcb_region: DCB region buf to create for current transaction
3460  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3461  *      @dma_ctx: DMA context buf to create for current transaction
3462  *      @dcb_data: DCB data (build_dcb_struct_kernel) from the kernel caller
3463  *      @num_dcbs: Number of DCBs to create
      *
3464  *      This does the same thing as sep_create_dcb_dmatables_context
3465  *      except that it is used only for the kernel crypto operation. It is
3466  *      separate because there is no user data involved; the dcb data structure
3467  *      is specific for kernel crypto (build_dcb_struct_kernel).
3468  */
3469 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3470                         struct sep_dcblock **dcb_region,
3471                         void **dmatables_region,
3472                         struct sep_dma_context **dma_ctx,
3473                         const struct build_dcb_struct_kernel *dcb_data,
3474                         const u32 num_dcbs)
3475 {
3476         int error = 0;
3477         int i = 0;
3478
3479         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3480                 current->pid);
3481
3482         if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3483                 error = -EINVAL;
3484                 goto end_function;
3485         }
3486
3487         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3488                 dev_warn(&sep->pdev->dev,
3489                          "[PID%d] invalid number of dcbs 0x%08X\n",
3490                          current->pid, num_dcbs);
3491                 error = -EINVAL;
3492                 goto end_function;
3493         }
3494
3495         dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3496                 current->pid, num_dcbs);
3497
3498         /* Allocate thread-specific memory for DCB */
3499         *dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
3500                               GFP_KERNEL);
3501         if (!(*dcb_region)) {
3502                 error = -ENOMEM;
3503                 goto end_function;
3504         }
3505
3506         /* Prepare DCB and MLLI table into the allocated regions */
3507         for (i = 0; i < num_dcbs; i++) {
3508                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3509                                 (unsigned long)dcb_data->app_in_address,
3510                                 (unsigned long)dcb_data->app_out_address,
3511                                 dcb_data->data_in_size,
3512                                 dcb_data->block_size,
3513                                 dcb_data->tail_block_size,
3514                                 dcb_data->is_applet,
3515                                 true,
3516                                 false,
3517                                 *dcb_region, dmatables_region,
3518                                 dma_ctx,
3519                                 dcb_data->src_sg,
3520                                 dcb_data->dst_sg);
3521                 if (error) {
3522                         dev_warn(&sep->pdev->dev,
3523                                  "[PID%d] dma table creation failed\n",
3524                                  current->pid);
3525                         goto end_function;
3526                 }
3527         }
3528
3529 end_function:
3530         return error;
3531
3532 }
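
/*
 * A minimal sketch (not compiled) of how a kernel-crypto caller such as
 * sep_crypto.c might fill a struct build_dcb_struct_kernel for a single
 * input-only (hash-style) operation. The field values and the use of
 * SHA1_BLOCK_SIZE here are assumptions for illustration only.
 */
#if 0
static int example_kernel_dcb(struct sep_device *sep,
                              struct scatterlist *src,
                              struct sep_dcblock **dcb_region,
                              void **dmatables_region,
                              struct sep_dma_context **dma_ctx)
{
        struct build_dcb_struct_kernel dcb_data = {
                .app_in_address = (unsigned long)sg_virt(src),
                .app_out_address = 0,           /* input only, no output */
                .data_in_size = 512,
                .block_size = SHA1_BLOCK_SIZE,
                .tail_block_size = SHA1_BLOCK_SIZE,
                .is_applet = false,
                .src_sg = src,
                .dst_sg = NULL,
        };

        return sep_create_dcb_dmatables_context_kernel(sep, dcb_region,
                        dmatables_region, dma_ctx, &dcb_data, 1);
}
#endif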
3533
3534 /**
3535  *      sep_activate_msgarea_context - Takes the message area context into use
3536  *      @sep: SEP device
3537  *      @msg_region: Message area context buf
3538  *      @msg_len: Message area context buffer size
3539  */
3540 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3541                                             void **msg_region,
3542                                             const size_t msg_len)
3543 {
3544         dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3545                 current->pid);
3546
3547         if (!msg_region || !(*msg_region) ||
3548             SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3549                 dev_warn(&sep->pdev->dev,
3550                          "[PID%d] invalid act msgarea len 0x%08zX\n",
3551                          current->pid, msg_len);
3552                 return -EINVAL;
3553         }
3554
3555         memcpy(sep->shared_addr, *msg_region, msg_len);
3556
3557         return 0;
3558 }
3559
3560 /**
3561  *      sep_create_msgarea_context - Creates message area context
3562  *      @sep: SEP device
3563  *      @msg_region: Msg area region buf to create for current transaction
3564  *      @msg_user: Content for msg area region from user
3565  *      @msg_len: Message area size
3566  */
3567 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3568                                           void **msg_region,
3569                                           const void __user *msg_user,
3570                                           const size_t msg_len)
3571 {
3572         int error = 0;
3573
3574         dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3575                 current->pid);
3576
3577         if (!msg_region ||
3578             !msg_user ||
3579             SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3580             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3581                 dev_warn(&sep->pdev->dev,
3582                          "[PID%d] invalid creat msgarea len 0x%08zX\n",
3583                          current->pid, msg_len);
3584                 error = -EINVAL;
3585                 goto end_function;
3586         }
3587
3588         /* Allocate thread-specific memory for message buffer */
3589         *msg_region = kzalloc(msg_len, GFP_KERNEL);
3590         if (!(*msg_region)) {
3591                 error = -ENOMEM;
3592                 goto end_function;
3593         }
3594
3595         /* Copy input data to write() to allocated message buffer */
3596         if (copy_from_user(*msg_region, msg_user, msg_len)) {
3597                 error = -EFAULT;
3598                 goto end_function;
3599         }
3600
3601 end_function:
3602         if (error && msg_region) {
3603                 kfree(*msg_region);
3604                 *msg_region = NULL;
3605         }
3606
3607         return error;
3608 }
3609
3610
3611 /**
3612  *      sep_read - Returns results of an operation for fastcall interface
3613  *      @filp: File pointer
3614  *      @buf_user: User buffer for storing results
3615  *      @count_user: User buffer size
3616  *      @offset: File offset, not supported
3617  *
3618  *      The implementation does not support reading in chunks, all data must be
3619  *      consumed during a single read system call.
3620  */
3621 static ssize_t sep_read(struct file *filp,
3622                         char __user *buf_user, size_t count_user,
3623                         loff_t *offset)
3624 {
3625         struct sep_private_data * const private_data = filp->private_data;
3626         struct sep_call_status *call_status = &private_data->call_status;
3627         struct sep_device *sep = private_data->device;
3628         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3629         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3630         ssize_t error = 0, error_tmp = 0;
3631
3632         /* Am I the process that owns the transaction? */
3633         error = sep_check_transaction_owner(sep);
3634         if (error) {
3635                 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3636                         current->pid);
3637                 goto end_function;
3638         }
3639
3640         /* Checks that user has called necessary apis */
3641         if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3642                         &call_status->status)) {
3643                 dev_warn(&sep->pdev->dev,
3644                          "[PID%d] fastcall write not called\n",
3645                          current->pid);
3646                 error = -EPROTO;
3647                 goto end_function_error;
3648         }
3649
3650         if (!buf_user) {
3651                 dev_warn(&sep->pdev->dev,
3652                          "[PID%d] null user buffer\n",
3653                          current->pid);
3654                 error = -EINVAL;
3655                 goto end_function_error;
3656         }
3657
3658
3659         /* Wait for SEP to finish */
3660         wait_event(sep->event_interrupt,
3661                    test_bit(SEP_WORKING_LOCK_BIT,
3662                             &sep->in_use_flags) == 0);
3663
3664         sep_dump_message(sep);
3665
3666         dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3667                 current->pid, count_user);
3668
3669         /* In case user has allocated bigger buffer */
3670         if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3671                 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3672
3673         if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3674                 error = -EFAULT;
3675                 goto end_function_error;
3676         }
3677
3678         dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3679         error = count_user;
3680
3681 end_function_error:
3682         /* Copy possible tail data to user and free DCB and MLLIs */
3683         error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3684         if (error_tmp)
3685                 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3686                         current->pid);
3687
3688         /* End the transaction, wakeup pending ones */
3689         error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3690                 my_queue_elem);
3691         if (error_tmp)
3692                 dev_warn(&sep->pdev->dev,
3693                          "[PID%d] ending transaction failed\n",
3694                          current->pid);
3695
3696 end_function:
3697         return error;
3698 }
3699
3700 /**
3701  *      sep_fastcall_args_get - Gets fastcall params from user
3702  *      @sep: SEP device
3703  *      @args: Parameters buffer
3704  *      @buf_user: User buffer for operation parameters
3705  *      @count_user: User buffer size
3706  */
3707 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3708                                             struct sep_fastcall_hdr *args,
3709                                             const char __user *buf_user,
3710                                             const size_t count_user)
3711 {
3712         ssize_t error = 0;
3713         size_t actual_count = 0;
3714
3715         if (!buf_user) {
3716                 dev_warn(&sep->pdev->dev,
3717                          "[PID%d] null user buffer\n",
3718                          current->pid);
3719                 error = -EINVAL;
3720                 goto end_function;
3721         }
3722
3723         if (count_user < sizeof(struct sep_fastcall_hdr)) {
3724                 dev_warn(&sep->pdev->dev,
3725                          "[PID%d] too small message size 0x%08zX\n",
3726                          current->pid, count_user);
3727                 error = -EINVAL;
3728                 goto end_function;
3729         }
3730
3731
3732         if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3733                 error = -EFAULT;
3734                 goto end_function;
3735         }
3736
3737         if (SEP_FC_MAGIC != args->magic) {
3738                 dev_warn(&sep->pdev->dev,
3739                          "[PID%d] invalid fastcall magic 0x%08X\n",
3740                          current->pid, args->magic);
3741                 error = -EINVAL;
3742                 goto end_function;
3743         }
3744
3745         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3746                 current->pid, args->num_dcbs);
3747         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3748                 current->pid, args->msg_len);
3749
3750         if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3751             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3752                 dev_warn(&sep->pdev->dev,
3753                          "[PID%d] invalid message length\n",
3754                          current->pid);
3755                 error = -EINVAL;
3756                 goto end_function;
3757         }
3758
3759         actual_count = sizeof(struct sep_fastcall_hdr)
3760                         + args->msg_len
3761                         + (args->num_dcbs * sizeof(struct build_dcb_struct));
3762
3763         if (actual_count != count_user) {
3764                 dev_warn(&sep->pdev->dev,
3765                          "[PID%d] inconsistent message "
3766                          "sizes 0x%08zX vs 0x%08zX\n",
3767                          current->pid, actual_count, count_user);
3768                 error = -EMSGSIZE;
3769                 goto end_function;
3770         }
3771
3772 end_function:
3773         return error;
3774 }
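
/*
 * Layout of a fastcall write() buffer as validated above and consumed
 * by sep_write() below:
 *
 *	+---------------------------+
 *	| struct sep_fastcall_hdr   |  magic, num_dcbs, msg_len, secure_dma
 *	+---------------------------+
 *	| struct build_dcb_struct   |  num_dcbs entries (may be zero)
 *	|           ...             |
 *	+---------------------------+
 *	| message area contents     |  msg_len bytes
 *	+---------------------------+
 */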
3775
3776 /**
3777  *      sep_write - Starts an operation for fastcall interface
3778  *      @filp: File pointer
3779  *      @buf_user: User buffer for operation parameters
3780  *      @count_user: User buffer size
3781  *      @offset: File offset, not supported
3782  *
3783  *      The implementation does not support writing in chunks,
3784  *      all data must be given during a single write system call.
3785  */
3786 static ssize_t sep_write(struct file *filp,
3787                          const char __user *buf_user, size_t count_user,
3788                          loff_t *offset)
3789 {
3790         struct sep_private_data * const private_data = filp->private_data;
3791         struct sep_call_status *call_status = &private_data->call_status;
3792         struct sep_device *sep = private_data->device;
3793         struct sep_dma_context *dma_ctx = NULL;
3794         struct sep_fastcall_hdr call_hdr = {0};
3795         void *msg_region = NULL;
3796         void *dmatables_region = NULL;
3797         struct sep_dcblock *dcb_region = NULL;
3798         ssize_t error = 0;
3799         struct sep_queue_info *my_queue_elem = NULL;
3800         bool my_secure_dma; /* are we using secure_dma (IMR)? */
3801
3802         dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3803                 current->pid, sep);
3804         dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3805                 current->pid, private_data);
3806
3807         error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3808         if (error)
3809                 goto end_function;
3810
3811         buf_user += sizeof(struct sep_fastcall_hdr);
3812
3813         my_secure_dma = (call_hdr.secure_dma != 0);
3817
3818         /*
3819          * Control driver memory usage by limiting the number of
3820          * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT threads
3821          * can progress further at a time
3822          */
3823         dev_dbg(&sep->pdev->dev,
3824                 "[PID%d] waiting for double buffering region access\n",
3825                 current->pid);
3826         error = down_interruptible(&sep->sep_doublebuf);
3827         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3828                                         current->pid);
3829         if (error) {
3830                 /* Signal received */
3831                 goto end_function_error;
3832         }
3833
3834
3835         /*
3836          * Prepare contents of the shared area regions for
3837          * the operation into temporary buffers
3838          */
3839         if (0 < call_hdr.num_dcbs) {
3840                 error = sep_create_dcb_dmatables_context(sep,
3841                                 &dcb_region,
3842                                 &dmatables_region,
3843                                 &dma_ctx,
3844                                 (const struct build_dcb_struct __user *)
3845                                         buf_user,
3846                                 call_hdr.num_dcbs, my_secure_dma);
3847                 if (error)
3848                         goto end_function_error_doublebuf;
3849
3850                 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3851         }
3852
3853         error = sep_create_msgarea_context(sep,
3854                                            &msg_region,
3855                                            buf_user,
3856                                            call_hdr.msg_len);
3857         if (error)
3858                 goto end_function_error_doublebuf;
3859
3860         dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3861                                                         current->pid);
3862         my_queue_elem = sep_queue_status_add(sep,
3863                                 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3864                                 (dma_ctx) ? dma_ctx->input_data_len : 0,
3865                                      current->pid,
3866                                      current->comm, sizeof(current->comm));
3867
3868         if (!my_queue_elem) {
3869                 dev_dbg(&sep->pdev->dev,
3870                         "[PID%d] updating queue status error\n", current->pid);
3871                 error = -ENOMEM;
3872                 goto end_function_error_doublebuf;
3873         }
3874
3875         /* Wait until current process gets the transaction */
3876         error = sep_wait_transaction(sep);
3877
3878         if (error) {
3879                 /* Interrupted by signal, don't clear transaction */
3880                 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3881                         current->pid);
3882                 sep_queue_status_remove(sep, &my_queue_elem);
3883                 goto end_function_error_doublebuf;
3884         }
3885
3886         dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3887                 current->pid);
3888         private_data->my_queue_elem = my_queue_elem;
3889
3890         /* Activate shared area regions for the transaction */
3891         error = sep_activate_msgarea_context(sep, &msg_region,
3892                                              call_hdr.msg_len);
3893         if (error)
3894                 goto end_function_error_clear_transact;
3895
3896         sep_dump_message(sep);
3897
3898         if (0 < call_hdr.num_dcbs) {
3899                 error = sep_activate_dcb_dmatables_context(sep,
3900                                 &dcb_region,
3901                                 &dmatables_region,
3902                                 dma_ctx);
3903                 if (error)
3904                         goto end_function_error_clear_transact;
3905         }
3906
3907         /* Send command to SEP */
3908         error = sep_send_command_handler(sep);
3909         if (error)
3910                 goto end_function_error_clear_transact;
3911
3912         /* Store DMA context for the transaction */
3913         private_data->dma_ctx = dma_ctx;
3914         /* Update call status */
3915         set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3916         error = count_user;
3917
3918         up(&sep->sep_doublebuf);
3919         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3920                 current->pid);
3921
3922         goto end_function;
3923
3924 end_function_error_clear_transact:
3925         sep_end_transaction_handler(sep, &dma_ctx, call_status,
3926                                                 &private_data->my_queue_elem);
3927
3928 end_function_error_doublebuf:
3929         up(&sep->sep_doublebuf);
3930         dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3931                 current->pid);
3932
3933 end_function_error:
3934         if (dma_ctx)
3935                 sep_free_dma_table_data_handler(sep, &dma_ctx);
3936
3937 end_function:
3938         kfree(dcb_region);
3939         kfree(dmatables_region);
3940         kfree(msg_region);
3941
3942         return error;
3943 }
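
/*
 * Illustrative user-space fastcall round trip (a sketch only; buffer
 * packing beyond the header layout shown earlier is assumed, and error
 * handling is omitted):
 *
 *	struct sep_fastcall_hdr hdr = {
 *		.magic      = SEP_FC_MAGIC,
 *		.num_dcbs   = 1,
 *		.msg_len    = msg_len,
 *		.secure_dma = 0,
 *	};
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &dcb, sizeof(dcb));
 *	memcpy(buf + sizeof(hdr) + sizeof(dcb), msg, msg_len);
 *	write(fd, buf, sizeof(hdr) + sizeof(dcb) + msg_len);
 *	read(fd, reply, reply_len);	blocks until the SEP completes,
 *					then returns the message area
 */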
3944 /**
3945  *      sep_seek - Handler for seek system call
3946  *      @filp: File pointer
3947  *      @offset: File offset
3948  *      @origin: Options for offset
3949  *
3950  *      Fastcall interface does not support seeking, all reads
3951  *      and writes are from/to offset zero
3952  */
3953 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3954 {
3955         return -ENOSYS;
3956 }
3957
3958
3959
3960 /**
3961  * sep_file_operations - file operation on sep device
3962  * @sep_ioctl:  ioctl handler from user space call
3963  * @sep_poll:   poll handler
3964  * @sep_open:   handles sep device open request
3965  * @sep_release:handles sep device release request
3966  * @sep_mmap:   handles memory mapping requests
3967  * @sep_read:   handles read request on sep device
3968  * @sep_write:  handles write request on sep device
3969  * @sep_seek:   handles seek request on sep device
3970  */
3971 static const struct file_operations sep_file_operations = {
3972         .owner = THIS_MODULE,
3973         .unlocked_ioctl = sep_ioctl,
3974         .poll = sep_poll,
3975         .open = sep_open,
3976         .release = sep_release,
3977         .mmap = sep_mmap,
3978         .read = sep_read,
3979         .write = sep_write,
3980         .llseek = sep_seek,
3981 };
3982
3983 /**
3984  * sep_sysfs_read - read sysfs entry per given arguments
3985  * @filp: file pointer
3986  * @kobj: kobject pointer
3987  * @attr: binary file attributes
3988  * @buf: read to this buffer
3989  * @pos: offset to read
3990  * @count: amount of data to read
3991  *
3992  * Reads the SEP driver's transaction status queue into the given buffer.
3993  */
3994 static ssize_t
3995 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3996                 struct bin_attribute *attr,
3997                 char *buf, loff_t pos, size_t count)
3998 {
3999         unsigned long lck_flags;
4000         size_t nleft = count;
4001         struct sep_device *sep = sep_dev;
4002         struct sep_queue_info *queue_elem = NULL;
4003         u32 queue_num = 0;
4004         u32 i = 1;
4005
4006         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4007
4008         queue_num = sep->sep_queue_num;
4009         if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4010                 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4011
4013         if (count < sizeof(queue_num)
4014                         + (queue_num * sizeof(struct sep_queue_data))) {
4015                 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4016                 return -EINVAL;
4017         }
4018
4019         memcpy(buf, &queue_num, sizeof(queue_num));
4020         buf += sizeof(queue_num);
4021         nleft -= sizeof(queue_num);
4022
4023         list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4024                 if (i++ > queue_num)
4025                         break;
4026
4027                 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4028                 nleft -= sizeof(queue_elem->data);
4029                 buf += sizeof(queue_elem->data);
4030         }
4031         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4032
4033         return count - nleft;
4034 }
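/*
 * The attribute body is a u32 element count followed by up to
 * SEP_DOUBLEBUF_USERS_LIMIT struct sep_queue_data records; @pos is
 * ignored above, so the file is meant to be read in a single read()
 * from offset zero. A hedged userspace sketch (the sysfs path is an
 * assumption based on the misc device registration):
 *
 *      char buf[4096];
 *      int fd = open("/sys/class/misc/sep/queue_status", O_RDONLY);
 *      ssize_t n = read(fd, buf, sizeof(buf));
 *      u32 nelems;
 *      memcpy(&nelems, buf, sizeof(nelems));
 *      // nelems records follow at buf + sizeof(nelems)
 */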
4035
4036 /**
4037  * queue_status - binary sysfs attribute for the transaction status queue
4038  * @attr: attributes (name & permissions)
4039  * @read: function pointer to read this file
4040  * @size: maximum size of binary attribute
4041  */
4042 static const struct bin_attribute queue_status = {
4043         .attr = {.name = "queue_status", .mode = 0444},
4044         .read = sep_sysfs_read,
4045         .size = sizeof(u32)
4046                 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4047 };
4048
4049 /**
4050  * sep_register_driver_with_fs - register misc device and sysfs entry
4051  * @sep: pointer to struct sep_device
4052  *
4053  * This function registers the misc device and its sysfs attribute
4054  */
4055 static int sep_register_driver_with_fs(struct sep_device *sep)
4056 {
4057         int ret_val;
4058
4059         sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4060         sep->miscdev_sep.name = SEP_DEV_NAME;
4061         sep->miscdev_sep.fops = &sep_file_operations;
4062
4063         ret_val = misc_register(&sep->miscdev_sep);
4064         if (ret_val) {
4065                 dev_warn(&sep->pdev->dev, "misc_register failed for SEP %x\n",
4066                         ret_val);
4067                 return ret_val;
4068         }
4069
4070         ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4071                                                                 &queue_status);
4072         if (ret_val) {
4073                 dev_warn(&sep->pdev->dev, "sysfs attribute creation failed for SEP %x\n",
4074                         ret_val);
4075                 misc_deregister(&sep->miscdev_sep);
4076                 return ret_val;
4077         }
4078
4079         return ret_val;
4080 }
4081
4083 /**
4084  * sep_probe - probe a matching PCI device
4085  * @pdev:       pci_device
4086  * @ent: pci_device_id
4087  *
4088  * Attempt to set up and configure a SEP device that has been
4089  * discovered by the PCI layer. Allocates all required resources.
4090  */
4091 static int sep_probe(struct pci_dev *pdev,
4092         const struct pci_device_id *ent)
4093 {
4094         int error = 0;
4095         struct sep_device *sep = NULL;
4096
4097         if (sep_dev != NULL) {
4098                 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4099                 return -EBUSY;
4100         }
4101
4102         /* Enable the device */
4103         error = pci_enable_device(pdev);
4104         if (error) {
4105                 dev_warn(&pdev->dev, "error enabling pci device\n");
4106                 goto end_function;
4107         }
4108
4109         /* Allocate the sep_device structure for this device */
4110         sep_dev = kzalloc(sizeof(struct sep_device), GFP_KERNEL);
4111         if (sep_dev == NULL) {
4112                 error = -ENOMEM;
4113                 goto end_function_disable_device;
4114         }
4115
4116         /*
4117          * We're going to use another variable for actually
4118          * working with the device; this way, if we have
4119          * multiple devices in the future, it would be easier
4120          * to make appropriate changes
4121          */
4122         sep = sep_dev;
4123
4124         sep->pdev = pci_dev_get(pdev);
4125
4126         init_waitqueue_head(&sep->event_transactions);
4127         init_waitqueue_head(&sep->event_interrupt);
4128         spin_lock_init(&sep->snd_rply_lck);
4129         spin_lock_init(&sep->sep_queue_lock);
4130         sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4131
4132         INIT_LIST_HEAD(&sep->sep_queue_status);
4133
4134         dev_dbg(&sep->pdev->dev,
4135                 "sep probe: PCI obtained, device being prepared\n");
4136
4137         /* Set up our register area */
4138         sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4139         if (!sep->reg_physical_addr) {
4140                 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4141                 error = -ENODEV;
4142                 goto end_function_free_sep_dev;
4143         }
4144
4145         sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4146         if (!sep->reg_physical_end) {
4147                 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4148                 error = -ENODEV;
4149                 goto end_function_free_sep_dev;
4150         }
4151
4152         sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4153                 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4154         if (!sep->reg_addr) {
4155                 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4156                 error = -ENODEV;
4157                 goto end_function_free_sep_dev;
4158         }
4159
4160         dev_dbg(&sep->pdev->dev,
4161                 "Register area start %llx end %llx virtual %p\n",
4162                 (unsigned long long)sep->reg_physical_addr,
4163                 (unsigned long long)sep->reg_physical_end,
4164                 sep->reg_addr);
4165
4166         /* Allocate the shared area */
4167         sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4168                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4169                 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4170                 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4171                 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
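        /*
         * The shared area is one contiguous buffer carved into the
         * message, synchronous-DMA-tables, data-pool, static and
         * system-data regions sized above; the actual allocation and
         * mapping are done in sep_map_and_alloc_shared_area().
         */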
4172
4173         if (sep_map_and_alloc_shared_area(sep)) {
4174                 error = -ENOMEM;
4175                 /* Allocation failed */
4176                 goto end_function_error;
4177         }
4178
4179         /* Clear ICR register */
4180         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4181
4182         /* Set the IMR register - open only GPR 2 */
4183         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
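        /* (~(0x1 << 13)) clears only bit 13, i.e. unmasks GPR 2 alone */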
4184
4185         /* Read send/receive counters from SEP */
4186         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4187         sep->reply_ct &= 0x3FFFFFFF;
4188         sep->send_ct = sep->reply_ct;
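        /*
         * Only the low 30 bits of GPR 2 carry the counter value; the
         * 0x3FFFFFFF mask drops the top two bits (presumably flags).
         */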
4189
4190         /* Get the interrupt line */
4191         error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4192                 "sep_driver", sep);
4193
4194         if (error)
4195                 goto end_function_deallocate_sep_shared_area;
4196
4197         /* The new chip requires a shared area reconfigure */
4198         error = sep_reconfig_shared_area(sep);
4199         if (error)
4200                 goto end_function_free_irq;
4201
4202         sep->in_use = 1;
4203
4204         /* Finally magic up the device nodes */
4205         /* Register driver with the fs */
4206         error = sep_register_driver_with_fs(sep);
4207
4208         if (error) {
4209                 dev_err(&sep->pdev->dev, "error registering dev file\n");
4210                 goto end_function_free_irq;
4211         }
4212
4213         sep->in_use = 0; /* Done touching the device */
4214 #ifdef SEP_ENABLE_RUNTIME_PM
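        /*
         * Enable runtime PM: drop one usage-count reference without
         * triggering an immediate idle, allow runtime suspend, and arm
         * autosuspend so the device may power down after SUSPEND_DELAY
         * milliseconds of inactivity.
         */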
4215         pm_runtime_put_noidle(&sep->pdev->dev);
4216         pm_runtime_allow(&sep->pdev->dev);
4217         pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4218                 SUSPEND_DELAY);
4219         pm_runtime_use_autosuspend(&sep->pdev->dev);
4220         pm_runtime_mark_last_busy(&sep->pdev->dev);
4221         sep->power_save_setup = 1;
4222 #endif
4223         /* register kernel crypto driver */
4224 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4225         error = sep_crypto_setup();
4226         if (error) {
4227                 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4228                 goto end_function_free_irq;
4229         }
4230 #endif
4231         goto end_function;
4232
4233 end_function_free_irq:
4234         free_irq(pdev->irq, sep);
4235
4236 end_function_deallocate_sep_shared_area:
4237         /* De-allocate shared area */
4238         sep_unmap_and_free_shared_area(sep);
4239
4240 end_function_error:
4241         iounmap(sep->reg_addr);
4242
4243 end_function_free_sep_dev:
4244         pci_dev_put(sep_dev->pdev);
4245         kfree(sep_dev);
4246         sep_dev = NULL;
4247
4248 end_function_disable_device:
4249         pci_disable_device(pdev);
4250
4251 end_function:
4252         return error;
4253 }
4254
4255 /**
4256  * sep_remove - handles removing device from pci subsystem
4257  * @pdev:       pointer to pci device
4258  *
4259  * This function handles removing our sep device from the pci subsystem
4260  * on exit or module unload. It frees all used resources and unmaps any
4261  * mapped memory regions.
4262  */
4263 static void sep_remove(struct pci_dev *pdev)
4264 {
4265         struct sep_device *sep = sep_dev;
4266
4267         /* Unregister from fs */
4268         misc_deregister(&sep->miscdev_sep);
4269
4270         /* Unregister from kernel crypto */
4271 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4272         sep_crypto_takedown();
4273 #endif
4274         /* Free the irq */
4275         free_irq(sep->pdev->irq, sep);
4276
4277         /* Free the shared area  */
4278         sep_unmap_and_free_shared_area(sep_dev);
4279         iounmap(sep_dev->reg_addr);
4280
4281 #ifdef SEP_ENABLE_RUNTIME_PM
4282         if (sep->in_use) {
4283                 sep->in_use = 0;
4284                 pm_runtime_forbid(&sep->pdev->dev);
4285                 pm_runtime_get_noresume(&sep->pdev->dev);
4286         }
4287 #endif
4288         pci_dev_put(sep_dev->pdev);
4289         kfree(sep_dev);
4290         sep_dev = NULL;
4291 }
4292
4293 /* Initialize struct pci_device_id for our driver */
4294 static const struct pci_device_id sep_pci_id_tbl[] = {
4295         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4296         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4297         {0}
4298 };
4299
4300 /* Export our pci_device_id structure to user space */
4301 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
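/*
 * MODULE_DEVICE_TABLE emits the ID list as module aliases, letting
 * udev/modprobe autoload this driver when a matching PCI device
 * (8086:0826 or 8086:08e9) is enumerated.
 */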
4302
4303 #ifdef SEP_ENABLE_RUNTIME_PM
4304
4305 /**
4306  * sep_pci_resume - resume routine when waking up from S3 state
4307  * @dev:        pointer to sep device
4308  *
4309  * Wakes up the SEP driver when the system resumes from S3 state,
4310  * i.e. suspend to RAM. RAM contents remain intact.
4311  * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4312  */
4313 static int sep_pci_resume(struct device *dev)
4314 {
4315         struct sep_device *sep = sep_dev;
4316
4317         dev_dbg(&sep->pdev->dev, "pci resume called\n");
4318
4319         if (sep->power_state == SEP_DRIVER_POWERON)
4320                 return 0;
4321
4322         /* Clear ICR register */
4323         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4324
4325         /* Set the IMR register - open only GPR 2 */
4326         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4327
4328         /* Read send/receive counters from SEP */
4329         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4330         sep->reply_ct &= 0x3FFFFFFF;
4331         sep->send_ct = sep->reply_ct;
4332
4333         sep->power_state = SEP_DRIVER_POWERON;
4334
4335         return 0;
4336 }
4337
4338 /**
4339  * sep_pci_suspend - suspend routine when going to S3 state
4340  * @dev:        pointer to sep device
4341  *
4342  * Suspends the SEP driver when the system enters S3 state, i.e. suspend
4343  * to RAM. RAM contents remain intact and powered during this suspend.
4344  * Notes - revisit with more understanding of pm, ICR/IMR
4345  */
4346 static int sep_pci_suspend(struct device *dev)
4347 {
4348         struct sep_device *sep = sep_dev;
4349
4350         dev_dbg(&sep->pdev->dev, "pci suspend called\n");
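        /*
         * Refuse to suspend while a transaction is in flight; a
         * nonzero return makes the PM core abort the system suspend.
         */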
4351         if (sep->in_use == 1)
4352                 return -EAGAIN;
4353
4354         sep->power_state = SEP_DRIVER_POWEROFF;
4355
4356         /* Clear ICR register */
4357         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4358
4359         /* Set the IMR to block all */
4360         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4361
4362         return 0;
4363 }
4364
4365 /**
4366  * sep_pm_runtime_resume - runtime resume routine
4367  * @dev:        pointer to sep device
4368  *
4369  * Notes - revisit with more understanding of pm, ICR/IMR & counters
4370  */
4371 static int sep_pm_runtime_resume(struct device *dev)
4372 {
4374         u32 retval2;
4375         u32 delay_count;
4376         struct sep_device *sep = sep_dev;
4377
4378         dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4379
4380         /*
4381          * Wait until the SCU boot-ready bit is set. This is done by
4382          * polling up to SCU_DELAY_MAX (50) times, waiting
4383          * SCU_DELAY_ITERATION (10 microseconds) between reads, for a
4384          * total budget of 500 microseconds. The bit may become set at
4385          * any point within 500 microseconds after each power resume.
4386          */
4387         retval2 = 0;
4388         delay_count = 0;
4389         while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4390                 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4391                 retval2 &= 0x00000008;
4392                 if (!retval2) {
4393                         udelay(SCU_DELAY_ITERATION);
4394                         delay_count += 1;
4395                 }
4396         }
4397
4398         if (!retval2) {
4399                 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4400                 return -EINVAL;
4401         }
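        /*
         * On kernels providing <linux/iopoll.h>, the open-coded poll
         * above could be written as the following sketch (assuming
         * sep_read_reg() is a plain readl() of the ioremapped window,
         * with err a local int):
         *
         *      int err = readl_poll_timeout(
         *              sep->reg_addr + HW_HOST_SEP_HOST_GPR3_REG_ADDR,
         *              retval2, retval2 & 0x00000008,
         *              SCU_DELAY_ITERATION,
         *              SCU_DELAY_ITERATION * SCU_DELAY_MAX);
         */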
4402
4403         /* Clear ICR register */
4404         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4405
4406         /* Set the IMR register - open only GPR 2 */
4407         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4408
4409         /* Read send/receive counters from SEP */
4410         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4411         sep->reply_ct &= 0x3FFFFFFF;
4412         sep->send_ct = sep->reply_ct;
4413
4414         return 0;
4415 }
4416
4417 /**
4418  * sep_pm_runtime_suspend - runtime suspend routine
4419  * @dev:        pointer to sep device
4420  *
4421  * Notes - revisit with more understanding of pm
4422  */
4423 static int sep_pm_runtime_suspend(struct device *dev)
4424 {
4425         struct sep_device *sep = sep_dev;
4426
4427         dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4428
4429         /* Clear ICR register */
4430         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4431         return 0;
4432 }
4433
4434 /**
4435  * sep_pm - power management for sep driver
4436  * @sep_pm_runtime_resume:      resume - no communication with cpu & main memory
4437  * @sep_pm_runtime_suspend:     suspend - no communication with cpu & main memory
4438  * @sep_pci_suspend:            suspend - main memory is still ON
4439  * @sep_pci_resume:             resume - main memory is still ON
4440  */
4441 static const struct dev_pm_ops sep_pm = {
4442         .runtime_resume = sep_pm_runtime_resume,
4443         .runtime_suspend = sep_pm_runtime_suspend,
4444         .resume = sep_pci_resume,
4445         .suspend = sep_pci_suspend,
4446 };
4447 #endif /* SEP_ENABLE_RUNTIME_PM */
4448
4449 /**
4450  * sep_pci_driver - registers this device with pci subsystem
4451  * @name:       name identifier for this driver
4452  * @sep_pci_id_tbl:     pointer to struct pci_device_id table
4453  * @sep_probe:  pointer to probe function in PCI driver
4454  * @sep_remove: pointer to remove function in PCI driver
4455  */
4456 static struct pci_driver sep_pci_driver = {
4457 #ifdef SEP_ENABLE_RUNTIME_PM
4458         .driver = {
4459                 .pm = &sep_pm,
4460         },
4461 #endif
4462         .name = "sep_sec_driver",
4463         .id_table = sep_pci_id_tbl,
4464         .probe = sep_probe,
4465         .remove = sep_remove
4466 };
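/*
 * module_pci_driver() replaces the usual module_init()/module_exit()
 * boilerplate; it expands to roughly:
 *
 *      static int __init sep_pci_driver_init(void)
 *      {
 *              return pci_register_driver(&sep_pci_driver);
 *      }
 *      module_init(sep_pci_driver_init);
 *
 *      static void __exit sep_pci_driver_exit(void)
 *      {
 *              pci_unregister_driver(&sep_pci_driver);
 *      }
 *      module_exit(sep_pci_driver_exit);
 */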
4467
4468 module_pci_driver(sep_pci_driver);
4469 MODULE_LICENSE("GPL");