/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <linux/of.h>
#include <linux/rockchip-iovmm.h>

#include "rockchip-iommu.h"

/* We do not consider super section mapping (16MB) */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

enum iommu_entry_flags {
        IOMMU_FLAGS_PRESENT = 0x01,
        IOMMU_FLAGS_READ_PERMISSION = 0x02,
        IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
        IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
        IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
        IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
        IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
        IOMMU_FLAGS_READ_CACHEABLE = 0x80,
        IOMMU_FLAGS_READ_ALLOCATE = 0x100,
        IOMMU_FLAGS_MASK = 0x1FF,
};

#define lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
#define lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
#define lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
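
/*
 * Worked example of the two-level walk: for iova 0x12345678,
 *   lv1ent_offset = (0x12345678 >> 22) & 0x3FF = 0x048  (4 MiB section)
 *   lv2ent_offset = (0x12345678 >> 12) & 0x3FF = 0x345  (4 KiB page)
 *   spage_offs    =  0x12345678        & 0xFFF = 0x678  (byte offset)
 */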

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
/* level-2 entries get read and write permission by default */
#define mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
                             IOMMU_FLAGS_READ_PERMISSION | \
                             IOMMU_FLAGS_WRITE_PERMISSION)
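/*
 * Entry encodings, as implied by the macros above:
 *   level-1 (DTE): physical address of a level-2 table | PRESENT
 *   level-2 (PTE): physical page address | READ | WRITE | PRESENT
 * Both tables hold 1024 4-byte entries, so each fits in one 4 KiB page.
 */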

#define IOMMU_REG_POLL_COUNT_FAST 1000

/* rk3036: vpu and hevc share one AHB interface */
#define BIT_VCODEC_SEL (1<<3)

/**
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
enum iommu_register {
        /**< Current Page Directory Pointer */
        IOMMU_REGISTER_DTE_ADDR = 0x0000,
        /**< Status of the MMU */
        IOMMU_REGISTER_STATUS = 0x0004,
        /**< Command register, used to control the MMU */
        IOMMU_REGISTER_COMMAND = 0x0008,
        /**< Logical address of the last page fault */
        IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
        /**< Used to invalidate the mapping of a single page from the MMU */
        IOMMU_REGISTER_ZAP_ONE_LINE = 0x0010,
        /**< Raw interrupt status, all interrupts visible */
        IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
        /**< Indicate to the MMU that the interrupt has been received */
        IOMMU_REGISTER_INT_CLEAR = 0x0018,
        /**< Enable/disable types of interrupts */
        IOMMU_REGISTER_INT_MASK = 0x001C,
        /**< Interrupt status based on the mask */
        IOMMU_REGISTER_INT_STATUS = 0x0020,
        IOMMU_REGISTER_AUTO_GATING = 0x0024
};

enum iommu_command {
        /**< Enable paging (memory translation) */
        IOMMU_COMMAND_ENABLE_PAGING = 0x00,
        /**< Disable paging (memory translation) */
        IOMMU_COMMAND_DISABLE_PAGING = 0x01,
        /**< Enable stall on page fault */
        IOMMU_COMMAND_ENABLE_STALL = 0x02,
        /**< Disable stall on page fault */
        IOMMU_COMMAND_DISABLE_STALL = 0x03,
        /**< Zap the entire page table cache */
        IOMMU_COMMAND_ZAP_CACHE = 0x04,
        /**< Page fault processed */
        IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
        /**< Reset the MMU back to power-on settings */
        IOMMU_COMMAND_HARD_RESET = 0x06
};

/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
enum iommu_interrupt {
        IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
        IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
};

enum iommu_status_bits {
        IOMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
        IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
        IOMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
        IOMMU_STATUS_BIT_IDLE                = 1 << 3,
        IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        IOMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
};

/**
 * Size of an MMU page in bytes
 */
#define IOMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by a page table page
 */
#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define IOMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)

/**
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define IOMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)

/**
 * Extract the memory address from a PDE/PTE entry
 */
#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))

static struct kmem_cache *lv2table_kmem_cache;

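/*
 * The GRF write below follows the usual Rockchip convention: the upper 16
 * bits of the written value act as a write-enable mask for the lower 16
 * bits, so pairing BIT_VCODEC_SEL with (BIT_VCODEC_SEL << 16) updates only
 * that one bit of RK3036_GRF_SOC_CON1.
 */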
static void rockchip_vcodec_select(const char *string)
{
        if (strstr(string, "hevc")) {
                writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) |
                              (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16),
                              RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
        } else if (strstr(string, "vpu")) {
                writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) &
                               (~BIT_VCODEC_SEL)) | (BIT_VCODEC_SEL << 16),
                               RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
        }
}

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

static const char *iommu_fault_name[IOMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "BUS ERROR",
        "UNKNOWN FAULT"
};

struct rk_iommu_domain {
        struct list_head clients; /* list of iommu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

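/*
 * data->activations is a simple enable refcount: the hardware is only
 * touched on the 0 -> 1 and 1 -> 0 transitions, so nested enable/disable
 * pairs from multiple callers are safe.
 */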
static bool set_iommu_active(struct iommu_drvdata *data)
{
        /* return true if the IOMMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_iommu_inactive(struct iommu_drvdata *data)
{
        /* return true if the IOMMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_iommu_active(struct iommu_drvdata *data)
{
        return data->activations > 0;
}

static void iommu_disable_stall(void __iomem *base)
{
        int i;
        u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
                return;
        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
                return;
        }
        __raw_writel(IOMMU_COMMAND_DISABLE_STALL,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                u32 status = __raw_readl(base + IOMMU_REGISTER_STATUS);

                if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))
                        break;
                if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
                        break;
                /* check the freshly read status, not the stale copy */
                if (0 == (status & IOMMU_STATUS_BIT_PAGING_ENABLED))
                        break;
        }
        if (IOMMU_REG_POLL_COUNT_FAST == i)
                pr_err("Disable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));
}

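/*
 * Stall protocol: the MMU must be stalled (translation paused, outstanding
 * transactions drained) before the page-table base is changed or the TLB
 * is zapped, and unstalled afterwards. Stalling is refused while a page
 * fault is outstanding.
 */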
static bool iommu_enable_stall(void __iomem *base)
{
        int i;

        u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
                return true;
        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it is in pagefault state.\n");
                return false;
        }
        __raw_writel(IOMMU_COMMAND_ENABLE_STALL,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
                if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
                        break;
                if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
                    (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
                        break;
                if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))
                        break;
        }
        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));
                return false;
        }
        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it has a pagefault.\n");
                return false;
        }
        return true;
}

static bool iommu_enable_paging(void __iomem *base)
{
        int i;

        __raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
                                IOMMU_STATUS_BIT_PAGING_ENABLED)
                        break;
        }
        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));
                return false;
        }
        return true;
}

static bool iommu_disable_paging(void __iomem *base)
{
        int i;

        __raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
                                  IOMMU_STATUS_BIT_PAGING_ENABLED))
                        break;
        }
        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Disable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + IOMMU_REGISTER_STATUS));
                return false;
        }
        return true;
}

static void iommu_page_fault_done(void __iomem *base, const char *dbgname)
{
        pr_info("MMU: %s: Leaving page fault mode\n",
                dbgname);
        __raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
                     base + IOMMU_REGISTER_COMMAND);
}

static bool iommu_zap_tlb(void __iomem *base)
{
        bool stall_success = iommu_enable_stall(base);

        __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
                     base + IOMMU_REGISTER_COMMAND);
        if (!stall_success)
                return false;
        iommu_disable_stall(base);
        return true;
}

static inline bool iommu_raw_reset(void __iomem *base)
{
        int i;

        /*
         * Probe the DTE register with a known pattern; the low 12 bits are
         * ignored by the hardware (the table is 4 KiB aligned), so a working
         * register reads back as 0xCAFEB000.
         */
        __raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);

        if (0xCAFEB000 != __raw_readl(base + IOMMU_REGISTER_DTE_ADDR)) {
                pr_err("%s: DTE register read-back check failed.\n", __func__);
                return false;
        }
        __raw_writel(IOMMU_COMMAND_HARD_RESET,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)
                        break;
        }
        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("%s: Reset request failed, MMU status is 0x%08X\n",
                       __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
                return false;
        }
        return true;
}

static void __iommu_set_ptbase(void __iomem *base, unsigned long pgd)
{
        __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
}

static bool iommu_reset(void __iomem *base, const char *dbgname)
{
        bool ok;

        ok = iommu_enable_stall(base);
        if (!ok) {
                pr_err("%s: stall failed: %s\n", __func__, dbgname);
                return ok;
        }
        ok = iommu_raw_reset(base);
        if (ok)
                __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                             IOMMU_INTERRUPT_READ_BUS_ERROR,
                             base + IOMMU_REGISTER_INT_MASK);
        iommu_disable_stall(base);
        if (!ok)
                pr_err("%s: failed: %s\n", __func__, dbgname);
        return ok;
}

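/*
 * The MMU walks its page tables in main memory, not through the CPU
 * caches, so every table update must be cleaned out of both the inner
 * (L1) and outer (L2) caches before the hardware can observe it.
 */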
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}

static void set_fault_handler(struct iommu_drvdata *data,
                                rockchip_iommu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

static int default_fault_handler(struct device *dev,
                                 enum rk_iommu_inttype itype,
                                 unsigned long pgtable_base,
                                 unsigned long fault_addr,
                                 unsigned int status)
{
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        if (!data) {
                pr_err("%s: iommu device not assigned yet\n", __func__);
                return 0;
        }
        if ((itype >= IOMMU_FAULTS_NUM) || (itype < IOMMU_PAGEFAULT))
                itype = IOMMU_FAULT_UNKNOWN;

        if (itype == IOMMU_BUSERROR)
                pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n",
                       iommu_fault_name[itype], fault_addr, pgtable_base);

        if (itype == IOMMU_PAGEFAULT)
                pr_err("IOMMU: Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
                       fault_addr,
                       (status >> 6) & 0x1F,
                       (status & IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE) ?
                                "write" : "read",
                       data->dbgname);

        pr_err("Generating kernel OOPS, because it is unrecoverable.\n");

        BUG();

        return 0;
}

static void dump_pagetbl(u32 fault_address, u32 addr_dte)
{
        u32 lv1_offset;
        u32 lv2_offset;

        u32 *lv1_entry_pa;
        u32 *lv1_entry_va;
        u32 *lv1_entry_value;

        u32 *lv2_base;
        u32 *lv2_entry_pa;
        u32 *lv2_entry_va;
        u32 *lv2_entry_value;

        lv1_offset = lv1ent_offset(fault_address);
        lv2_offset = lv2ent_offset(fault_address);

        lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
        lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
        lv1_entry_value = (u32 *)(*lv1_entry_va);

        lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
        lv2_entry_pa = (u32 *)lv2_base + lv2_offset;
        lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset;
        lv2_entry_value = (u32 *)(*lv2_entry_va);

        pr_info("fault address = 0x%08x, dte addr pa = 0x%08x, va = 0x%08x\n",
                fault_address, addr_dte, (u32)__va(addr_dte));
        pr_info("lv1_offset = 0x%x, lv1_entry_pa = 0x%08x, lv1_entry_va = 0x%08x\n",
                lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va);
        pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x, lv2_base = 0x%08x\n",
                (u32)lv1_entry_value, (u32)lv2_base);
        pr_info("lv2_offset = 0x%x, lv2_entry_pa = 0x%08x, lv2_entry_va = 0x%08x\n",
                lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va);
        pr_info("lv2_entry value(*lv2_entry_va) = 0x%08x\n",
                (u32)lv2_entry_value);
}

static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
{
        /* The System MMU is stalled while the interrupt is serviced. */
        struct iommu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum rk_iommu_inttype itype = IOMMU_FAULT_UNKNOWN;
        u32 status;
        u32 rawstat;
        u32 int_status;
        u32 fault_address = 0;
        int i, ret = 0;

        read_lock(&data->lock);

        if (!is_iommu_active(data)) {
                read_unlock(&data->lock);
                return IRQ_HANDLED;
        }

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        pdev = to_platform_device(data->iommu);

        for (i = 0; i < data->num_res_irq; i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == data->num_res_irq) {
                itype = IOMMU_FAULT_UNKNOWN;
        } else {
                int_status = __raw_readl(data->res_bases[i] +
                                         IOMMU_REGISTER_INT_STATUS);

                if (int_status != 0) {
                        /* mask the interrupt while it is handled */
                        __raw_writel(0x00, data->res_bases[i] +
                                     IOMMU_REGISTER_INT_MASK);

                        rawstat = __raw_readl(data->res_bases[i] +
                                              IOMMU_REGISTER_INT_RAWSTAT);

                        if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
                                fault_address = __raw_readl(data->res_bases[i] +
                                IOMMU_REGISTER_PAGE_FAULT_ADDR);
                                itype = IOMMU_PAGEFAULT;
                        } else if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
                                itype = IOMMU_BUSERROR;
                        } else {
                                goto out;
                        }
                        dump_pagetbl(fault_address,
                                     __raw_readl(data->res_bases[i] +
                                     IOMMU_REGISTER_DTE_ADDR));
                } else {
                        goto out;
                }
        }

        /* only touch res_bases[i] when the IRQ was matched to a resource */
        if (data->fault_handler && (i < data->num_res_irq)) {
                unsigned long base = __raw_readl(data->res_bases[i] +
                                                 IOMMU_REGISTER_DTE_ADDR);
                status = __raw_readl(data->res_bases[i] +
                                     IOMMU_REGISTER_STATUS);
                ret = data->fault_handler(data->dev, itype, base,
                                          fault_address, status);
        }

        if (!ret && (itype != IOMMU_FAULT_UNKNOWN)) {
                if (IOMMU_PAGEFAULT == itype) {
                        iommu_zap_tlb(data->res_bases[i]);
                        iommu_page_fault_done(data->res_bases[i],
                                               data->dbgname);
                        __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                                     IOMMU_INTERRUPT_READ_BUS_ERROR,
                                     data->res_bases[i] +
                                     IOMMU_REGISTER_INT_MASK);
                }
        } else {
                pr_err("(%s) %s is not handled.\n",
                       data->dbgname, iommu_fault_name[itype]);
        }

out:
        read_unlock(&data->lock);

        return IRQ_HANDLED;
}

static bool __rockchip_iommu_disable(struct iommu_drvdata *data)
{
        unsigned long flags;
        int i;
        bool disabled = false;

        write_lock_irqsave(&data->lock, flags);

        if (!set_iommu_inactive(data))
                goto finish;

        for (i = 0; i < data->num_res_mem; i++)
                iommu_disable_paging(data->res_bases[i]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                pr_info("(%s) Disabled\n", data->dbgname);
        else
                pr_info("(%s) %d times left to be disabled\n",
                        data->dbgname, data->activations);

        return disabled;
}

/* __rockchip_iommu_enable: Enables the IOMMU
 *
 * returns -error if an error occurred and the IOMMU is not enabled,
 * 0 if the IOMMU has just been enabled and 1 if the IOMMU was already
 * enabled before.
 */
static int __rockchip_iommu_enable(struct iommu_drvdata *data,
                                    unsigned long pgtable,
                                    struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_iommu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_iommu_inactive(data);
                } else {
                        ret = 1;
                }

                pr_info("(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        data->pgtable = pgtable;

        for (i = 0; i < data->num_res_mem; i++) {
                bool status;

                status = iommu_enable_stall(data->res_bases[i]);
                if (status) {
                        __iommu_set_ptbase(data->res_bases[i], pgtable);
                        __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
                                     data->res_bases[i] +
                                     IOMMU_REGISTER_COMMAND);
                }
                __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                             IOMMU_INTERRUPT_READ_BUS_ERROR,
                             data->res_bases[i] + IOMMU_REGISTER_INT_MASK);
                iommu_enable_paging(data->res_bases[i]);
                iommu_disable_stall(data->res_bases[i]);
        }

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}

bool rockchip_iommu_disable(struct device *dev)
{
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __rockchip_iommu_disable(data);

        return disabled;
}

void rockchip_iommu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        if (is_iommu_active(data)) {
                int i;

                for (i = 0; i < data->num_res_mem; i++) {
                        if (!iommu_zap_tlb(data->res_bases[i]))
                                pr_err("%s: invalidating TLB failed\n",
                                       data->dbgname);
                }
        } else {
                pr_info("(%s) Disabled. Skipping invalidating TLB.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
                                               dma_addr_t iova)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);
        /* only walk valid entries; an unmapped iova yields phys = 0 */
        if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);
                if (!lv2ent_fault(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr,
                       size_t size, short *pgcnt)
{
        if (!lv2ent_fault(pent))
                return -EADDRINUSE;

        *pent = mk_lv2ent_spage(paddr);
        pgtable_flush(pent, pent + 1);
        *pgcnt -= 1;
        return 0;
}
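
/*
 * priv->lv2entcnt[] tracks the number of free entries in each level-2
 * table (one counter per 4 MiB section): it starts at NUM_LV2ENTRIES,
 * is decremented by lv2set_page() on map and incremented on unmap, so a
 * counter back at NUM_LV2ENTRIES marks a level-2 table with no mappings.
 */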

static unsigned long *alloc_lv2entry(unsigned long *sent,
                                     unsigned long iova, short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                if (!pent)
                        return NULL;
                /* the cache guarantees LV2TABLE_SIZE-aligned tables */
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = mk_lv1ent_page(__pa(pent));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }
        return page_entry(sent, iova);
}

static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SPAGE_SIZE)
                        size = SPAGE_SIZE;
                goto done;
        }

        /* lv1ent_page(sent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        *ent = 0;
        size = SPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += 1;

done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return size;
}

static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;
        unsigned long *pent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        pent = alloc_lv2entry(entry, iova,
                              &priv->lv2entcnt[lv1ent_offset(iova)]);
        if (!pent)
                ret = -ENOMEM;
        else
                ret = lv2set_page(pent, paddr, size,
                                  &priv->lv2entcnt[lv1ent_offset(iova)]);

        if (ret) {
                pr_err("%s: Failed to map iova 0x%lx/0x%zx bytes\n", __func__,
                       iova, size);
        }
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static void rockchip_iommu_detach_device(struct iommu_domain *domain,
                                         struct device *dev)
{
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct iommu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto finish;

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        if (__rockchip_iommu_disable(data)) {
                pr_info("%s: Detached IOMMU with pgtable %#lx\n",
                        __func__, __pa(priv->pgtable));
                list_del(&data->node);
                INIT_LIST_HEAD(&data->node);
        } else {
                pr_info("%s: Detaching IOMMU with pgtable %#lx delayed\n",
                        __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);
}

static int rockchip_iommu_attach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);

        if (cpu_is_312x() || cpu_is_3036())
                rockchip_vcodec_select(data->dbgname);

        ret = __rockchip_iommu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already be on priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",
                       __func__, __pa(priv->pgtable));
        } else if (ret > 0) {
                pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",
                        __func__, __pa(priv->pgtable));
        } else {
                pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",
                        __func__, __pa(priv->pgtable));
        }

        return ret;
}

static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv = domain->priv;
        struct iommu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                if (cpu_is_312x() || cpu_is_3036())
                        rockchip_vcodec_select(data->dbgname);
                while (!rockchip_iommu_disable(data->dev))
                        ; /* until the IOMMU is actually disabled */
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                        __va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
        kfree(domain->priv);
        domain->priv = NULL;
}

static int rockchip_iommu_domain_init(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /*
         * The rk32xx IOMMU uses a two-level page table; level 1 and level 2
         * each have 1024 entries of 4 bytes, so one page is allocated for
         * each table.
         */
        priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |
                                                          __GFP_ZERO, 0);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
                                                    __GFP_ZERO, 0);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 0);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

static struct iommu_ops rk_iommu_ops = {
        .domain_init = rockchip_iommu_domain_init,
        .domain_destroy = rockchip_iommu_domain_destroy,
        .attach_dev = rockchip_iommu_attach_device,
        .detach_dev = rockchip_iommu_detach_device,
        .map = rockchip_iommu_map,
        .unmap = rockchip_iommu_unmap,
        .iova_to_phys = rockchip_iommu_iova_to_phys,
        .pgsize_bitmap = SPAGE_SIZE,
};
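
/*
 * pgsize_bitmap = SPAGE_SIZE advertises 4 KiB as the only supported page
 * size, so the IOMMU core splits larger map/unmap requests into 4 KiB
 * calls before they reach rockchip_iommu_map()/rockchip_iommu_unmap().
 */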

static int rockchip_iommu_prepare(void)
{
        int ret = 0;
        static int registered;

        if (registered)
                return 0;

        lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
                                                LV2TABLE_SIZE,
                                                LV2TABLE_SIZE,
                                                0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }
        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (!ret)
                registered = 1;
        else
                pr_err("%s: failed to register iommu ops with the bus\n",
                       __func__);
        return ret;
}

static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
                                            unsigned int type)
{
        int num_resources = 0;

        /* count the resources of the given type */
        while (platform_get_resource(pdev, type, num_resources))
                num_resources++;

        return num_resources;
}

static struct kobject *dump_mmu_object;

static ssize_t dump_mmu_pagetbl(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u32 fault_address;
        u32 iommu_dte;
        u32 mmu_base;
        void __iomem *base;
        int ret;

        ret = kstrtouint(buf, 0, &mmu_base);
        if (ret) {
                pr_info("%s is not in hexadecimal form.\n", buf);
                return ret;
        }
        base = ioremap(mmu_base, 0x100);
        if (!base)
                return -ENOMEM;
        iommu_dte = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
        fault_address = __raw_readl(base + IOMMU_REGISTER_PAGE_FAULT_ADDR);
        dump_pagetbl(fault_address, iommu_dte);
        iounmap(base);
        return count;
}

static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl);

void dump_iommu_sysfs_init(void)
{
        int ret;

        dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
        if (dump_mmu_object == NULL)
                return;
        ret = sysfs_create_file(dump_mmu_object,
                                &dev_attr_dump_mmu_pgtable.attr);
        if (ret)
                pr_err("%s: failed to create sysfs file\n", __func__);
}
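
/*
 * Usage (a debugging aid): write the physical base address of an IOMMU
 * instance to the attribute created above, e.g.
 *   echo 0x10110800 > /sys/rk_iommu/dump_mmu_pgtable
 * and the instance's current DTE address and last fault address are
 * decoded via dump_pagetbl(). The example address is illustrative only.
 */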

static int rockchip_iommu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct iommu_drvdata *data;

        dev = &pdev->dev;

        ret = rockchip_iommu_prepare();
        if (ret) {
                pr_err("%s failed\n", __func__);
                goto err_alloc;
        }

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }
        dev_set_drvdata(dev, data);

        if (pdev->dev.of_node) {
                of_property_read_string(pdev->dev.of_node,
                                        "dbgname", &(data->dbgname));
        } else {
                pr_info("dbgname not assigned in device tree or device node does not exist\n");
        }

        pr_info("(%s) Enter\n", data->dbgname);

        data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
                                IORESOURCE_MEM);
        if (0 == data->num_res_mem) {
                pr_err("can't find iommu memory resource\n");
                ret = -ENOENT;
                goto err_init;
        }
        pr_info("data->num_res_mem=%d\n", data->num_res_mem);
        data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
                                IORESOURCE_IRQ);
        if (0 == data->num_res_irq) {
                pr_err("can't find iommu irq resource\n");
                ret = -ENOENT;
                goto err_init;
        }

        /* zeroed so the error path can tell mapped from unmapped entries */
        data->res_bases = kcalloc(data->num_res_mem,
                                  sizeof(*data->res_bases), GFP_KERNEL);
        if (data->res_bases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->num_res_mem; i++) {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        pr_err("Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }
                data->res_bases[i] = ioremap(res->start, resource_size(res));
                pr_info("res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
                        res->start, i, (unsigned int)data->res_bases[i]);
                if (!data->res_bases[i]) {
                        pr_err("Unable to map IOMEM @ PA:%#x\n", res->start);
                        ret = -ENOENT;
                        goto err_res;
                }

                if (cpu_is_312x() || cpu_is_3036())
                        rockchip_vcodec_select(data->dbgname);

                if (!strstr(data->dbgname, "isp")) {
                        if (!iommu_reset(data->res_bases[i], data->dbgname)) {
                                ret = -ENOENT;
                                goto err_res;
                        }
                }
        }

        for (i = 0; i < data->num_res_irq; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        pr_err("Unable to find IRQ resource\n");
                        ret = -ENOENT;
                        goto err_irq;
                }
                ret = request_irq(ret, rockchip_iommu_irq,
                                  IRQF_SHARED, dev_name(dev), data);
                if (ret) {
                        pr_err("Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }
        ret = rockchip_init_iovmm(dev, &data->vmm);
        if (ret)
                goto err_irq;

        data->iommu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        set_fault_handler(data, &default_fault_handler);

        pr_info("(%s) Initialized\n", data->dbgname);
        return 0;

err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->num_res_mem-- > 0)
                if (data->res_bases[data->num_res_mem])
                        iounmap(data->res_bases[data->num_res_mem]);
        kfree(data->res_bases);
err_init:
        /* 'data' is devm-allocated and must not be freed here */
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}


#ifdef CONFIG_OF
static const struct of_device_id iommu_dt_ids[] = {
        { .compatible = IEP_IOMMU_COMPATIBLE_NAME},
        { .compatible = VIP_IOMMU_COMPATIBLE_NAME},
        { .compatible = VOPB_IOMMU_COMPATIBLE_NAME},
        { .compatible = VOPL_IOMMU_COMPATIBLE_NAME},
        { .compatible = HEVC_IOMMU_COMPATIBLE_NAME},
        { .compatible = VPU_IOMMU_COMPATIBLE_NAME},
        { .compatible = ISP_IOMMU_COMPATIBLE_NAME},
        { .compatible = VOP_IOMMU_COMPATIBLE_NAME},
        { /* end */ }
};

MODULE_DEVICE_TABLE(of, iommu_dt_ids);
#endif

static struct platform_driver rk_iommu_driver = {
        .probe = rockchip_iommu_probe,
        .remove = NULL,
        .driver = {
                   .name = "rk_iommu",
                   .owner = THIS_MODULE,
                   .of_match_table = of_match_ptr(iommu_dt_ids),
        },
};

static int __init rockchip_iommu_init_driver(void)
{
        dump_iommu_sysfs_init();

        return platform_driver_register(&rk_iommu_driver);
}

core_initcall(rockchip_iommu_init_driver);