/* drivers/iommu/rockchip-iommu.c */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <linux/of.h>
#include <linux/rockchip/sysmmu.h>
#include <linux/rockchip/iomap.h>
#include <linux/rockchip/grf.h>

#include "rockchip-iommu.h"

/* We do not consider supersection (16 MiB) mappings */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

typedef enum sysmmu_entry_flags {
        SYSMMU_FLAGS_PRESENT = 0x01,
        SYSMMU_FLAGS_READ_PERMISSION = 0x02,
        SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
        SYSMMU_FLAGS_OVERRIDE_CACHE = 0x08,
        SYSMMU_FLAGS_WRITE_CACHEABLE = 0x10,
        SYSMMU_FLAGS_WRITE_ALLOCATE = 0x20,
        SYSMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
        SYSMMU_FLAGS_READ_CACHEABLE = 0x80,
        SYSMMU_FLAGS_READ_ALLOCATE = 0x100,
        SYSMMU_FLAGS_MASK = 0x1FF,
} sysmmu_entry_flags;

#define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
#define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
#define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

#define lv1ent_offset(iova) (((iova) >> 22) & 0x03FF)
#define lv2ent_offset(iova) (((iova) >> 12) & 0x03FF)

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
/* level-2 page entries are created with both read and write permission by default */
#define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT | \
                             SYSMMU_FLAGS_READ_PERMISSION | \
                             SYSMMU_FLAGS_WRITE_PERMISSION)
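
/*
 * Worked example of the two-level translation, assuming iova = 0x12345678:
 * lv1ent_offset(iova) = (0x12345678 >> 22) & 0x3FF = 0x48 indexes the
 * 1024-entry level-1 directory, lv2table_base() of that entry points to a
 * level-2 table, lv2ent_offset(iova) = (0x12345678 >> 12) & 0x3FF = 0x345
 * selects the page entry there, and spage_offs(iova) = 0x678 is the offset
 * inside the 4 KiB page.
 */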

#define SYSMMU_REG_POLL_COUNT_FAST 1000

/* rk3036: vpu and hevc share the ahb interface */
#define BIT_VCODEC_SEL (1 << 3)

/**
 * MMU register offsets
 * Used by the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
typedef enum sysmmu_register {
        SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current page directory pointer */
        SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
        SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
        SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
        SYSMMU_REGISTER_ZAP_ONE_LINE = 0x0010, /**< Used to invalidate the mapping of a single page from the MMU */
        SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
        SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
        SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
        SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
        SYSMMU_REGISTER_AUTO_GATING = 0x0024
} sysmmu_register;

typedef enum sysmmu_command {
        SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
        SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
        SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
        SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
        SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
        SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
        SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
} sysmmu_command;

/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum sysmmu_interrupt {
        SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
        SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
} sysmmu_interrupt;

typedef enum sysmmu_status_bits {
        SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
        SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
        SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
        SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
        SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
} sysmmu_status_bits;

/**
 * Size of an MMU page in bytes
 */
#define SYSMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space covered by one level-2 page table
 */
#define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define SYSMMU_PDE_ENTRY(address) (((address) >> 22) & 0x03FF)

/**
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define SYSMMU_PTE_ENTRY(address) (((address) >> 12) & 0x03FF)

/**
 * Extract the memory address from a PDE/PTE entry
 */
#define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))

static struct kmem_cache *lv2table_kmem_cache;

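/*
 * On the rk3036, the vpu and hevc blocks share one AHB interface; GRF_SOC_CON1
 * bit 3 selects which of the two is wired up. Rockchip GRF registers use the
 * upper halfword as a write-enable mask: a bit in [31:16] must be set for the
 * corresponding bit in [15:0] to be written, which is why BIT_VCODEC_SEL is
 * also shifted left by 16 below.
 */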
static void rockchip_vcodec_select(const char *string)
{
        if (strstr(string, "hevc")) {
                /* set the select bit: route the shared interface to hevc */
                writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) |
                               BIT_VCODEC_SEL | (BIT_VCODEC_SEL << 16),
                               RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
        } else if (strstr(string, "vpu")) {
                /* clear the select bit: route the shared interface to vpu */
                writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) &
                               ~BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16),
                               RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
        }
}

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "BUS ERROR",
        "UNKNOWN FAULT"
};

struct rk_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

static void sysmmu_disable_stall(void __iomem *base)
{
        int i;
        u32 mmu_status = __raw_readl(base + SYSMMU_REGISTER_STATUS);

        if (0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED))
                return;

        if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
                return;
        }

        __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, base + SYSMMU_REGISTER_COMMAND);

        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) {
                u32 status = __raw_readl(base + SYSMMU_REGISTER_STATUS);

                if (0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE))
                        break;
                if (status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
                        break;
                /* re-check the freshly read status, not the stale pre-loop value */
                if (0 == (status & SYSMMU_STATUS_BIT_PAGING_ENABLED))
                        break;
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i)
                pr_err("Disable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + SYSMMU_REGISTER_STATUS));
}

static bool sysmmu_enable_stall(void __iomem *base)
{
        int i;
        u32 mmu_status = __raw_readl(base + SYSMMU_REGISTER_STATUS);

        if (0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED)) {
                /* stall is implicit when paging is not enabled */
                return true;
        }
        if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it is in pagefault state.\n");
                return false;
        }

        __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, base + SYSMMU_REGISTER_COMMAND);

        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) {
                mmu_status = __raw_readl(base + SYSMMU_REGISTER_STATUS);
                if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
                        break;
                if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE) &&
                    (0 == (mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
                        break;
                if (0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED))
                        break;
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable stall request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + SYSMMU_REGISTER_STATUS));
                return false;
        }
        if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_err("Aborting MMU stall request since it has a pagefault.\n");
                return false;
        }
        return true;
}

static bool sysmmu_enable_paging(void __iomem *base)
{
        int i;

        __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, base + SYSMMU_REGISTER_COMMAND);

        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + SYSMMU_REGISTER_STATUS) &
                    SYSMMU_STATUS_BIT_PAGING_ENABLED)
                        break;
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Enable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + SYSMMU_REGISTER_STATUS));
                return false;
        }
        return true;
}

static bool sysmmu_disable_paging(void __iomem *base)
{
        int i;

        __raw_writel(SYSMMU_COMMAND_DISABLE_PAGING, base + SYSMMU_REGISTER_COMMAND);

        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) {
                if (!(__raw_readl(base + SYSMMU_REGISTER_STATUS) &
                      SYSMMU_STATUS_BIT_PAGING_ENABLED))
                        break;
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("Disable paging request failed, MMU status is 0x%08X\n",
                       __raw_readl(base + SYSMMU_REGISTER_STATUS));
                return false;
        }
        return true;
}

static void sysmmu_page_fault_done(void __iomem *base, const char *dbgname)
{
        pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
        __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, base + SYSMMU_REGISTER_COMMAND);
}

static bool sysmmu_zap_tlb(void __iomem *base)
{
        bool stall_success = sysmmu_enable_stall(base);

        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, base + SYSMMU_REGISTER_COMMAND);
        if (false == stall_success) {
                /* false means it is in pagefault state; disable_stall is not possible then */
                return false;
        }
        sysmmu_disable_stall(base);
        return true;
}

static inline bool sysmmu_raw_reset(void __iomem *base)
{
        int i;

        /* write a magic pattern; the low 12 bits of DTE_ADDR are not writable,
           so a healthy MMU reads back 0xCAFEB000 */
        __raw_writel(0xCAFEBABE, base + SYSMMU_REGISTER_DTE_ADDR);

        if (!(0xCAFEB000 == __raw_readl(base + SYSMMU_REGISTER_DTE_ADDR))) {
                pr_err("%s: DTE_ADDR readback check failed\n", __func__);
                return false;
        }
        __raw_writel(SYSMMU_COMMAND_HARD_RESET, base + SYSMMU_REGISTER_COMMAND);

        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) {
                if (__raw_readl(base + SYSMMU_REGISTER_DTE_ADDR) == 0)
                        break;
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("%s: reset request failed, DTE_ADDR is 0x%08X\n",
                       __func__, __raw_readl(base + SYSMMU_REGISTER_DTE_ADDR));
                return false;
        }
        return true;
}

static void __sysmmu_set_ptbase(void __iomem *base, unsigned long pgd)
{
        __raw_writel(pgd, base + SYSMMU_REGISTER_DTE_ADDR);
}

static bool sysmmu_reset(void __iomem *base, const char *dbgname)
{
        bool ok;

        ok = sysmmu_enable_stall(base);
        if (!ok) {
                pr_err("%s: stall failed: %s\n", __func__, dbgname);
                return ok;
        }
        ok = sysmmu_raw_reset(base);
        if (ok)
                __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT | SYSMMU_INTERRUPT_READ_BUS_ERROR,
                             base + SYSMMU_REGISTER_INT_MASK);
        sysmmu_disable_stall(base);
        if (!ok)
                pr_err("%s: failed: %s\n", __func__, dbgname);
        return ok;
}

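/*
 * The sysmmu hardware walks the page tables in physical memory, so every CPU
 * update has to be pushed past both the inner (dmac_flush_range) and outer
 * (outer_flush_range) caches before the table walker can observe it.
 */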
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}

static void __set_fault_handler(struct sysmmu_drvdata *data,
                                sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

void rockchip_sysmmu_set_fault_handler(struct device *dev,
                                       sysmmu_fault_handler_t handler)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        __set_fault_handler(data, handler);
}
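
/*
 * Usage sketch (hypothetical client): a master driver can install its own
 * handler instead of default_fault_handler() below, assuming the
 * sysmmu_fault_handler_t prototype from the sysmmu headers matches the
 * signature of default_fault_handler():
 *
 *	static int my_fault_handler(struct device *dev,
 *				    enum rk_sysmmu_inttype itype,
 *				    unsigned long pgtable_base,
 *				    unsigned long fault_addr,
 *				    unsigned int status)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx\n", fault_addr);
 *		return 0;
 *	}
 *
 *	rockchip_sysmmu_set_fault_handler(master_dev, my_fault_handler);
 */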

static int default_fault_handler(struct device *dev,
                                 enum rk_sysmmu_inttype itype,
                                 unsigned long pgtable_base,
                                 unsigned long fault_addr,
                                 unsigned int status)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        if (!data) {
                pr_err("%s: iommu device not assigned yet\n", __func__);
                return 0;
        }
        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        if (itype == SYSMMU_BUSERROR)
                pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
                       sysmmu_fault_name[itype], fault_addr, pgtable_base);

        if (itype == SYSMMU_PAGEFAULT)
                pr_err("SYSMMU: page fault detected at 0x%lx from bus id %d of type %s on %s\n",
                       fault_addr,
                       (status >> 6) & 0x1F,
                       (status & SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE) ? "write" : "read",
                       data->dbgname);

        pr_err("Generating kernel OOPS since it is unrecoverable.\n");

        BUG();

        return 0;
}

static void dump_pagetbl(u32 fault_address, u32 addr_dte)
{
        u32 lv1_offset;
        u32 lv2_offset;

        u32 *lv1_entry_pa;
        u32 *lv1_entry_va;
        u32 lv1_entry_value;

        u32 lv2_base;
        u32 *lv2_entry_pa;
        u32 *lv2_entry_va;
        u32 lv2_entry_value;

        lv1_offset = lv1ent_offset(fault_address);
        lv2_offset = lv2ent_offset(fault_address);

        lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
        lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
        lv1_entry_value = *lv1_entry_va;

        lv2_base = lv1_entry_value & 0xfffffffe;
        lv2_entry_pa = (u32 *)lv2_base + lv2_offset;
        lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset;
        lv2_entry_value = *lv2_entry_va;

        pr_info("fault address = 0x%08x, dte addr pa = 0x%08x, va = 0x%08x\n",
                fault_address, addr_dte, (u32)__va(addr_dte));
        pr_info("lv1_offset = 0x%x, lv1_entry_pa = 0x%08x, lv1_entry_va = 0x%08x\n",
                lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va);
        pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x, lv2_base = 0x%08x\n",
                lv1_entry_value, lv2_base);
        pr_info("lv2_offset = 0x%x, lv2_entry_pa = 0x%08x, lv2_entry_va = 0x%08x\n",
                lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va);
        pr_info("lv2_entry_value(*lv2_entry_va) = 0x%08x\n", lv2_entry_value);
}

static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
{
        /* the SYSMMU is blocked (stalled) when an interrupt occurs */
        struct sysmmu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
        u32 status;
        u32 rawstat;
        u32 int_status;
        u32 fault_address = 0;
        int i, ret = 0;

        read_lock(&data->lock);

        if (!is_sysmmu_active(data)) {
                read_unlock(&data->lock);
                return IRQ_HANDLED;
        }

        rockchip_vcodec_select(data->dbgname);

        pdev = to_platform_device(data->sysmmu);

        /* find which of our IRQ resources fired */
        for (i = 0; i < data->num_res_irq; i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == data->num_res_irq) {
                itype = SYSMMU_FAULT_UNKNOWN;
        } else {
                int_status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_STATUS);

                if (int_status != 0) {
                        /* mask all interrupts while handling this one */
                        __raw_writel(0x00, data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);

                        rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);

                        if (rawstat & SYSMMU_INTERRUPT_PAGE_FAULT) {
                                fault_address = __raw_readl(data->res_bases[i] +
                                                            SYSMMU_REGISTER_PAGE_FAULT_ADDR);
                                itype = SYSMMU_PAGEFAULT;
                        } else if (rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR) {
                                itype = SYSMMU_BUSERROR;
                        } else {
                                goto out;
                        }
                        dump_pagetbl(fault_address,
                                     __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR));
                } else {
                        goto out;
                }
        }

        if (data->fault_handler) {
                unsigned long base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);

                status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
                ret = data->fault_handler(data->dev, itype, base, fault_address, status);
        }

        if (!ret && (itype != SYSMMU_FAULT_UNKNOWN)) {
                if (SYSMMU_PAGEFAULT == itype) {
                        sysmmu_zap_tlb(data->res_bases[i]);
                        sysmmu_page_fault_done(data->res_bases[i], data->dbgname);
                        __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT | SYSMMU_INTERRUPT_READ_BUS_ERROR,
                                     data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
                }
        } else {
                pr_err("(%s) %s is not handled.\n", data->dbgname, sysmmu_fault_name[itype]);
        }

out:
        read_unlock(&data->lock);

        return IRQ_HANDLED;
}

static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;
        bool disabled = false;
        int i;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_inactive(data))
                goto finish;

        for (i = 0; i < data->num_res_mem; i++)
                sysmmu_disable_paging(data->res_bases[i]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                pr_info("(%s) Disabled\n", data->dbgname);
        else
                pr_info("(%s) %d times left to be disabled\n",
                        data->dbgname, data->activations);

        return disabled;
}

/* __rockchip_sysmmu_enable: Enables the System MMU
 *
 * Returns -error if an error occurred and the System MMU was not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled before.
 */
static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data,
                                    unsigned long pgtable,
                                    struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } else {
                        ret = 1;
                }

                pr_info("(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        data->pgtable = pgtable;

        for (i = 0; i < data->num_res_mem; i++) {
                bool status;

                status = sysmmu_enable_stall(data->res_bases[i]);
                if (status) {
                        __sysmmu_set_ptbase(data->res_bases[i], pgtable);
                        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE,
                                     data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
                }
                __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT | SYSMMU_INTERRUPT_READ_BUS_ERROR,
                             data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
                sysmmu_enable_paging(data->res_bases[i]);
                sysmmu_disable_stall(data->res_bases[i]);
        }

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}

bool rockchip_sysmmu_disable(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __rockchip_sysmmu_disable(data);

        return disabled;
}

void rockchip_sysmmu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        rockchip_vcodec_select(data->dbgname);

        if (is_sysmmu_active(data)) {
                int i;

                for (i = 0; i < data->num_res_mem; i++) {
                        if (!sysmmu_zap_tlb(data->res_bases[i]))
                                pr_err("%s: invalidating TLB failed\n", data->dbgname);
                }
        } else {
                pr_info("(%s) Disabled. Skipping invalidating TLB.\n", data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
                                               dma_addr_t iova)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);
        /* return 0 for an address that has no valid mapping */
        if (!lv1ent_fault(entry)) {
                entry = page_entry(entry, iova);
                if (!lv2ent_fault(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                       short *pgcnt)
{
        if (!lv2ent_fault(pent))
                return -EADDRINUSE;

        *pent = mk_lv2ent_spage(paddr);
        pgtable_flush(pent, pent + 1);
        *pgcnt -= 1;
        return 0;
}

/* allocate a level-2 table on first use of a 4 MiB section */
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                     short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                if (!pent)
                        return NULL;
                /* the hardware requires level-2 tables to be naturally aligned */
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = mk_lv1ent_page(__pa(pent));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }
        return page_entry(sent, iova);
}

static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SPAGE_SIZE)
                        size = SPAGE_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        *ent = 0;
        size = SPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += 1;

done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return size;
}

static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;
        unsigned long *pent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        pent = alloc_lv2entry(entry, iova, &priv->lv2entcnt[lv1ent_offset(iova)]);
        if (!pent)
                ret = -ENOMEM;
        else
                ret = lv2set_page(pent, paddr, size,
                                  &priv->lv2entcnt[lv1ent_offset(iova)]);

        if (ret)
                pr_err("%s: Failed to map iova 0x%lx/0x%zx bytes\n",
                       __func__, iova, size);

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}
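
/*
 * Mapping sketch via the generic IOMMU API (hypothetical client code; the
 * calls below are the standard linux/iommu.h entry points that end up in the
 * rk_iommu_ops callbacks in this file):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, master_dev))
 *		iommu_map(domain, iova, paddr, SPAGE_SIZE,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * Only SPAGE_SIZE (4 KiB) chunks are accepted, per pgsize_bitmap below.
 */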

static void rockchip_iommu_detach_device(struct iommu_domain *domain,
                                         struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto finish;

        rockchip_vcodec_select(data->dbgname);

        if (__rockchip_sysmmu_disable(data)) {
                pr_info("%s: Detached IOMMU with pgtable %#lx\n",
                        __func__, __pa(priv->pgtable));
                list_del(&data->node);
                INIT_LIST_HEAD(&data->node);
        } else {
                pr_info("%s: Detaching IOMMU with pgtable %#lx delayed\n",
                        __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);
}

static int rockchip_iommu_attach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);

        rockchip_vcodec_select(data->dbgname);

        ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already appear in priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0)
                pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",
                       __func__, __pa(priv->pgtable));
        else if (ret > 0)
                pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",
                        __func__, __pa(priv->pgtable));
        else
                pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",
                        __func__, __pa(priv->pgtable));

        return ret;
}

static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                rockchip_vcodec_select(data->dbgname);
                while (!rockchip_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                        __va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
        kfree(domain->priv);
        domain->priv = NULL;
}

static int rockchip_iommu_domain_init(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /*
         * The rk32xx sysmmu uses a two-level page table. Both levels hold
         * 1024 entries and each entry occupies 4 bytes, so one page is
         * allocated for each table.
         */
        priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 0);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

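/*
 * pgsize_bitmap = SPAGE_SIZE means the core IOMMU layer splits every
 * iommu_map()/iommu_unmap() request into 4 KiB pages before calling into
 * this driver; no larger mapping granule is advertised.
 */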
static struct iommu_ops rk_iommu_ops = {
        .domain_init = rockchip_iommu_domain_init,
        .domain_destroy = rockchip_iommu_domain_destroy,
        .attach_dev = rockchip_iommu_attach_device,
        .detach_dev = rockchip_iommu_detach_device,
        .map = rockchip_iommu_map,
        .unmap = rockchip_iommu_unmap,
        .iova_to_phys = rockchip_iommu_iova_to_phys,
        .pgsize_bitmap = SPAGE_SIZE,
};

static int rockchip_sysmmu_prepare(void)
{
        int ret = 0;
        static int registered;

        if (registered)
                return 0;

        lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
                                                LV2TABLE_SIZE, LV2TABLE_SIZE,
                                                0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }
        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (!ret)
                registered = 1;
        else
                pr_err("%s: failed to set iommu to bus\n", __func__);
        return ret;
}

static int rockchip_get_sysmmu_resource_num(struct platform_device *pdev,
                                            unsigned int type)
{
        int num_resources = 0;

        /* count resources of the given type */
        while (platform_get_resource(pdev, type, num_resources))
                num_resources++;

        return num_resources;
}

static struct kobject *dump_mmu_object;

/*
 * This attribute hangs off a bare kobject, so it must use kobj_attribute
 * (not DEVICE_ATTR, whose callbacks expect a struct device).
 */
static ssize_t dump_mmu_pagetbl_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        u32 fault_address;
        u32 iommu_dte;
        u32 mmu_base;
        void __iomem *base;
        int ret;

        ret = kstrtouint(buf, 0, &mmu_base);
        if (ret) {
                pr_err("%s is not a valid number.\n", buf);
                return ret;
        }
        base = ioremap(mmu_base, 0x100);
        if (!base)
                return -ENOMEM;
        iommu_dte = __raw_readl(base + SYSMMU_REGISTER_DTE_ADDR);
        fault_address = __raw_readl(base + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
        dump_pagetbl(fault_address, iommu_dte);
        iounmap(base);
        return count;
}

static struct kobj_attribute dev_attr_dump_mmu_pgtable =
        __ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl_store);
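
/*
 * Debug helper usage (sketch): write the physical base address of an MMU
 * instance to the sysfs file to dump the page-table walk for its last fault,
 * e.g. from a shell (address is hypothetical):
 *
 *	echo 0x10110800 > /sys/rk_iommu/dump_mmu_pgtable
 */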

void dump_iommu_sysfs_init(void)
{
        int ret;

        dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
        if (dump_mmu_object == NULL)
                return;
        ret = sysfs_create_file(dump_mmu_object, &dev_attr_dump_mmu_pgtable.attr);
        if (ret)
                pr_err("%s: failed to create sysfs file\n", __func__);
}

static int rockchip_sysmmu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct sysmmu_drvdata *data;

        dev = &pdev->dev;

        ret = rockchip_sysmmu_prepare();
        if (ret) {
                pr_err("%s: failed\n", __func__);
                goto err_alloc;
        }

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = dev_set_drvdata(dev, data);
        if (ret) {
                dev_dbg(dev, "Unable to initialize driver data\n");
                goto err_init;
        }

        if (pdev->dev.of_node)
                of_property_read_string(pdev->dev.of_node, "dbgname",
                                        &(data->dbgname));
        else
                pr_info("dbgname not assigned in device tree or device node does not exist\n");

        pr_info("(%s) Enter\n", data->dbgname);

        /* the rk32xx sysmmu needs both irq and memory resources */
        data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev, IORESOURCE_MEM);
        if (0 == data->num_res_mem) {
                pr_err("can't find sysmmu memory resource\n");
                ret = -ENOENT;
                goto err_init;
        }
        pr_info("data->num_res_mem=%d\n", data->num_res_mem);
        data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev, IORESOURCE_IRQ);
        if (0 == data->num_res_irq) {
                pr_err("can't find sysmmu irq resource\n");
                ret = -ENOENT;
                goto err_init;
        }

        /* zeroed so the error path below can tell mapped from unmapped entries */
        data->res_bases = kcalloc(data->num_res_mem, sizeof(*data->res_bases),
                                  GFP_KERNEL);
        if (data->res_bases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->num_res_mem; i++) {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        pr_err("Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }
                data->res_bases[i] = ioremap(res->start, resource_size(res));
                pr_info("res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
                        res->start, i, (unsigned int)data->res_bases[i]);
                if (!data->res_bases[i]) {
                        pr_err("Unable to map IOMEM @ PA:%#x\n", res->start);
                        ret = -ENOENT;
                        goto err_res;
                }

                rockchip_vcodec_select(data->dbgname);

                if (!strstr(data->dbgname, "isp")) {
                        /* reset the sysmmu */
                        if (!sysmmu_reset(data->res_bases[i], data->dbgname)) {
                                ret = -ENOENT;
                                goto err_res;
                        }
                }
        }

        for (i = 0; i < data->num_res_irq; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        pr_err("Unable to find IRQ resource\n");
                        if (ret == 0)
                                ret = -ENOENT;
                        goto err_irq;
                }
                ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED,
                                  dev_name(dev), data);
                if (ret) {
                        pr_err("Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }
        ret = rockchip_init_iovmm(dev, &data->vmm);
        if (ret)
                goto err_irq;

        data->sysmmu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        __set_fault_handler(data, &default_fault_handler);

        pr_info("(%s) Initialized\n", data->dbgname);
        return 0;

err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->num_res_mem-- > 0)
                if (data->res_bases[data->num_res_mem])
                        iounmap(data->res_bases[data->num_res_mem]);
        kfree(data->res_bases);
err_init:
        /* data was allocated with devm_kzalloc() and is freed automatically */
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}

#ifdef CONFIG_OF
static const struct of_device_id sysmmu_dt_ids[] = {
        { .compatible = IEP_SYSMMU_COMPATIBLE_NAME },
        { .compatible = VIP_SYSMMU_COMPATIBLE_NAME },
        { .compatible = VOPB_SYSMMU_COMPATIBLE_NAME },
        { .compatible = VOPL_SYSMMU_COMPATIBLE_NAME },
        { .compatible = HEVC_SYSMMU_COMPATIBLE_NAME },
        { .compatible = VPU_SYSMMU_COMPATIBLE_NAME },
        { .compatible = ISP_SYSMMU_COMPATIBLE_NAME },
        { .compatible = VOP_SYSMMU_COMPATIBLE_NAME },
        { /* end */ }
};
MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
#endif

static struct platform_driver rk_sysmmu_driver = {
        .probe = rockchip_sysmmu_probe,
        .remove = NULL,
        .driver = {
                .name = "rk_sysmmu",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(sysmmu_dt_ids),
        },
};

static int __init rockchip_sysmmu_init_driver(void)
{
        dump_iommu_sysfs_init();

        return platform_driver_register(&rk_sysmmu_driver);
}

core_initcall(rockchip_sysmmu_init_driver);