rockchip: iommu: re-enable MMU page fault and bus error IRQ mask when the host device is enabled
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / rockchip-iommu.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License version 2 as
4  * published by the Free Software Foundation.
5  */
6
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10
11 #include <linux/io.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/clk.h>
15 #include <linux/err.h>
16 #include <linux/mm.h>
17 #include <linux/errno.h>
18 #include <linux/memblock.h>
19 #include <linux/export.h>
20
21 #include <asm/cacheflush.h>
22 #include <asm/pgtable.h>
23 #include <linux/of.h>
24 #include <linux/rockchip/sysmmu.h>
25
26 #include "rockchip-iommu.h"
27
/* We do not consider super section mapping (16MB) */
#define SPAGE_ORDER 12 /* small page = 4 KiB */
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
/*
 * Page-table entry flag bits, shared by lv1 (DTE) and lv2 (PTE)
 * entries.  Bit 0 marks the entry valid; the permission and cache
 * attribute bits apply to lv2 (page) entries.
 */
typedef enum sysmmu_entry_flags
{
        SYSMMU_FLAGS_PRESENT = 0x01,            /* entry is valid */
        SYSMMU_FLAGS_READ_PERMISSION = 0x02,
        SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
        SYSMMU_FLAGS_OVERRIDE_CACHE  = 0x8,
        SYSMMU_FLAGS_WRITE_CACHEABLE  = 0x10,
        SYSMMU_FLAGS_WRITE_ALLOCATE  = 0x20,
        SYSMMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
        SYSMMU_FLAGS_READ_CACHEABLE  = 0x80,
        SYSMMU_FLAGS_READ_ALLOCATE  = 0x100,
        SYSMMU_FLAGS_MASK = 0x1FF,              /* all flag bits */
} sysmmu_entry_flags;
45
/* Predicates and accessors for lv1 (section) and lv2 (page) entries. */
#define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
#define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
#define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

/* 32-bit iova split: 10-bit lv1 index | 10-bit lv2 index | 12-bit offset */
#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

/* One lv2 table: 1024 entries x sizeof(long) = 4 KiB (assumes 32-bit long) */
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

/* Physical base of the lv2 table referenced by a lv1 entry. */
#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

/* Build a lv1 entry pointing at a lv2 table. */
#define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
/*write and read permission for level2 page default*/
#define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT |SYSMMU_FLAGS_READ_PERMISSION |SYSMMU_FLAGS_WRITE_PERMISSION)

/* Max iterations when busy-polling MMU status registers. */
#define SYSMMU_REG_POLL_COUNT_FAST 1000
67
/**
 * MMU register offsets (bytes from the register-bank base).
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register.
 */
typedef enum sysmmu_register
{
        SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
        SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
        SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
        SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
        SYSMMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
        SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
        SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
        SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
        SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
        SYSMMU_REGISTER_AUTO_GATING     = 0x0024 /**< Automatic clock gating control */
} sysmmu_register;
86
/* Values written to SYSMMU_REGISTER_COMMAND to control the MMU. */
typedef enum sysmmu_command
{
        SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
        SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
        SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
        SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
        SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
        SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
        SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
} sysmmu_command;
97
/**
 * MMU interrupt register bits.
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum sysmmu_interrupt
{
        SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
        SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
} sysmmu_interrupt;
110
/*
 * Bits of SYSMMU_REGISTER_STATUS.
 * NOTE(review): the typedef name (sys_mmu_status_bits) differs from the
 * enum tag (sysmmu_status_bits); and `1 << 31` overflows a signed int —
 * both worth cleaning up if the header users allow it.
 */
typedef enum sysmmu_status_bits
{
        SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
        SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
        SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
        SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
        SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
} sys_mmu_status_bits;
121
/**
 * Size of an MMU page in bytes
 */
#define SYSMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by one lv2 page-table page
 * (1024 entries x 4 KiB)
 */
#define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 */
#define SYSMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)

/**
 * Page table index from address
 */
#define SYSMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)

/**
 * Extract the memory address from a PDE/PTE entry
 */
#define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))

/* Slab cache for 4 KiB lv2 page tables; created at driver init
 * (not visible in this chunk). */
static struct kmem_cache *lv2table_kmem_cache;
/* Return a pointer to the lv1 (section) entry covering @iova. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        unsigned long idx = lv1ent_offset(iova);

        return &pgtable[idx];
}
156
/* Return a pointer to the lv2 (page) entry for @iova inside the lv2
 * table referenced by section entry @sent. */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        unsigned long *lv2base = (unsigned long *)__va(lv2table_base(sent));

        return &lv2base[lv2ent_offset(iova)];
}
161
162 static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
163         "PAGE FAULT",
164         "BUS ERROR",
165         "UNKNOWN FAULT"
166 };
167
/* Per-iommu_domain private data (domain->priv). */
struct rk_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB (1024 4-byte entries) */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
175
176 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
177 {
178         /* return true if the System MMU was not active previously
179            and it needs to be initialized */
180         return ++data->activations == 1;
181 }
182
183 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
184 {
185         /* return true if the System MMU is needed to be disabled */
186         BUG_ON(data->activations < 1);
187         return --data->activations == 0;
188 }
189
190 static bool is_sysmmu_active(struct sysmmu_drvdata *data)
191 {
192         return data->activations > 0;
193 }
194 static void sysmmu_disable_stall(void __iomem *sfrbase)
195 {
196         int i;
197         u32 mmu_status = __raw_readl(sfrbase+SYSMMU_REGISTER_STATUS);
198         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
199         {
200                 //pr_err("MMU disable skipped since it was not enabled.\n");
201                 return;
202         }
203         if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
204         {
205                 pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
206                 return;
207         }
208         
209         __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
210         
211         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
212         {
213                 u32 status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
214                 if ( 0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE) ) 
215                 {
216                         break;
217                 }
218                 if ( status &  SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
219                 {
220                         break;
221                 }
222                 if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
223                 {
224                         break;
225                 }
226         }
227         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
228                 pr_err("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
229 }
/*
 * Stall (block) the MMU so its registers can safely be reprogrammed.
 *
 * Returns true when the MMU is stalled — or implicitly stalled because
 * paging is disabled.  Returns false when the stall cannot take effect,
 * notably while the MMU sits in page-fault state.
 */
static bool sysmmu_enable_stall(void __iomem *sfrbase)
{
        int i;
        u32 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);

        /* Stall is implicit when paging is not enabled. */
        if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED) )
        {
                return true;
        }
        /* Stalling is not possible while a page fault is pending. */
        if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
        {
                pr_err("Aborting MMU stall request since it is in pagefault state.\n");
                return false;
        }

        __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);

        /* Poll until stalled, a fault intervenes, or paging turns off. */
        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i)
        {
                mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
                if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
                {
                        break;
                }
                /* Stalled: STALL_ACTIVE set while STALL_NOT_ACTIVE clear. */
                if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE)&&(0==(mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
                {
                        break;
                }
                if (0 == (mmu_status & ( SYSMMU_STATUS_BIT_PAGING_ENABLED )))
                {
                        break;
                }
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i)
        {
                pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
                return false;
        }
        /* The poll may have ended because a fault arrived meanwhile. */
        if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
        {
                pr_info("Aborting MMU stall request since it has a pagefault.\n");
                return false;
        }
        return true;
}
276
277 static bool sysmmu_enable_paging(void __iomem *sfrbase)
278 {
279         int i;
280         __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
281
282         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
283         {
284                 if (__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED) 
285                 {
286                         //pr_info("Enable paging request success.\n");
287                         break;
288                 }
289         }
290         if (SYSMMU_REG_POLL_COUNT_FAST == i)
291         {
292                 pr_err("Enable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
293                 return false;
294         }
295         return true;
296 }
297 static bool sysmmu_disable_paging(void __iomem *sfrbase)
298 {
299         int i;
300         __raw_writel(SYSMMU_COMMAND_DISABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
301
302         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
303         {
304                 if (!(__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED)) 
305                 {
306                         //pr_info("Disable paging request success.\n");
307                         break;
308                 }
309         }
310         if (SYSMMU_REG_POLL_COUNT_FAST == i)
311         {
312                 pr_err("Disable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
313                 return false;
314         }
315         return true;
316 }
317
318 void sysmmu_page_fault_done(void __iomem *sfrbase,const char *dbgname)
319 {
320         pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
321         __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, sfrbase + SYSMMU_REGISTER_COMMAND);
322 }
/*
 * Invalidate the entire page-table cache (TLB).
 *
 * Returns false when the surrounding stall failed (MMU in page-fault
 * state).  NOTE(review): the ZAP command is issued even in that case,
 * before the stall result is checked — confirm against the hardware
 * manual whether ZAP without a stall is safe.
 */
bool sysmmu_zap_tlb(void __iomem *sfrbase)
{
        bool stall_success = sysmmu_enable_stall(sfrbase);

        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, sfrbase + SYSMMU_REGISTER_COMMAND);
        if (false == stall_success)
        {
                /* False means that it is in Pagefault state. Not possible to disable_stall then */
                return false;
        }
        sysmmu_disable_stall(sfrbase);
        return true;
}
/*
 * Issue a HARD_RESET and wait for it to complete.
 *
 * A probe value is first written to DTE_ADDR: the register keeps only
 * the page-aligned bits, so 0xCAFEBABE must read back as 0xCAFEB000 if
 * the register file is alive.  HARD_RESET clears DTE_ADDR to 0, which
 * is what the poll loop waits for.
 */
static inline bool sysmmu_raw_reset(void __iomem *sfrbase)
{
        int i;
        __raw_writel(0xCAFEBABE, sfrbase + SYSMMU_REGISTER_DTE_ADDR);

        /* Register file not responding: abort before resetting. */
        if(!(0xCAFEB000 == __raw_readl(sfrbase+SYSMMU_REGISTER_DTE_ADDR)))
        {
                pr_err("error when %s.\n",__func__);
                return false;
        }
        __raw_writel(SYSMMU_COMMAND_HARD_RESET, sfrbase + SYSMMU_REGISTER_COMMAND);

        /* DTE_ADDR returns to zero once the reset has completed. */
        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i)
        {
                if(__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR) == 0)
                {
                        break;
                }
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("%s,Reset request failed, MMU status is 0x%08X\n", __func__,__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR));
                return false;
        }
        return true;
}
361
362 static void __sysmmu_set_ptbase(void __iomem *sfrbase,unsigned long pgd)
363 {
364         __raw_writel(pgd, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
365
366 }
367
368 static bool sysmmu_reset(void __iomem *sfrbase,const char *dbgname)
369 {
370         bool err = true;
371         
372         err = sysmmu_enable_stall(sfrbase);
373         if(!err)
374         {
375                 pr_info("%s:stall failed: %s\n",__func__,dbgname);
376                 return err;
377         }
378         err = sysmmu_raw_reset(sfrbase);
379         if(err)
380         {
381                 __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, sfrbase+SYSMMU_REGISTER_INT_MASK);
382         }
383         sysmmu_disable_stall(sfrbase);
384         if(!err)
385                 pr_info("%s: failed: %s\n", __func__,dbgname);
386         return err;
387 }
388
/*
 * Flush CPU caches for the page-table span [vastart, vaend):
 * dmac_flush_range handles the inner (L1) cache, outer_flush_range the
 * outer (L2) cache, so the IOMMU's table walker sees current entries.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),virt_to_phys(vaend));
}
/* Install @handler as the fault callback, under the drvdata write lock. */
static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}
403
404 void rockchip_sysmmu_set_fault_handler(struct device *dev,sysmmu_fault_handler_t handler)
405 {
406         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
407
408         __set_fault_handler(data, handler);
409 }
410
411 static int default_fault_handler(struct device *dev,
412                                         enum rk_sysmmu_inttype itype,
413                                         unsigned long pgtable_base,
414                                         unsigned long fault_addr,
415                                         unsigned int status
416                                         )
417 {
418         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
419
420         if(!data)
421         {
422                 pr_info("%s,iommu device not assigned yet\n",__func__);
423                 return 0;
424         }
425         if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
426                 itype = SYSMMU_FAULT_UNKNOWN;
427
428         if(itype == SYSMMU_BUSERROR)
429                 pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",sysmmu_fault_name[itype], fault_addr, pgtable_base);
430
431         if(itype == SYSMMU_PAGEFAULT)
432                 pr_err("SYSMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
433                                 fault_addr,
434                                 (status >> 6) & 0x1F,
435                                 (status & 32) ? "write" : "read",
436                                 data->dbgname
437                                 );
438
439         pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
440
441         BUG();
442
443         return 0;
444 }
445 static void dump_pagetbl(u32 fault_address,unsigned long addr_dte)
446 {
447         u32  offset1;
448         u32  offset2;
449         u32 *level2_base;
450         u32 *level1_entry;
451         u32 *level2_entry;
452         offset1 = lv1ent_offset(fault_address);
453         offset2 = lv2ent_offset(fault_address);
454         level1_entry = (u32 *)__va(addr_dte)+offset1;
455         level2_base = (u32 *)__va((*level1_entry)&0xfffffffe);
456         level2_entry = level2_base+offset2;
457         pr_info("level1 offset=%d,level2 offset=%d,level1_entry=0x%08x\n",offset1,offset2,(u32)level1_entry);
458         pr_info("*level1_entry = 0x%08x\n",*level1_entry);
459         pr_info("*level2_entry = 0x%08x\n",*level2_entry);
460
461 }
462 static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
463 {
464         /* SYSMMU is in blocked when interrupt occurred. */
465         struct sysmmu_drvdata *data = dev_id;
466         struct resource *irqres;
467         struct platform_device *pdev;
468         enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
469         u32 status;
470         u32 rawstat;
471         u32 int_status;
472         u32 fault_address;
473         int i, ret = 0;
474
475         read_lock(&data->lock);
476         
477 #if 0
478         WARN_ON(!is_sysmmu_active(data));
479 #else
480         if(!is_sysmmu_active(data))
481         {
482                 read_unlock(&data->lock);
483                 return IRQ_HANDLED;
484         }
485 #endif  
486         pdev = to_platform_device(data->sysmmu);
487
488         for (i = 0; i < data->num_res_irq; i++) 
489         {
490                 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
491                 if (irqres && ((int)irqres->start == irq))
492                         break;
493         }
494
495         if (i == data->num_res_irq) 
496         {
497                 itype = SYSMMU_FAULT_UNKNOWN;
498         } 
499         else 
500         {
501                 int_status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_STATUS);
502                 if(int_status != 0)
503                 {
504                         /*mask status*/
505                         __raw_writel(0x00,data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
506                         
507                         rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);
508
509                         if(rawstat & SYSMMU_INTERRUPT_PAGE_FAULT)
510                         {
511                                 fault_address = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
512                                 itype = SYSMMU_PAGEFAULT;
513                         }
514                         else if(rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR)
515                         {
516                                 itype = SYSMMU_BUSERROR;
517                         }
518                         else
519                         {
520                                 goto out;
521                         }
522                         dump_pagetbl(fault_address,__raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR));
523                 }
524                 else
525                         goto out;
526         }
527         
528         if (data->fault_handler) 
529         {
530                 unsigned long base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);
531                 status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
532                 ret = data->fault_handler(data->dev, itype, base, fault_address,status);
533         }
534
535         if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
536         {
537                 if(SYSMMU_PAGEFAULT == itype)
538                 {
539                         sysmmu_zap_tlb(data->res_bases[i]);
540                         sysmmu_page_fault_done(data->res_bases[i],data->dbgname);
541                         __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
542                 }
543         }
544         else
545                 pr_err("(%s) %s is not handled.\n",data->dbgname, sysmmu_fault_name[itype]);
546
547 out :
548         read_unlock(&data->lock);
549
550         return IRQ_HANDLED;
551 }
552
553 static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
554 {
555         unsigned long flags;
556         bool disabled = false;
557         int i;
558         write_lock_irqsave(&data->lock, flags);
559
560         if (!set_sysmmu_inactive(data))
561                 goto finish;
562
563         for(i=0;i<data->num_res_mem;i++)
564         {
565                 sysmmu_disable_paging(data->res_bases[i]);
566         }
567
568         disabled = true;
569         data->pgtable = 0;
570         data->domain = NULL;
571 finish:
572         write_unlock_irqrestore(&data->lock, flags);
573
574         if (disabled)
575                 pr_info("(%s) Disabled\n", data->dbgname);
576         else
577                 pr_info("(%s) %d times left to be disabled\n",data->dbgname, data->activations);
578
579         return disabled;
580 }
581
/* __rockchip_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data,unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        /* Nested enable: only legal with the same page table. */
        if (!set_sysmmu_active(data))
        {
                if (WARN_ON(pgtable != data->pgtable))
                {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                }
                else
                        ret = 1;

                pr_info("(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        data->pgtable = pgtable;

        for (i = 0; i < data->num_res_mem; i++)
        {
                bool status;
                status = sysmmu_enable_stall(data->res_bases[i]);
                if(status)
                {
                        __sysmmu_set_ptbase(data->res_bases[i], pgtable);
                        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
                }
                /* Re-enable the page-fault and bus-error interrupt mask
                 * whenever the host device enables the MMU.
                 * NOTE(review): written even when the stall above failed
                 * (so the ptbase was not programmed) — confirm intent. */
                __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
                sysmmu_enable_paging(data->res_bases[i]);
                sysmmu_disable_stall(data->res_bases[i]);
        }

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}
633 bool rockchip_sysmmu_disable(struct device *dev)
634 {
635         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
636         bool disabled;
637
638         disabled = __rockchip_sysmmu_disable(data);
639
640         return disabled;
641 }
642 void rockchip_sysmmu_tlb_invalidate(struct device *dev)
643 {
644         unsigned long flags;
645         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
646
647         read_lock_irqsave(&data->lock, flags);
648
649         if (is_sysmmu_active(data)) 
650         {
651                 int i;
652                 for (i = 0; i < data->num_res_mem; i++) 
653                 {
654                         if(!sysmmu_zap_tlb(data->res_bases[i]))
655                                 pr_err("%s,invalidating TLB failed\n",data->dbgname);
656                 }
657         } 
658         else 
659                 pr_info("(%s) Disabled. Skipping invalidating TLB.\n",data->dbgname);
660
661         read_unlock_irqrestore(&data->lock, flags);
662 }
663 static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,dma_addr_t iova)
664 {
665         struct rk_iommu_domain *priv = domain->priv;
666         unsigned long *entry;
667         unsigned long flags;
668         phys_addr_t phys = 0;
669
670         spin_lock_irqsave(&priv->pgtablelock, flags);
671
672         entry = section_entry(priv->pgtable, iova);
673         entry = page_entry(entry, iova);
674         phys = spage_phys(entry) + spage_offs(iova);
675         
676         spin_unlock_irqrestore(&priv->pgtablelock, flags);
677
678         return phys;
679 }
680 static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
681                                                                 short *pgcnt)
682 {
683         if (!lv2ent_fault(pent))
684                 return -EADDRINUSE;
685
686         *pent = mk_lv2ent_spage(paddr);
687         pgtable_flush(pent, pent + 1);
688         *pgcnt -= 1;
689         return 0;
690 }
691
692 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,short *pgcounter)
693 {
694         if (lv1ent_fault(sent)) 
695         {
696                 unsigned long *pent;
697
698                 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
699                 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
700                 if (!pent)
701                         return NULL;
702
703                 *sent = mk_lv1ent_page(__pa(pent));
704                 kmemleak_ignore(pent);
705                 *pgcounter = NUM_LV2ENTRIES;
706                 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
707                 pgtable_flush(sent, sent + 1);
708         }
709         return page_entry(sent, iova);
710 }
711
712 static size_t rockchip_iommu_unmap(struct iommu_domain *domain,unsigned long iova, size_t size)
713 {
714         struct rk_iommu_domain *priv = domain->priv;
715         unsigned long flags;
716         unsigned long *ent;
717
718         BUG_ON(priv->pgtable == NULL);
719
720         spin_lock_irqsave(&priv->pgtablelock, flags);
721
722         ent = section_entry(priv->pgtable, iova);
723
724         if (unlikely(lv1ent_fault(ent))) 
725         {
726                 if (size > SPAGE_SIZE)
727                         size = SPAGE_SIZE;
728                 goto done;
729         }
730
731         /* lv1ent_page(sent) == true here */
732
733         ent = page_entry(ent, iova);
734
735         if (unlikely(lv2ent_fault(ent))) 
736         {
737                 size = SPAGE_SIZE;
738                 goto done;
739         }
740         
741         *ent = 0;
742         size = SPAGE_SIZE;
743         priv->lv2entcnt[lv1ent_offset(iova)] += 1;
744         goto done;
745
746 done:
747         //pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",__func__, iova,size);
748         spin_unlock_irqrestore(&priv->pgtablelock, flags);
749
750         return size;
751 }
752 static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
753                          phys_addr_t paddr, size_t size, int prot)
754 {
755         struct rk_iommu_domain *priv = domain->priv;
756         unsigned long *entry;
757         unsigned long flags;
758         int ret = -ENOMEM;
759         unsigned long *pent;
760
761         BUG_ON(priv->pgtable == NULL);
762
763         spin_lock_irqsave(&priv->pgtablelock, flags);
764
765         entry = section_entry(priv->pgtable, iova);
766         
767         pent = alloc_lv2entry(entry, iova,&priv->lv2entcnt[lv1ent_offset(iova)]);
768         if (!pent)
769                 ret = -ENOMEM;
770         else
771                 ret = lv2set_page(pent, paddr, size,&priv->lv2entcnt[lv1ent_offset(iova)]);
772         
773         if (ret)
774         {
775                 pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n",__func__, iova, size);
776         }
777         spin_unlock_irqrestore(&priv->pgtablelock, flags);
778
779         return ret;
780 }
781
782 static void rockchip_iommu_detach_device(struct iommu_domain *domain,
783                                     struct device *dev)
784 {
785         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
786         struct rk_iommu_domain *priv = domain->priv;
787         struct list_head *pos;
788         unsigned long flags;
789         bool found = false;
790
791         spin_lock_irqsave(&priv->lock, flags);
792
793         list_for_each(pos, &priv->clients) 
794         {
795                 if (list_entry(pos, struct sysmmu_drvdata, node) == data) 
796                 {
797                         found = true;
798                         break;
799                 }
800         }
801         if (!found)
802                 goto finish;
803
804         if (__rockchip_sysmmu_disable(data)) 
805         {
806                 pr_info("%s: Detached IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
807                 list_del(&data->node);
808                 INIT_LIST_HEAD(&data->node);
809
810         } 
811         else 
812                 pr_info("%s: Detaching IOMMU with pgtable %#lx delayed",__func__, __pa(priv->pgtable));
813         
814 finish:
815         spin_unlock_irqrestore(&priv->lock, flags);
816 }
817 static int rockchip_iommu_attach_device(struct iommu_domain *domain,struct device *dev)
818 {
819         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
820         struct rk_iommu_domain *priv = domain->priv;
821         unsigned long flags;
822         int ret;
823
824         spin_lock_irqsave(&priv->lock, flags);
825
826         ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);
827
828         if (ret == 0) 
829         {
830                 /* 'data->node' must not be appeared in priv->clients */
831                 BUG_ON(!list_empty(&data->node));
832                 data->dev = dev;
833                 list_add_tail(&data->node, &priv->clients);
834         }
835
836         spin_unlock_irqrestore(&priv->lock, flags);
837
838         if (ret < 0) 
839         {
840                 pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
841         } 
842         else if (ret > 0) 
843         {
844                 pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",__func__, __pa(priv->pgtable));
845         } 
846         else 
847         {
848                 pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",__func__, __pa(priv->pgtable));
849         }
850
851         return ret;
852 }
/*
 * rockchip_iommu_domain_destroy - tear down a domain and free its tables.
 *
 * Forces any still-attached sysmmu instances off, then frees every
 * allocated level-2 table, the level-1 table, the per-slot entry
 * counters and the domain structure itself.
 */
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct rk_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	/* Clients should already be detached; warn but still clean up below. */
	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) 
	{
		/* NOTE(review): busy-waiting here runs under a spinlock with
		 * IRQs off; presumably rockchip_sysmmu_disable() cannot sleep
		 * and converges quickly — verify against its implementation. */
		while (!rockchip_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Free each level-2 table that was ever populated under an lv1 slot. */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,__va(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 0);
	free_pages((unsigned long)priv->lv2entcnt, 0);
	kfree(domain->priv);
	domain->priv = NULL;
}
880
881 static int rockchip_iommu_domain_init(struct iommu_domain *domain)
882 {
883         struct rk_iommu_domain *priv;
884
885         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
886         if (!priv)
887                 return -ENOMEM;
888         
889 /*rk32xx sysmmu use 2 level pagetable,
890    level1 and leve2 both have 1024 entries,each entry  occupy 4 bytes,
891    so alloc a page size for each page table 
892 */
893         priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
894         if (!priv->pgtable)
895                 goto err_pgtable;
896
897         priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
898         if (!priv->lv2entcnt)
899                 goto err_counter;
900
901         pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
902
903         spin_lock_init(&priv->lock);
904         spin_lock_init(&priv->pgtablelock);
905         INIT_LIST_HEAD(&priv->clients);
906
907         domain->priv = priv;
908         return 0;
909
910 err_counter:
911         free_pages((unsigned long)priv->pgtable, 0);    
912 err_pgtable:
913         kfree(priv);
914         return -ENOMEM;
915 }
916
917 static struct iommu_ops rk_iommu_ops = 
918 {
919         .domain_init = &rockchip_iommu_domain_init,
920         .domain_destroy = &rockchip_iommu_domain_destroy,
921         .attach_dev = &rockchip_iommu_attach_device,
922         .detach_dev = &rockchip_iommu_detach_device,
923         .map = &rockchip_iommu_map,
924         .unmap = &rockchip_iommu_unmap,
925         .iova_to_phys = &rockchip_iommu_iova_to_phys,
926         .pgsize_bitmap = SPAGE_SIZE,
927 };
928
929 static int rockchip_sysmmu_prepare(void)
930 {
931         int ret = 0;
932         static int registed = 0;
933         
934         if(registed)
935                 return 0;
936         
937         lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
938         if (!lv2table_kmem_cache) 
939         {
940                 pr_err("%s: failed to create kmem cache\n", __func__);
941                 return -ENOMEM;
942         }
943         ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
944         if(!ret)
945                 registed = 1;
946         else
947                 pr_err("%s:failed to set iommu to bus\r\n",__func__);
948         return ret;
949 }
/*
 * rockchip_get_sysmmu_resource_num - count platform resources of @type.
 *
 * Probes indices 0, 1, ... until platform_get_resource() returns NULL.
 * Replaces the original goto-driven loop (whose "while" acted as an "if")
 * with a straightforward counting loop; behavior is unchanged.
 *
 * Return: the number of resources of @type (IORESOURCE_MEM/IRQ) on @pdev.
 */
static int  rockchip_get_sysmmu_resource_num(struct platform_device *pdev,unsigned int type)
{
	int num_resources = 0;

	while (platform_get_resource(pdev, type, num_resources))
		num_resources++;

	return num_resources;
}
965
966 static int rockchip_sysmmu_probe(struct platform_device *pdev)
967 {
968         int i, ret;
969         struct device *dev;
970         struct sysmmu_drvdata *data;
971         
972         dev = &pdev->dev;
973         
974         ret = rockchip_sysmmu_prepare();
975         if(ret)
976         {
977                 pr_err("%s,failed\r\n",__func__);
978                 goto err_alloc;
979         }
980
981         data = devm_kzalloc(dev,sizeof(*data), GFP_KERNEL);
982         if (!data) 
983         {
984                 dev_dbg(dev, "Not enough memory\n");
985                 ret = -ENOMEM;
986                 goto err_alloc;
987         }
988         
989         ret = dev_set_drvdata(dev, data);
990         if (ret) 
991         {
992                 dev_dbg(dev, "Unabled to initialize driver data\n");
993                 goto err_init;
994         }
995         
996         if(pdev->dev.of_node)
997         {
998                 of_property_read_string(pdev->dev.of_node,"dbgname",&(data->dbgname));
999         }
1000         else
1001         {
1002                 pr_info("dbgname not assigned in device tree or device node not exist\r\n");
1003         }
1004
1005         pr_info("(%s) Enter\n", data->dbgname);
1006
1007         /*rk32xx sysmmu need both irq and memory */
1008         data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_MEM);
1009         if(0 == data->num_res_mem)
1010         {
1011                 pr_err("can't find sysmmu memory resource \r\n");
1012                 goto err_init;
1013         }
1014         pr_info("data->num_res_mem=%d\n",data->num_res_mem);
1015         data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_IRQ);
1016         if(0 == data->num_res_irq)
1017         {
1018                 pr_err("can't find sysmmu irq resource \r\n");
1019                 goto err_init;
1020         }
1021         
1022         data->res_bases = kmalloc(sizeof(*data->res_bases) * data->num_res_mem,GFP_KERNEL);
1023         if (data->res_bases == NULL)
1024         {
1025                 dev_dbg(dev, "Not enough memory\n");
1026                 ret = -ENOMEM;
1027                 goto err_init;
1028         }
1029
1030         for (i = 0; i < data->num_res_mem; i++) 
1031         {
1032                 struct resource *res;
1033                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1034                 if (!res) 
1035                 {
1036                         pr_err("Unable to find IOMEM region\n");
1037                         ret = -ENOENT;
1038                         goto err_res;
1039                 }
1040                 data->res_bases[i] = ioremap(res->start, resource_size(res));
1041                 pr_info("res->start = 0x%08x  ioremap to  data->res_bases[%d] = 0x%08x\n",res->start,i,(unsigned int)data->res_bases[i]);
1042                 if (!data->res_bases[i]) 
1043                 {
1044                         pr_err("Unable to map IOMEM @ PA:%#x\n",res->start);
1045                         ret = -ENOENT;
1046                         goto err_res;
1047                 }
1048                 if(!strstr(data->dbgname,"isp"))
1049                 {
1050                         /*reset sysmmu*/
1051                         if(!sysmmu_reset(data->res_bases[i],data->dbgname))
1052                         {
1053                                 ret = -ENOENT;
1054                                 goto err_res;
1055                         }
1056                 }
1057         }
1058
1059         for (i = 0; i < data->num_res_irq; i++) 
1060         {
1061                 ret = platform_get_irq(pdev, i);
1062                 if (ret <= 0) 
1063                 {
1064                         pr_err("Unable to find IRQ resource\n");
1065                         goto err_irq;
1066                 }
1067                 ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED ,dev_name(dev), data);
1068                 if (ret) 
1069                 {
1070                         pr_err("Unabled to register interrupt handler\n");
1071                         goto err_irq;
1072                 }
1073         }
1074         ret = rockchip_init_iovmm(dev, &data->vmm);
1075         if (ret)
1076                 goto err_irq;
1077         
1078         
1079         data->sysmmu = dev;
1080         rwlock_init(&data->lock);
1081         INIT_LIST_HEAD(&data->node);
1082
1083         __set_fault_handler(data, &default_fault_handler);
1084
1085         pr_info("(%s) Initialized\n", data->dbgname);
1086         return 0;
1087
1088 err_irq:
1089         while (i-- > 0) 
1090         {
1091                 int irq;
1092
1093                 irq = platform_get_irq(pdev, i);
1094                 free_irq(irq, data);
1095         }
1096 err_res:
1097         while (data->num_res_mem-- > 0)
1098                 iounmap(data->res_bases[data->num_res_mem]);
1099         kfree(data->res_bases);
1100 err_init:
1101         kfree(data);
1102 err_alloc:
1103         dev_err(dev, "Failed to initialize\n");
1104         return ret;
1105 }
1106
#ifdef CONFIG_OF
/* Device-tree match table: one entry per sysmmu client block (IEP, VIP,
 * VOP big/little, HEVC, VPU, ISP); compatible strings come from the
 * rockchip sysmmu header. */
static const struct of_device_id sysmmu_dt_ids[] = 
{
	{ .compatible = IEP_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = VIP_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = VOPB_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = VOPL_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = HEVC_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = VPU_SYSMMU_COMPATIBLE_NAME},
	{ .compatible = ISP_SYSMMU_COMPATIBLE_NAME},
	{ /* end */ }
};
MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
#endif
1121
1122 static struct platform_driver rk_sysmmu_driver = 
1123 {
1124         .probe = rockchip_sysmmu_probe,
1125         .remove = NULL,
1126         .driver = 
1127         {
1128                    .name = "rk_sysmmu",
1129                    .owner = THIS_MODULE,
1130                    .of_match_table = of_match_ptr(sysmmu_dt_ids),
1131         },
1132 };
1133
1134 #if 0
/* module_platform_driver() did not work here for reasons unknown to the
 * original author; registration is done explicitly via core_initcall()
 * below instead. Kept for reference. */
1136 #ifdef CONFIG_OF
1137 module_platform_driver(rk_sysmmu_driver);
1138 #endif
1139 #endif
/*
 * rockchip_sysmmu_init_driver - register the platform driver at boot.
 *
 * Registered at core_initcall level so the sysmmu comes up before the
 * client drivers (display, video codec, camera) that depend on it.
 */
static int __init rockchip_sysmmu_init_driver(void)
{
	return platform_driver_register(&rk_sysmmu_driver);
}

core_initcall(rockchip_sysmmu_init_driver);
1146