Merge tag 'lsk-android-14.04' into develop-3.10
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / rockchip-iommu.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License version 2 as
4  * published by the Free Software Foundation.
5  */
6
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10
11 #include <linux/io.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/clk.h>
15 #include <linux/err.h>
16 #include <linux/mm.h>
17 #include <linux/errno.h>
18 #include <linux/memblock.h>
19 #include <linux/export.h>
20
21 #include <asm/cacheflush.h>
22 #include <asm/pgtable.h>
23 #include <linux/of.h>
24 #include <linux/rockchip/sysmmu.h>
25
26 #include "rockchip-iommu.h"
27
/* Only 4 KiB small-page mappings are used; 16 MiB super-section
 * mappings are not considered. */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
/* Flag bits stored in the low bits of level-1/level-2 page-table entries. */
typedef enum sysmmu_entry_flags 
{
        SYSMMU_FLAGS_PRESENT = 0x01,
        SYSMMU_FLAGS_READ_PERMISSION = 0x02,
        SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
        SYSMMU_FLAGS_OVERRIDE_CACHE  = 0x8,
        SYSMMU_FLAGS_WRITE_CACHEABLE  = 0x10,
        SYSMMU_FLAGS_WRITE_ALLOCATE  = 0x20,
        SYSMMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
        SYSMMU_FLAGS_READ_CACHEABLE  = 0x80,
        SYSMMU_FLAGS_READ_ALLOCATE  = 0x100,
        SYSMMU_FLAGS_MASK = 0x1FF,
} sysmmu_entry_flags;
45
/* A level-1 entry is a fault when no level-2 table is attached. */
#define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
/* A level-1 entry pointing at a valid level-2 page table. */
#define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
/* A level-2 entry is a fault when the page is not mapped. */
#define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
/* Physical page frame / in-page offset of a mapped small page. */
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

/* 10-bit indices: iova[31:22] selects the lv1 entry, iova[21:12] the lv2 entry. */
#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

/* One level-2 table: 1024 entries, 4 KiB on 32-bit ARM (sizeof(long) == 4). */
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

/* Level-2 table base address stored in a level-1 entry (flag bit masked off). */
#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
/*write and read permission for level2 page default*/
#define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT |SYSMMU_FLAGS_READ_PERMISSION |SYSMMU_FLAGS_WRITE_PERMISSION)

/* Iteration bound when busy-polling MMU status registers. */
#define SYSMMU_REG_POLL_COUNT_FAST 1000
67
/**
 * MMU register offsets (in bytes from the instance base)
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
typedef enum sysmmu_register 
{
        SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
        SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
        SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
        SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
        SYSMMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
        SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
        SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
        SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
        SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
        SYSMMU_REGISTER_AUTO_GATING     = 0x0024 /**< presumably automatic clock-gating control — confirm against TRM */
} sysmmu_register;
86
/* Commands written to SYSMMU_REGISTER_COMMAND. */
typedef enum sysmmu_command 
{
        SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
        SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
        SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
        SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
        SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
        SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
        SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
} sysmmu_command;
97
/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum sysmmu_interrupt 
{
        SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
        SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
} sysmmu_interrupt;
110
/* Bits of SYSMMU_REGISTER_STATUS. */
typedef enum sysmmu_status_bits 
{
        SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
        SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
        SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
        SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
        SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        /* NOTE(review): 1 << 31 overflows signed int (UB in ISO C); kernel
         * builds rely on two's-complement behavior here — consider
         * (int)(1u << 31) for stricter compilers. */
        SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
} sys_mmu_status_bits;
121
/**
 * Size of an MMU page in bytes
 */
#define SYSMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by a page table page
 */
#define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define SYSMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)

/**
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define SYSMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)

/**
 * Extract the memory address from an PDE/PTE entry
 */
#define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))

/* Slab cache for level-2 page tables; created outside this chunk. */
static struct kmem_cache *lv2table_kmem_cache;
/* Return the level-1 (section) entry covering @iova inside @pgtable. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        unsigned long idx = lv1ent_offset(iova);

        return &pgtable[idx];
}
156
/* Return the level-2 (page) entry for @iova inside the table that the
 * level-1 entry @sent points to. */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        unsigned long *lv2base = (unsigned long *)__va(lv2table_base(sent));

        return &lv2base[lv2ent_offset(iova)];
}
161
/* Human-readable fault names, indexed by enum rk_sysmmu_inttype. */
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "BUS ERROR",
        "UNKNOWN FAULT"
};
167
/* Per-domain state: one level-1 table plus a free-entry counter per section. */
struct rk_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure (clients list) */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
175
176 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
177 {
178         /* return true if the System MMU was not active previously
179            and it needs to be initialized */
180         return ++data->activations == 1;
181 }
182
183 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
184 {
185         /* return true if the System MMU is needed to be disabled */
186         BUG_ON(data->activations < 1);
187         return --data->activations == 0;
188 }
189
190 static bool is_sysmmu_active(struct sysmmu_drvdata *data)
191 {
192         return data->activations > 0;
193 }
194 static void sysmmu_disable_stall(void __iomem *sfrbase)
195 {
196         int i;
197         u32 mmu_status = __raw_readl(sfrbase+SYSMMU_REGISTER_STATUS);
198         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
199         {
200                 //pr_err("MMU disable skipped since it was not enabled.\n");
201                 return;
202         }
203         if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
204         {
205                 pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
206                 return;
207         }
208         
209         __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
210         
211         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
212         {
213                 u32 status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
214                 if ( 0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE) ) 
215                 {
216                         break;
217                 }
218                 if ( status &  SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
219                 {
220                         break;
221                 }
222                 if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
223                 {
224                         break;
225                 }
226         }
227         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
228                 pr_err("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
229 }
230 static bool sysmmu_enable_stall(void __iomem *sfrbase)
231 {
232         int i;
233         u32 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
234
235         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED) ) 
236         {
237                 //pr_info("MMU stall is implicit when Paging is not enabled.\n");
238                 return true;
239         }
240         if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
241         {
242                 pr_err("Aborting MMU stall request since it is in pagefault state.\n");
243                 return false;
244         }
245         
246         __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
247
248         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
249         {
250                 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
251                 if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
252                 {
253                         break;
254                 }
255                 if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE)&&(0==(mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE))) 
256                 {
257                         break;
258                 }
259                 if (0 == (mmu_status & ( SYSMMU_STATUS_BIT_PAGING_ENABLED ))) 
260                 {
261                         break;
262                 }
263         }
264         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
265         {
266                 pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
267                 return false;
268         }
269         if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
270         {
271                 pr_info("Aborting MMU stall request since it has a pagefault.\n");
272                 return false;
273         }
274         return true;
275 }
276
277 static bool sysmmu_enable_paging(void __iomem *sfrbase)
278 {
279         int i;
280         __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
281
282         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
283         {
284                 if (__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED) 
285                 {
286                         //pr_info("Enable paging request success.\n");
287                         break;
288                 }
289         }
290         if (SYSMMU_REG_POLL_COUNT_FAST == i)
291         {
292                 pr_err("Enable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
293                 return false;
294         }
295         return true;
296 }
297 static bool sysmmu_disable_paging(void __iomem *sfrbase)
298 {
299         int i;
300         __raw_writel(SYSMMU_COMMAND_DISABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
301
302         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
303         {
304                 if (!(__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED)) 
305                 {
306                         //pr_info("Disable paging request success.\n");
307                         break;
308                 }
309         }
310         if (SYSMMU_REG_POLL_COUNT_FAST == i)
311         {
312                 pr_err("Disable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
313                 return false;
314         }
315         return true;
316 }
317
318 void sysmmu_page_fault_done(void __iomem *sfrbase,const char *dbgname)
319 {
320         pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
321         __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, sfrbase + SYSMMU_REGISTER_COMMAND);
322 }
/*
 * Invalidate the whole TLB of one MMU instance.
 * Returns false when the MMU is in page-fault state (stall could not be
 * taken, so it is not released either).
 * NOTE(review): the ZAP command is issued even when the stall request
 * failed — confirm this is intended for the page-fault recovery path.
 */
bool sysmmu_zap_tlb(void __iomem *sfrbase)
{
        bool stall_success = sysmmu_enable_stall(sfrbase);
        
        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, sfrbase + SYSMMU_REGISTER_COMMAND);
        if (false == stall_success) 
        {
                /* False means that it is in Pagefault state. Not possible to disable_stall then */
                return false;
        }
        sysmmu_disable_stall(sfrbase);
        return true;
}
336 static inline bool sysmmu_raw_reset(void __iomem *sfrbase)
337 {
338         int i;
339         __raw_writel(0xCAFEBABE, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
340
341         if(!(0xCAFEB000 == __raw_readl(sfrbase+SYSMMU_REGISTER_DTE_ADDR)))
342         {
343                 pr_err("error when %s.\n",__func__);
344                 return false;
345         }
346         __raw_writel(SYSMMU_COMMAND_HARD_RESET, sfrbase + SYSMMU_REGISTER_COMMAND);
347
348         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
349         {
350                 if(__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR) == 0)
351                 {
352                         break;
353                 }
354         }
355         if (SYSMMU_REG_POLL_COUNT_FAST == i) {
356                 pr_err("%s,Reset request failed, MMU status is 0x%08X\n", __func__,__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR));
357                 return false;
358         }
359         return true;
360 }
361
362 static void __sysmmu_set_ptbase(void __iomem *sfrbase,unsigned long pgd)
363 {
364         __raw_writel(pgd, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
365
366 }
367
368 static bool sysmmu_reset(void __iomem *sfrbase,const char *dbgname)
369 {
370         bool err = true;
371         
372         err = sysmmu_enable_stall(sfrbase);
373         if(!err)
374         {
375                 pr_info("%s:stall failed: %s\n",__func__,dbgname);
376                 return err;
377         }
378         err = sysmmu_raw_reset(sfrbase);
379         if(err)
380         {
381                 __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, sfrbase+SYSMMU_REGISTER_INT_MASK);
382         }
383         sysmmu_disable_stall(sfrbase);
384         if(!err)
385                 pr_info("%s: failed: %s\n", __func__,dbgname);
386         return err;
387 }
388
/*
 * Flush the CPU data cache (inner, then outer/L2) for the page-table range
 * [vastart, vaend) so the MMU's table walker observes the updated entries.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),virt_to_phys(vaend));
}
394 static void __set_fault_handler(struct sysmmu_drvdata *data,
395                                         sysmmu_fault_handler_t handler)
396 {
397         unsigned long flags;
398
399         write_lock_irqsave(&data->lock, flags);
400         data->fault_handler = handler;
401         write_unlock_irqrestore(&data->lock, flags);
402 }
403
404 void rockchip_sysmmu_set_fault_handler(struct device *dev,sysmmu_fault_handler_t handler)
405 {
406         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
407
408         __set_fault_handler(data, handler);
409 }
410
/*
 * Fallback fault callback used when no driver-specific handler is installed.
 * Logs the fault details and BUG()s, since a fault reaching this point is
 * treated as unrecoverable.  Returns 0 (reachable only if BUG() is
 * compiled out).
 */
static int default_fault_handler(struct device *dev,
                                        enum rk_sysmmu_inttype itype,
                                        unsigned long pgtable_base,
                                        unsigned long fault_addr,
                                        unsigned int status
                                        )
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        if(!data)
        {
                pr_info("%s,iommu device not assigned yet\n",__func__);
                return 0;
        }
        /* Clamp out-of-range interrupt types to "unknown". */
        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        if(itype == SYSMMU_BUSERROR)
                pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",sysmmu_fault_name[itype], fault_addr, pgtable_base);

        if(itype == SYSMMU_PAGEFAULT)
                /* status[10:6] = bus id, status bit 5 = write fault — layout
                 * implied by this decode; TODO confirm against the TRM. */
                pr_err("SYSMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
                                fault_addr,
                                (status >> 6) & 0x1F,
                                (status & 32) ? "write" : "read",
                                data->dbgname
                                );

        pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

        BUG();

        return 0;
}
445 static void dump_pagetbl(u32 fault_address,unsigned long addr_dte)
446 {
447         u32  offset1;
448         u32  offset2;
449         u32 *level2_base;
450         u32 *level1_entry;
451         u32 *level2_entry;
452         offset1 = lv1ent_offset(fault_address);
453         offset2 = lv2ent_offset(fault_address);
454         level1_entry = (u32 *)__va(addr_dte)+offset1;
455         level2_base = (u32 *)__va((*level1_entry)&0xfffffffe);
456         level2_entry = level2_base+offset2;
457         pr_info("level1 offset=%d,level2 offset=%d,level1_entry=0x%08x\n",offset1,offset2,(u32)level1_entry);
458         pr_info("*level1_entry = 0x%08x\n",*level1_entry);
459         pr_info("*level2_entry = 0x%08x\n",*level2_entry);
460
461 }
462 static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
463 {
464         /* SYSMMU is in blocked when interrupt occurred. */
465         struct sysmmu_drvdata *data = dev_id;
466         struct resource *irqres;
467         struct platform_device *pdev;
468         enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
469         u32 status;
470         u32 rawstat;
471         u32 int_status;
472         u32 fault_address;
473         int i, ret = 0;
474
475         read_lock(&data->lock);
476         
477 #if 0
478         WARN_ON(!is_sysmmu_active(data));
479 #else
480         if(!is_sysmmu_active(data))
481         {
482                 read_unlock(&data->lock);
483                 return IRQ_HANDLED;
484         }
485 #endif  
486         pdev = to_platform_device(data->sysmmu);
487
488         for (i = 0; i < data->num_res_irq; i++) 
489         {
490                 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
491                 if (irqres && ((int)irqres->start == irq))
492                         break;
493         }
494
495         if (i == data->num_res_irq) 
496         {
497                 itype = SYSMMU_FAULT_UNKNOWN;
498         } 
499         else 
500         {
501                 int_status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_STATUS);
502                 if(int_status != 0)
503                 {
504                         /*mask status*/
505                         __raw_writel(0x00,data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
506                         
507                         rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);
508
509                         if(rawstat & SYSMMU_INTERRUPT_PAGE_FAULT)
510                         {
511                                 fault_address = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
512                                 itype = SYSMMU_PAGEFAULT;
513                         }
514                         else if(rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR)
515                         {
516                                 itype = SYSMMU_BUSERROR;
517                         }
518                         else
519                         {
520                                 goto out;
521                         }
522                         dump_pagetbl(fault_address,__raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR));
523                 }
524                 else
525                         goto out;
526         }
527         
528         if (data->fault_handler) 
529         {
530                 unsigned long base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);
531                 status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
532                 ret = data->fault_handler(data->dev, itype, base, fault_address,status);
533         }
534
535         if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
536         {
537                 if(SYSMMU_PAGEFAULT == itype)
538                 {
539                         sysmmu_zap_tlb(data->res_bases[i]);
540                         sysmmu_page_fault_done(data->res_bases[i],data->dbgname);
541                         __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
542                 }
543         }
544         else
545                 pr_err("(%s) %s is not handled.\n",data->dbgname, sysmmu_fault_name[itype]);
546
547 out :
548         read_unlock(&data->lock);
549
550         return IRQ_HANDLED;
551 }
552
553 static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
554 {
555         unsigned long flags;
556         bool disabled = false;
557         int i;
558         write_lock_irqsave(&data->lock, flags);
559
560         if (!set_sysmmu_inactive(data))
561                 goto finish;
562
563         for(i=0;i<data->num_res_mem;i++)
564         {
565                 sysmmu_disable_paging(data->res_bases[i]);
566         }
567
568         disabled = true;
569         data->pgtable = 0;
570         data->domain = NULL;
571 finish:
572         write_unlock_irqrestore(&data->lock, flags);
573
574         if (disabled)
575                 pr_info("(%s) Disabled\n", data->dbgname);
576         else
577                 pr_info("(%s) %d times left to be disabled\n",data->dbgname, data->activations);
578
579         return disabled;
580 }
581
/* __rk_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data,unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) 
        {
                /* Already active: only legal when the caller asks for the
                 * page table that is currently programmed. */
                if (WARN_ON(pgtable != data->pgtable)) 
                {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } 
                else 
                        ret = 1;

                pr_info("(%s) Already enabled\n", data->dbgname);
                goto finish;
        }
        
        data->pgtable = pgtable;

        /* First activation: program each MMU instance.  Order matters:
         * stall, set page-table base, zap stale TLB entries, then enable
         * paging and release the stall. */
        for (i = 0; i < data->num_res_mem; i++) 
        {
                bool status;
                status = sysmmu_enable_stall(data->res_bases[i]);
                if(status)
                {
                        __sysmmu_set_ptbase(data->res_bases[i], pgtable);
                        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
                }
                sysmmu_enable_paging(data->res_bases[i]);
                sysmmu_disable_stall(data->res_bases[i]);
        }

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}
632 bool rockchip_sysmmu_disable(struct device *dev)
633 {
634         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
635         bool disabled;
636
637         disabled = __rockchip_sysmmu_disable(data);
638
639         return disabled;
640 }
641 void rockchip_sysmmu_tlb_invalidate(struct device *dev)
642 {
643         unsigned long flags;
644         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
645
646         read_lock_irqsave(&data->lock, flags);
647
648         if (is_sysmmu_active(data)) 
649         {
650                 int i;
651                 for (i = 0; i < data->num_res_mem; i++) 
652                 {
653                         if(!sysmmu_zap_tlb(data->res_bases[i]))
654                                 pr_err("%s,invalidating TLB failed\n",data->dbgname);
655                 }
656         } 
657         else 
658                 pr_info("(%s) Disabled. Skipping invalidating TLB.\n",data->dbgname);
659
660         read_unlock_irqrestore(&data->lock, flags);
661 }
662 static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,dma_addr_t iova)
663 {
664         struct rk_iommu_domain *priv = domain->priv;
665         unsigned long *entry;
666         unsigned long flags;
667         phys_addr_t phys = 0;
668
669         spin_lock_irqsave(&priv->pgtablelock, flags);
670
671         entry = section_entry(priv->pgtable, iova);
672         entry = page_entry(entry, iova);
673         phys = spage_phys(entry) + spage_offs(iova);
674         
675         spin_unlock_irqrestore(&priv->pgtablelock, flags);
676
677         return phys;
678 }
679 static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
680                                                                 short *pgcnt)
681 {
682         if (!lv2ent_fault(pent))
683                 return -EADDRINUSE;
684
685         *pent = mk_lv2ent_spage(paddr);
686         pgtable_flush(pent, pent + 1);
687         *pgcnt -= 1;
688         return 0;
689 }
690
691 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,short *pgcounter)
692 {
693         if (lv1ent_fault(sent)) 
694         {
695                 unsigned long *pent;
696
697                 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
698                 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
699                 if (!pent)
700                         return NULL;
701
702                 *sent = mk_lv1ent_page(__pa(pent));
703                 kmemleak_ignore(pent);
704                 *pgcounter = NUM_LV2ENTRIES;
705                 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
706                 pgtable_flush(sent, sent + 1);
707         }
708         return page_entry(sent, iova);
709 }
710
711 static size_t rockchip_iommu_unmap(struct iommu_domain *domain,unsigned long iova, size_t size)
712 {
713         struct rk_iommu_domain *priv = domain->priv;
714         unsigned long flags;
715         unsigned long *ent;
716
717         BUG_ON(priv->pgtable == NULL);
718
719         spin_lock_irqsave(&priv->pgtablelock, flags);
720
721         ent = section_entry(priv->pgtable, iova);
722
723         if (unlikely(lv1ent_fault(ent))) 
724         {
725                 if (size > SPAGE_SIZE)
726                         size = SPAGE_SIZE;
727                 goto done;
728         }
729
730         /* lv1ent_page(sent) == true here */
731
732         ent = page_entry(ent, iova);
733
734         if (unlikely(lv2ent_fault(ent))) 
735         {
736                 size = SPAGE_SIZE;
737                 goto done;
738         }
739         
740         *ent = 0;
741         size = SPAGE_SIZE;
742         priv->lv2entcnt[lv1ent_offset(iova)] += 1;
743         goto done;
744
745 done:
746         //pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",__func__, iova,size);
747         spin_unlock_irqrestore(&priv->pgtablelock, flags);
748
749         return size;
750 }
751 static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
752                          phys_addr_t paddr, size_t size, int prot)
753 {
754         struct rk_iommu_domain *priv = domain->priv;
755         unsigned long *entry;
756         unsigned long flags;
757         int ret = -ENOMEM;
758         unsigned long *pent;
759
760         BUG_ON(priv->pgtable == NULL);
761
762         spin_lock_irqsave(&priv->pgtablelock, flags);
763
764         entry = section_entry(priv->pgtable, iova);
765         
766         pent = alloc_lv2entry(entry, iova,&priv->lv2entcnt[lv1ent_offset(iova)]);
767         if (!pent)
768                 ret = -ENOMEM;
769         else
770                 ret = lv2set_page(pent, paddr, size,&priv->lv2entcnt[lv1ent_offset(iova)]);
771         
772         if (ret)
773         {
774                 pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n",__func__, iova, size);
775         }
776         spin_unlock_irqrestore(&priv->pgtablelock, flags);
777
778         return ret;
779 }
780
781 static void rockchip_iommu_detach_device(struct iommu_domain *domain,
782                                     struct device *dev)
783 {
784         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
785         struct rk_iommu_domain *priv = domain->priv;
786         struct list_head *pos;
787         unsigned long flags;
788         bool found = false;
789
790         spin_lock_irqsave(&priv->lock, flags);
791
792         list_for_each(pos, &priv->clients) 
793         {
794                 if (list_entry(pos, struct sysmmu_drvdata, node) == data) 
795                 {
796                         found = true;
797                         break;
798                 }
799         }
800         if (!found)
801                 goto finish;
802
803         if (__rockchip_sysmmu_disable(data)) 
804         {
805                 pr_info("%s: Detached IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
806                 list_del(&data->node);
807                 INIT_LIST_HEAD(&data->node);
808
809         } 
810         else 
811                 pr_info("%s: Detaching IOMMU with pgtable %#lx delayed",__func__, __pa(priv->pgtable));
812         
813 finish:
814         spin_unlock_irqrestore(&priv->lock, flags);
815 }
816 static int rockchip_iommu_attach_device(struct iommu_domain *domain,struct device *dev)
817 {
818         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
819         struct rk_iommu_domain *priv = domain->priv;
820         unsigned long flags;
821         int ret;
822
823         spin_lock_irqsave(&priv->lock, flags);
824
825         ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);
826
827         if (ret == 0) 
828         {
829                 /* 'data->node' must not be appeared in priv->clients */
830                 BUG_ON(!list_empty(&data->node));
831                 data->dev = dev;
832                 list_add_tail(&data->node, &priv->clients);
833         }
834
835         spin_unlock_irqrestore(&priv->lock, flags);
836
837         if (ret < 0) 
838         {
839                 pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
840         } 
841         else if (ret > 0) 
842         {
843                 pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",__func__, __pa(priv->pgtable));
844         } 
845         else 
846         {
847                 pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",__func__, __pa(priv->pgtable));
848         }
849
850         return ret;
851 }
/*
 * Tear down a domain: force-disable any still-attached clients, free every
 * allocated level-2 table, then the level-1 table, the counter page and the
 * domain structure itself.
 */
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        /* Clients should have been detached before destroy. */
        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) 
        {
                /* NOTE(review): spins with priv->lock held and irqs off until
                 * each client's activation count reaches zero — verify this
                 * cannot livelock against a concurrent enable. */
                while (!rockchip_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Free every allocated level-2 table before dropping level 1. */
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
        kfree(domain->priv);
        domain->priv = NULL;
}
879
880 static int rockchip_iommu_domain_init(struct iommu_domain *domain)
881 {
882         struct rk_iommu_domain *priv;
883
884         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
885         if (!priv)
886                 return -ENOMEM;
887         
888 /*rk32xx sysmmu use 2 level pagetable,
889    level1 and leve2 both have 1024 entries,each entry  occupy 4 bytes,
890    so alloc a page size for each page table 
891 */
892         priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
893         if (!priv->pgtable)
894                 goto err_pgtable;
895
896         priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
897         if (!priv->lv2entcnt)
898                 goto err_counter;
899
900         pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
901
902         spin_lock_init(&priv->lock);
903         spin_lock_init(&priv->pgtablelock);
904         INIT_LIST_HEAD(&priv->clients);
905
906         domain->priv = priv;
907         return 0;
908
909 err_counter:
910         free_pages((unsigned long)priv->pgtable, 0);    
911 err_pgtable:
912         kfree(priv);
913         return -ENOMEM;
914 }
915
916 static struct iommu_ops rk_iommu_ops = 
917 {
918         .domain_init = &rockchip_iommu_domain_init,
919         .domain_destroy = &rockchip_iommu_domain_destroy,
920         .attach_dev = &rockchip_iommu_attach_device,
921         .detach_dev = &rockchip_iommu_detach_device,
922         .map = &rockchip_iommu_map,
923         .unmap = &rockchip_iommu_unmap,
924         .iova_to_phys = &rockchip_iommu_iova_to_phys,
925         .pgsize_bitmap = SPAGE_SIZE,
926 };
927
928 static int rockchip_sysmmu_prepare(void)
929 {
930         int ret = 0;
931         static int registed = 0;
932         
933         if(registed)
934                 return 0;
935         
936         lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
937         if (!lv2table_kmem_cache) 
938         {
939                 pr_err("%s: failed to create kmem cache\n", __func__);
940                 return -ENOMEM;
941         }
942         ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
943         if(!ret)
944                 registed = 1;
945         else
946                 pr_err("%s:failed to set iommu to bus\r\n",__func__);
947         return ret;
948 }
/*
 * Count the resources of @type (IORESOURCE_MEM or IORESOURCE_IRQ) that
 * @pdev provides. Replaces the original while/goto tangle with a plain
 * loop — same result: probes increasing indices until none is found.
 */
static int rockchip_get_sysmmu_resource_num(struct platform_device *pdev,
                                            unsigned int type)
{
        int num_resources = 0;

        while (platform_get_resource(pdev, type, num_resources))
                num_resources++;

        return num_resources;
}
964
965 static int rockchip_sysmmu_probe(struct platform_device *pdev)
966 {
967         int i, ret;
968         struct device *dev;
969         struct sysmmu_drvdata *data;
970         
971         dev = &pdev->dev;
972         
973         ret = rockchip_sysmmu_prepare();
974         if(ret)
975         {
976                 pr_err("%s,failed\r\n",__func__);
977                 goto err_alloc;
978         }
979
980         data = devm_kzalloc(dev,sizeof(*data), GFP_KERNEL);
981         if (!data) 
982         {
983                 dev_dbg(dev, "Not enough memory\n");
984                 ret = -ENOMEM;
985                 goto err_alloc;
986         }
987         
988         ret = dev_set_drvdata(dev, data);
989         if (ret) 
990         {
991                 dev_dbg(dev, "Unabled to initialize driver data\n");
992                 goto err_init;
993         }
994         
995         if(pdev->dev.of_node)
996         {
997                 of_property_read_string(pdev->dev.of_node,"dbgname",&(data->dbgname));
998         }
999         else
1000         {
1001                 pr_info("dbgname not assigned in device tree or device node not exist\r\n");
1002         }
1003
1004         pr_info("(%s) Enter\n", data->dbgname);
1005
1006         /*rk32xx sysmmu need both irq and memory */
1007         data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_MEM);
1008         if(0 == data->num_res_mem)
1009         {
1010                 pr_err("can't find sysmmu memory resource \r\n");
1011                 goto err_init;
1012         }
1013         pr_info("data->num_res_mem=%d\n",data->num_res_mem);
1014         data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_IRQ);
1015         if(0 == data->num_res_irq)
1016         {
1017                 pr_err("can't find sysmmu irq resource \r\n");
1018                 goto err_init;
1019         }
1020         
1021         data->res_bases = kmalloc(sizeof(*data->res_bases) * data->num_res_mem,GFP_KERNEL);
1022         if (data->res_bases == NULL)
1023         {
1024                 dev_dbg(dev, "Not enough memory\n");
1025                 ret = -ENOMEM;
1026                 goto err_init;
1027         }
1028
1029         for (i = 0; i < data->num_res_mem; i++) 
1030         {
1031                 struct resource *res;
1032                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1033                 if (!res) 
1034                 {
1035                         pr_err("Unable to find IOMEM region\n");
1036                         ret = -ENOENT;
1037                         goto err_res;
1038                 }
1039                 data->res_bases[i] = ioremap(res->start, resource_size(res));
1040                 pr_info("res->start = 0x%08x  ioremap to  data->res_bases[%d] = 0x%08x\n",res->start,i,(unsigned int)data->res_bases[i]);
1041                 if (!data->res_bases[i]) 
1042                 {
1043                         pr_err("Unable to map IOMEM @ PA:%#x\n",res->start);
1044                         ret = -ENOENT;
1045                         goto err_res;
1046                 }
1047                 if(!strstr(data->dbgname,"isp"))
1048                 {
1049                         /*reset sysmmu*/
1050                         if(!sysmmu_reset(data->res_bases[i],data->dbgname))
1051                         {
1052                                 ret = -ENOENT;
1053                                 goto err_res;
1054                         }
1055                 }
1056         }
1057
1058         for (i = 0; i < data->num_res_irq; i++) 
1059         {
1060                 ret = platform_get_irq(pdev, i);
1061                 if (ret <= 0) 
1062                 {
1063                         pr_err("Unable to find IRQ resource\n");
1064                         goto err_irq;
1065                 }
1066                 ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED ,dev_name(dev), data);
1067                 if (ret) 
1068                 {
1069                         pr_err("Unabled to register interrupt handler\n");
1070                         goto err_irq;
1071                 }
1072         }
1073         ret = rockchip_init_iovmm(dev, &data->vmm);
1074         if (ret)
1075                 goto err_irq;
1076         
1077         
1078         data->sysmmu = dev;
1079         rwlock_init(&data->lock);
1080         INIT_LIST_HEAD(&data->node);
1081
1082         __set_fault_handler(data, &default_fault_handler);
1083
1084         pr_info("(%s) Initialized\n", data->dbgname);
1085         return 0;
1086
1087 err_irq:
1088         while (i-- > 0) 
1089         {
1090                 int irq;
1091
1092                 irq = platform_get_irq(pdev, i);
1093                 free_irq(irq, data);
1094         }
1095 err_res:
1096         while (data->num_res_mem-- > 0)
1097                 iounmap(data->res_bases[data->num_res_mem]);
1098         kfree(data->res_bases);
1099 err_init:
1100         kfree(data);
1101 err_alloc:
1102         dev_err(dev, "Failed to initialize\n");
1103         return ret;
1104 }
1105
#ifdef CONFIG_OF
/*
 * Device-tree match table: one compatible string per IP block that
 * embeds a sysmmu (IEP, VIP, the two VOPs, HEVC, VPU and ISP).
 */
static const struct of_device_id sysmmu_dt_ids[] = 
{
        { .compatible = IEP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VIP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPB_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPL_SYSMMU_COMPATIBLE_NAME},
        { .compatible = HEVC_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VPU_SYSMMU_COMPATIBLE_NAME},
        { .compatible = ISP_SYSMMU_COMPATIBLE_NAME},
        { /* end */ }
};
MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
#endif
1120
1121 static struct platform_driver rk_sysmmu_driver = 
1122 {
1123         .probe = rockchip_sysmmu_probe,
1124         .remove = NULL,
1125         .driver = 
1126         {
1127                    .name = "rk_sysmmu",
1128                    .owner = THIS_MODULE,
1129                    .of_match_table = of_match_ptr(sysmmu_dt_ids),
1130         },
1131 };
1132
1133 #if 0
/*
 * NOTE(review): module_platform_driver() expands to module_init(), which
 * runs later than the core_initcall() used below — presumably this driver
 * must register before its client drivers probe. Confirm before
 * re-enabling this block.
 */
1135 #ifdef CONFIG_OF
1136 module_platform_driver(rk_sysmmu_driver);
1137 #endif
1138 #endif
1139 static int __init rockchip_sysmmu_init_driver(void)
1140 {
1141         return platform_driver_register(&rk_sysmmu_driver);
1142 }
1143
1144 core_initcall(rockchip_sysmmu_init_driver);
1145