support rockchip iommu
author xxm <xxm@rock-chips.com>
Mon, 17 Mar 2014 02:18:13 +0000 (10:18 +0800)
committer Cody Xie <Cody.Xie@rock-chips.com>
Mon, 17 Mar 2014 02:18:13 +0000 (10:18 +0800)
arch/arm/boot/dts/rk3288.dtsi
drivers/iommu/rockchip-iommu.c [new file with mode: 0755]
drivers/iommu/rockchip-iommu.h [new file with mode: 0755]
drivers/iommu/rockchip-iovmm.c [new file with mode: 0755]
include/linux/rockchip/iovmm.h [new file with mode: 0755]
include/linux/rockchip/sysmmu.h [new file with mode: 0755]

index 5d5ebd5d9551ebe819226396e92dbf9592713977..5d6185ad4bd6a34f3e1101dbb75b35face10030f 100755 (executable)
                pinctrl-names = "default";
                pinctrl-0 = <&mac_clk &mac_txpins &mac_rxpins &mac_mdpins>;
        };
+    gpu {
+        compatible = "arm,malit764",
+                     "arm,malit76x",
+                     "arm,malit7xx",
+                     "arm,mali-midgard";
+        reg = <0xffa40000 0x1000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "JOB",
+                          "MMU",
+                          "GPU";
+    };
+
+    iep_mmu {
+        dbgname = "iep";
+        compatible = "iommu,iep_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "iep_mmu";
+    };
+
+    vip_mmu {
+        dbgname = "vip";
+        compatible = "iommu,vip_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "vip_mmu";
+    };
+
+    isp0_mmu {
+        dbgname = "isp0";
+        compatible = "iommu,isp0_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "isp0_mmu";
+    };
+
+    isp1_mmu {
+        dbgname = "isp1";
+        compatible = "iommu,isp1_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "isp1_mmu";
+    };
+
+    vopb_mmu {
+        dbgname = "vopb";
+        compatible = "iommu,vopb_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "vopb_mmu";
+    };
+
+    vopl_mmu {
+        dbgname = "vopl";
+        compatible = "iommu,vopl_mmu";
+        reg = <0xffa40000 0x10000>;
+        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "vopl_mmu";
+    };
 };
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100755 (executable)
index 0000000..bbce3a6
--- /dev/null
@@ -0,0 +1,1088 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/kmemleak.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/of.h>
+#include <linux/rockchip/sysmmu.h>
+
+#include "rockchip-iommu.h"
+
+/* We do not consider supersection mapping (16 MB) */
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+typedef enum sysmmu_entry_flags 
+{
+       SYSMMU_FLAGS_PRESENT = 0x01,
+       SYSMMU_FLAGS_READ_PERMISSION = 0x02,
+       SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
+       SYSMMU_FLAGS_OVERRIDE_CACHE  = 0x8,
+       SYSMMU_FLAGS_WRITE_CACHEABLE  = 0x10,
+       SYSMMU_FLAGS_WRITE_ALLOCATE  = 0x20,
+       SYSMMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
+       SYSMMU_FLAGS_READ_CACHEABLE  = 0x80,
+       SYSMMU_FLAGS_READ_ALLOCATE  = 0x100,
+       SYSMMU_FLAGS_MASK = 0x1FF,
+} sysmmu_entry_flags;
+
+#define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
+#define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
+#define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0x0FFF)
+
+#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
+#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
+
+#define NUM_LV1ENTRIES 1024
+#define NUM_LV2ENTRIES 1024
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
+
+#define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
+/* level-2 page entries default to read and write permission */
+#define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT | SYSMMU_FLAGS_READ_PERMISSION | SYSMMU_FLAGS_WRITE_PERMISSION)
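+
+/*
+ * For illustration, with the 4 KiB pages above a 32-bit iova such as
+ * 0x12345678 decomposes as
+ *   lv1ent_offset(0x12345678) = (0x12345678 >> 22) & 0x3FF = 0x048
+ *   lv2ent_offset(0x12345678) = (0x12345678 >> 12) & 0x3FF = 0x345
+ *   spage_offs(0x12345678)   =   0x12345678 & 0xFFF        = 0x678
+ * i.e. entry 0x48 of the level-1 table points at a level-2 table whose
+ * entry 0x345 holds the physical page; the low 12 bits are the in-page
+ * offset.
+ */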
+
+#define SYSMMU_REG_POLL_COUNT_FAST 1000
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum sysmmu_register 
+{
+       SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+       SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+       SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+       SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+       SYSMMU_REGISTER_ZAP_ONE_LINE = 0x0010, /**< Used to invalidate the mapping of a single page from the MMU */
+       SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+       SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+       SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+       SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
+       SYSMMU_REGISTER_AUTO_GATING     = 0x0024
+} sysmmu_register;
+
+typedef enum sysmmu_command 
+{
+       SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+       SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+       SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
+       SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+       SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+       SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+       SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} sysmmu_command;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum sysmmu_interrupt 
+{
+       SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+       SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} sysmmu_interrupt;
+
+typedef enum sysmmu_status_bits 
+{
+       SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
+       SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
+       SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
+       SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
+       SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+       SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+       SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
+} sysmmu_status_bits;
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define SYSMMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
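+/* a level-2 table holds 1024 entries, each mapping a 4 KiB page: 1024 * 4 KiB = 4 MiB */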
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define SYSMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define SYSMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define INVALID_PAGE ((u32)(~0))
+
+static struct kmem_cache *lv2table_kmem_cache;
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+       return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+       return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+static const char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+       "PAGE FAULT",
+       "BUS ERROR",
+       "UNKNOWN FAULT"
+};
+
+struct rk_iommu_domain {
+       struct list_head clients; /* list of sysmmu_drvdata.node */
+       unsigned long *pgtable; /* lv1 page table, 4KB */
+       short *lv2entcnt; /* free lv2 entry counter for each section */
+       spinlock_t lock; /* lock for this structure */
+       spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+       /* return true if the System MMU was not active previously
+          and it needs to be initialized */
+       return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+       /* return true if the System MMU needs to be disabled */
+       BUG_ON(data->activations < 1);
+       return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+       return data->activations > 0;
+}
+static void sysmmu_disable_stall(void __iomem *sfrbase)
+{
+       int i;
+       u32 mmu_status = __raw_readl(sfrbase+SYSMMU_REGISTER_STATUS);
+       if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
+       {
+               pr_err("MMU disable skipped since it was not enabled.\n");
+               return;
+       }
+       if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
+       {
+               pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
+               return;
+       }
+       
+       __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
+       
+       for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
+       {
+               u32 status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
+               if ( 0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE) ) 
+               {
+                       break;
+               }
+               if ( status &  SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
+               {
+                       break;
+               }
+               if (0 == (status & SYSMMU_STATUS_BIT_PAGING_ENABLED))
+               {
+                       break;
+               }
+       }
+       if (SYSMMU_REG_POLL_COUNT_FAST == i) 
+               pr_err("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
+}
+static bool sysmmu_enable_stall(void __iomem *sfrbase)
+{
+       int i;
+       u32 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
+
+       if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED) ) 
+       {
+               pr_info("MMU stall is implicit when Paging is not enabled.\n");
+               return true;
+       }
+       if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
+       {
+               pr_err("Aborting MMU stall request since it is in pagefault state.\n");
+               return false;
+       }
+       
+       __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
+
+       for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
+       {
+               mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
+               if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
+               {
+                       break;
+               }
+               if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE))) 
+               {
+                       break;
+               }
+               if (0 == (mmu_status & ( SYSMMU_STATUS_BIT_PAGING_ENABLED ))) 
+               {
+                       break;
+               }
+       }
+       if (SYSMMU_REG_POLL_COUNT_FAST == i) 
+       {
+               pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
+               return false;
+       }
+       if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
+       {
+               pr_info("Aborting MMU stall request since it has a pagefault.\n");
+               return false;
+       }
+       return true;
+}
+
+static bool sysmmu_enable_paging(void __iomem *sfrbase)
+{
+       int i;
+       __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
+
+       for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
+       {
+               if (__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED) 
+               {
+                       pr_info("Enable paging request success.\n");
+                       break;
+               }
+       }
+       if (SYSMMU_REG_POLL_COUNT_FAST == i)
+       {
+               pr_err("Enable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
+               return false;
+       }
+       return true;
+}
+void sysmmu_page_fault_done(void __iomem *sfrbase, const char *dbgname)
+{
+       pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
+       __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, sfrbase + SYSMMU_REGISTER_COMMAND);
+}
+bool sysmmu_zap_tlb(void __iomem *sfrbase)
+{
+       bool stall_success = sysmmu_enable_stall(sfrbase);
+       
+       __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, sfrbase + SYSMMU_REGISTER_COMMAND);
+       if (false == stall_success) 
+       {
+               /* False means that it is in Pagefault state. Not possible to disable_stall then */
+               return false;
+       }
+       sysmmu_disable_stall(sfrbase);
+       return true;
+}
+static inline int sysmmu_raw_reset(void __iomem *sfrbase)
+{
+       int i;
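+
+       /*
+        * Sanity check: the page-directory base programmed into DTE_ADDR is
+        * 4 KiB aligned, so the hardware apparently drops the low 12 bits of
+        * a written value; the magic 0xCAFEBABE must read back as 0xCAFEB000.
+        */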
+       __raw_writel(0xCAFEBABE, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
+
+       if (0xCAFEB000 != __raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR))
+       {
+               pr_err("%s: DTE_ADDR register sanity check failed.\n", __func__);
+               return -1;
+       }
+       __raw_writel(SYSMMU_COMMAND_HARD_RESET, sfrbase + SYSMMU_REGISTER_COMMAND);
+
+       for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
+       {
+               if(__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR) == 0)
+               {
+                       break;
+               }
+       }
+       if (SYSMMU_REG_POLL_COUNT_FAST == i) {
+               pr_err("Reset request failed, DTE_ADDR is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR));
+               return -1;
+       }
+       return 0;
+}
+
+static bool sysmmu_reset(void __iomem *sfrbase, const char *dbgname)
+{
+       bool ok = false;
+       bool stall_success;
+
+       stall_success = sysmmu_enable_stall(sfrbase);
+       if (!stall_success)
+       {
+               pr_info("sysmmu reset: stall failed: %s\n", dbgname);
+               return ok;
+       }
+       if (0 == sysmmu_raw_reset(sfrbase))
+       {
+               __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT | SYSMMU_INTERRUPT_READ_BUS_ERROR, sfrbase + SYSMMU_REGISTER_INT_MASK);
+               ok = sysmmu_enable_paging(sfrbase);
+       }
+       sysmmu_disable_stall(sfrbase);
+       if (ok)
+               pr_info("SYSMMU: reset succeeded: %s\n", dbgname);
+       else
+               pr_info("SYSMMU: reset failed: %s\n", dbgname);
+       return ok;
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase, unsigned long pgd)
+{
+       __raw_writel(pgd, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
+}
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+       dmac_flush_range(vastart, vaend);
+       outer_flush_range(virt_to_phys(vastart),virt_to_phys(vaend));
+}
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+                                       sysmmu_fault_handler_t handler)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&data->lock, flags);
+       data->fault_handler = handler;
+       write_unlock_irqrestore(&data->lock, flags);
+}
+
+void rockchip_sysmmu_set_fault_handler(struct device *dev, sysmmu_fault_handler_t handler)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       __set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(struct device *dev,
+                                       enum rk_sysmmu_inttype itype,
+                                       unsigned long pgtable_base,
+                                       unsigned long fault_addr,
+                                       unsigned int status
+                                       )
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+               itype = SYSMMU_FAULT_UNKNOWN;
+
+       if (itype == SYSMMU_BUSERROR)
+               pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n", sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+       if (itype == SYSMMU_PAGEFAULT)
+               pr_err("SYSMMU: Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
+                               fault_addr,
+                               (status >> 6) & 0x1F,
+                               (status & SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE) ? "write" : "read",
+                               data->dbgname);
+       
+       pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+       BUG();
+
+       return 0;
+}
+
+static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
+{
+       /* The SYSMMU is blocked when the interrupt occurs. */
+       struct sysmmu_drvdata *data = dev_id;
+       struct resource *irqres;
+       struct platform_device *pdev;
+       enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
+       u32 status = 0;
+       u32 rawstat;
+       u32 fault_address = 0;
+       int i, ret = -ENOSYS;
+
+       read_lock(&data->lock);
+
+       WARN_ON(!is_sysmmu_active(data));
+
+       pdev = to_platform_device(data->sysmmu);
+       for (i = 0; i < data->num_res_irq; i++) 
+       {
+               irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+               if (irqres && ((int)irqres->start == irq))
+                       break;
+       }
+
+       if (i == data->num_res_irq) 
+       {
+               itype = SYSMMU_FAULT_UNKNOWN;
+       } 
+       else 
+       {
+               status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
+               if(status != 0)
+               {
+                       rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);
+                       if(rawstat & SYSMMU_INTERRUPT_PAGE_FAULT)
+                       {
+                               fault_address = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
+                               itype = SYSMMU_PAGEFAULT;
+                       }
+                       else if(rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR)
+                       {
+                               itype = SYSMMU_BUSERROR;
+                       }
+                       else
+                       {
+                               goto out;
+                       }
+               }
+       }
+
+       if (data->domain)
+               ret = report_iommu_fault(data->domain, data->dev,fault_address, itype);
+
+       if ((ret == -ENOSYS) && data->fault_handler) 
+       {
+               unsigned long base = data->pgtable;
+               if (itype != SYSMMU_FAULT_UNKNOWN)
+                       base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);
+               ret = data->fault_handler(data->dev, itype, base, fault_address,status);
+       }
+
+       if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+       {
+               if(SYSMMU_PAGEFAULT == itype)
+                       sysmmu_page_fault_done(data->res_bases[i],data->dbgname);
+               sysmmu_reset(data->res_bases[i],data->dbgname);
+       }
+       else
+               pr_err("(%s) %s is not handled.\n",data->dbgname, sysmmu_fault_name[itype]);
+
+out:
+       read_unlock(&data->lock);
+
+       return IRQ_HANDLED;
+}
+
+static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+       unsigned long flags;
+       bool disabled = false;
+       int i;
+       write_lock_irqsave(&data->lock, flags);
+
+       if (!set_sysmmu_inactive(data))
+               goto finish;
+
+       for (i = 0; i < data->num_res_mem; i++)
+       {
+               if (!sysmmu_reset(data->res_bases[i], data->dbgname))
+                       goto finish;
+       }
+       disabled = true;
+       data->pgtable = 0;
+       data->domain = NULL;
+finish:
+       write_unlock_irqrestore(&data->lock, flags);
+
+       if (disabled)
+               pr_info("(%s) Disabled\n", data->dbgname);
+       else
+               pr_info("(%s) %d times left to be disabled\n",data->dbgname, data->activations);
+
+       return disabled;
+}
+
+/* __rockchip_sysmmu_enable: Enables the System MMU
+ *
+ * Returns a negative error code if the System MMU could not be enabled,
+ * 0 if it has just been enabled, and 1 if it was already enabled.
+ */
+static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data, unsigned long pgtable, struct iommu_domain *domain)
+{
+       int i, ret = 0;
+       unsigned long flags;
+
+       write_lock_irqsave(&data->lock, flags);
+
+       if (!set_sysmmu_active(data)) 
+       {
+               if (WARN_ON(pgtable != data->pgtable)) 
+               {
+                       ret = -EBUSY;
+                       set_sysmmu_inactive(data);
+               } 
+               else 
+                       ret = 1;
+
+               pr_info("(%s) Already enabled\n", data->dbgname);
+               goto finish;
+       }
+       
+       data->pgtable = pgtable;
+
+       for (i = 0; i < data->num_res_mem; i++) 
+       {
+               bool status;
+               status = sysmmu_enable_stall(data->res_bases[i]);
+               if(status)
+               {
+                       __sysmmu_set_ptbase(data->res_bases[i], pgtable);
+                       __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
+               }
+               sysmmu_disable_stall(data->res_bases[i]);
+       }
+
+       data->domain = domain;
+
+       pr_info("(%s) Enabled\n", data->dbgname);
+finish:
+       write_unlock_irqrestore(&data->lock, flags);
+
+       return ret;
+}
+bool rockchip_sysmmu_disable(struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       bool disabled;
+
+       disabled = __rockchip_sysmmu_disable(data);
+
+       return disabled;
+}
+void rockchip_sysmmu_tlb_invalidate(struct device *dev)
+{
+       unsigned long flags;
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       read_lock_irqsave(&data->lock, flags);
+
+       if (is_sysmmu_active(data)) 
+       {
+               int i;
+               for (i = 0; i < data->num_res_mem; i++) 
+               {
+                       if(!sysmmu_zap_tlb(data->res_bases[i]))
+                               pr_err("%s,invalidating TLB failed\n",data->dbgname);
+               }
+       } 
+       else 
+               pr_info("(%s) Disabled. Skipping TLB invalidation.\n", data->dbgname);
+
+       read_unlock_irqrestore(&data->lock, flags);
+}
+static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
+{
+       struct rk_iommu_domain *priv = domain->priv;
+       unsigned long *entry;
+       unsigned long flags;
+       phys_addr_t phys = 0;
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       entry = section_entry(priv->pgtable, iova);
+       if (lv1ent_page(entry))
+       {
+               entry = page_entry(entry, iova);
+               if (!lv2ent_fault(entry))
+                       phys = spage_phys(entry) + spage_offs(iova);
+       }
+
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       return phys;
+}
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+                                                               short *pgcnt)
+{
+       if (!lv2ent_fault(pent))
+               return -EADDRINUSE;
+
+       *pent = mk_lv2ent_spage(paddr);
+       pgtable_flush(pent, pent + 1);
+       *pgcnt -= 1;
+       return 0;
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, short *pgcounter)
+{
+       if (lv1ent_fault(sent)) 
+       {
+               unsigned long *pent;
+
+               pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
+               if (!pent)
+                       return NULL;
+               BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+
+               *sent = mk_lv1ent_page(__pa(pent));
+               kmemleak_ignore(pent);
+               *pgcounter = NUM_LV2ENTRIES;
+               pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+               pgtable_flush(sent, sent + 1);
+       }
+       return page_entry(sent, iova);
+}
+
+static size_t rockchip_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+{
+       struct rk_iommu_domain *priv = domain->priv;
+       unsigned long flags;
+       unsigned long *ent;
+
+       BUG_ON(priv->pgtable == NULL);
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       ent = section_entry(priv->pgtable, iova);
+
+       if (unlikely(lv1ent_fault(ent))) 
+       {
+               if (size > SPAGE_SIZE)
+                       size = SPAGE_SIZE;
+               goto done;
+       }
+
+       /* lv1ent_page(sent) == true here */
+
+       ent = page_entry(ent, iova);
+
+       if (unlikely(lv2ent_fault(ent))) 
+       {
+               size = SPAGE_SIZE;
+               goto done;
+       }
+       
+       *ent = 0;
+       size = SPAGE_SIZE;
+       priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+
+done:
+       pr_debug("%s: unmapped iova 0x%lx/0x%zx bytes\n", __func__, iova, size);
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       return size;
+}
+static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
+                        phys_addr_t paddr, size_t size, int prot)
+{
+       struct rk_iommu_domain *priv = domain->priv;
+       unsigned long *entry;
+       unsigned long flags;
+       int ret = -ENOMEM;
+       unsigned long *pent;
+
+       BUG_ON(priv->pgtable == NULL);
+
+       spin_lock_irqsave(&priv->pgtablelock, flags);
+
+       entry = section_entry(priv->pgtable, iova);
+       
+       pent = alloc_lv2entry(entry, iova,&priv->lv2entcnt[lv1ent_offset(iova)]);
+       if (!pent)
+               ret = -ENOMEM;
+       else
+               ret = lv2set_page(pent, paddr, size,&priv->lv2entcnt[lv1ent_offset(iova)]);
+       
+       if (ret)
+       {
+               pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",__func__, iova, size);
+       }
+       spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+       return ret;
+}
+
+static void rockchip_iommu_detach_device(struct iommu_domain *domain,
+                                   struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct rk_iommu_domain *priv = domain->priv;
+       struct list_head *pos;
+       unsigned long flags;
+       bool found = false;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       list_for_each(pos, &priv->clients) 
+       {
+               if (list_entry(pos, struct sysmmu_drvdata, node) == data) 
+               {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found)
+               goto finish;
+
+       if (__rockchip_sysmmu_disable(data)) 
+       {
+               pr_info("%s: Detached IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
+               list_del(&data->node);
+               INIT_LIST_HEAD(&data->node);
+
+       } 
+       else 
+               pr_info("%s: Detaching IOMMU with pgtable %#lx delayed",__func__, __pa(priv->pgtable));
+       
+finish:
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+static int rockchip_iommu_attach_device(struct iommu_domain *domain,struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+       struct rk_iommu_domain *priv = domain->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+       if (ret == 0) 
+       {
+               /* data->node must not already be on priv->clients */
+               BUG_ON(!list_empty(&data->node));
+               data->dev = dev;
+               list_add_tail(&data->node, &priv->clients);
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (ret < 0) 
+       {
+               pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
+       } 
+       else if (ret > 0) 
+       {
+               pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",__func__, __pa(priv->pgtable));
+       } 
+       else 
+       {
+               pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",__func__, __pa(priv->pgtable));
+       }
+
+       return ret;
+}
+static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
+{
+       struct rk_iommu_domain *priv = domain->priv;
+       struct sysmmu_drvdata *data;
+       unsigned long flags;
+       int i;
+
+       WARN_ON(!list_empty(&priv->clients));
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       list_for_each_entry(data, &priv->clients, node) 
+       {
+               while (!rockchip_sysmmu_disable(data->dev))
+                       ; /* until System MMU is actually disabled */
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       for (i = 0; i < NUM_LV1ENTRIES; i++)
+               if (lv1ent_page(priv->pgtable + i))
+                       kmem_cache_free(lv2table_kmem_cache,__va(lv2table_base(priv->pgtable + i)));
+
+       free_pages((unsigned long)priv->pgtable, 0);
+       free_pages((unsigned long)priv->lv2entcnt, 0);
+       kfree(domain->priv);
+       domain->priv = NULL;
+}
+
+static int rockchip_iommu_domain_init(struct iommu_domain *domain)
+{
+       struct rk_iommu_domain *priv;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       
+       /*
+        * The rk32xx sysmmu uses a two-level page table.  Level 1 and level 2
+        * each hold 1024 entries of 4 bytes, so each table is exactly one
+        * 4 KiB page; allocate one page for each table.
+        */
+       priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
+       if (!priv->pgtable)
+               goto err_pgtable;
+
+       priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
+       if (!priv->lv2entcnt)
+               goto err_counter;
+
+       pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+       spin_lock_init(&priv->lock);
+       spin_lock_init(&priv->pgtablelock);
+       INIT_LIST_HEAD(&priv->clients);
+
+       domain->priv = priv;
+       return 0;
+
+err_counter:
+       free_pages((unsigned long)priv->pgtable, 0);
+err_pgtable:
+       kfree(priv);
+       return -ENOMEM;
+}
+
+static struct iommu_ops rk_iommu_ops = 
+{
+       .domain_init = &rockchip_iommu_domain_init,
+       .domain_destroy = &rockchip_iommu_domain_destroy,
+       .attach_dev = &rockchip_iommu_attach_device,
+       .detach_dev = &rockchip_iommu_detach_device,
+       .map = &rockchip_iommu_map,
+       .unmap = &rockchip_iommu_unmap,
+       .iova_to_phys = &rockchip_iommu_iova_to_phys,
+       .pgsize_bitmap = SPAGE_SIZE,
+};
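+
+/*
+ * Note that pgsize_bitmap contains only SPAGE_SIZE, so the generic IOMMU
+ * core splits every iommu_map()/iommu_unmap() request into 4 KiB pages
+ * before invoking the map/unmap callbacks above.
+ */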
+
+static int rockchip_sysmmu_prepare(void)
+{
+       int ret = 0;
+       static int registered;
+
+       if (registered)
+               return 0;
+
+       lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table", LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
+       if (!lv2table_kmem_cache) 
+       {
+               pr_err("%s: failed to create kmem cache\n", __func__);
+               return -ENOMEM;
+       }
+       ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+       if (!ret)
+               registered = 1;
+       else
+               pr_err("%s: failed to set iommu on the platform bus\n", __func__);
+       return ret;
+}
+static int rockchip_get_sysmmu_resource_num(struct platform_device *pdev, unsigned int type)
+{
+       int num_resources = 0;
+
+       /* count the resources of the given type */
+       while (platform_get_resource(pdev, type, num_resources))
+               num_resources++;
+
+       if (IORESOURCE_MEM == type)
+               pr_info("have memory resource %d\n", num_resources);
+       if (IORESOURCE_IRQ == type)
+               pr_info("have IRQ resource %d\n", num_resources);
+       return num_resources;
+}
+
+static int rockchip_sysmmu_probe(struct platform_device *pdev)
+{
+       int i, ret;
+       struct device *dev;
+       struct sysmmu_drvdata *data;
+       
+       dev = &pdev->dev;
+       
+       ret = rockchip_sysmmu_prepare();
+       if (ret)
+       {
+               pr_err("%s: failed\n", __func__);
+               goto err_alloc;
+       }
+
+       data = devm_kzalloc(dev,sizeof(*data), GFP_KERNEL);
+       if (!data) 
+       {
+               dev_dbg(dev, "Not enough memory\n");
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+       
+       ret = dev_set_drvdata(dev, data);
+       if (ret) 
+       {
+               dev_dbg(dev, "Unabled to initialize driver data\n");
+               goto err_init;
+       }
+       
+       if (pdev->dev.of_node)
+       {
+               of_property_read_string(pdev->dev.of_node, "dbgname", &(data->dbgname));
+               pr_info("dbgname : %s\n", data->dbgname);
+       }
+       else
+       {
+               pr_info("dbgname not assigned in device tree or device node does not exist\n");
+       }
+
+       /* rk32xx sysmmu needs both IRQ and memory resources */
+       data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev, IORESOURCE_MEM);
+       if (0 == data->num_res_mem)
+       {
+               pr_err("can't find sysmmu memory resource\n");
+               ret = -ENOENT;
+               goto err_init;
+       }
+       data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev, IORESOURCE_IRQ);
+       if (0 == data->num_res_irq)
+       {
+               pr_err("can't find sysmmu irq resource\n");
+               ret = -ENOENT;
+               goto err_init;
+       }
+       
+       data->res_bases = kmalloc(sizeof(*data->res_bases) * data->num_res_mem,GFP_KERNEL);
+       if (data->res_bases == NULL)
+       {
+               dev_dbg(dev, "Not enough memory\n");
+               ret = -ENOMEM;
+               goto err_init;
+       }
+
+       for (i = 0; i < data->num_res_mem; i++) 
+       {
+               struct resource *res;
+               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+               if (!res) 
+               {
+                       pr_err("Unable to find IOMEM region\n");
+                       ret = -ENOENT;
+                       goto err_res;
+               }
+               data->res_bases[i] = ioremap(res->start, resource_size(res));
+               if (!data->res_bases[i]) 
+               {
+                       pr_err("Unable to map IOMEM @ PA:%#x\n",res->start);
+                       ret = -ENOENT;
+                       goto err_res;
+               }
+               /*reset sysmmu*/
+               if(!sysmmu_reset(data->res_bases[i],data->dbgname))
+               {
+                       ret = -ENOENT;
+                       goto err_res;
+               }
+       }
+
+       for (i = 0; i < data->num_res_irq; i++) 
+       {
+               ret = platform_get_irq(pdev, i);
+               if (ret <= 0) 
+               {
+                       pr_err("Unable to find IRQ resource\n");
+                       goto err_irq;
+               }
+               ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED, dev_name(dev), data);
+               if (ret) 
+               {
+                       pr_err("Unable to register interrupt handler\n");
+                       goto err_irq;
+               }
+       }
+       
+       ret = rockchip_init_iovmm(dev, &data->vmm);
+       if (ret)
+               goto err_irq;
+
+       data->sysmmu = dev;
+       rwlock_init(&data->lock);
+       INIT_LIST_HEAD(&data->node);
+
+       __set_fault_handler(data, &default_fault_handler);
+
+       pr_info("(%s) Initialized\n", data->dbgname);
+       return 0;
+
+err_irq:
+       while (i-- > 0) 
+       {
+               int irq;
+
+               irq = platform_get_irq(pdev, i);
+               free_irq(irq, data);
+       }
+err_res:
+       while (data->num_res_mem-- > 0)
+               iounmap(data->res_bases[data->num_res_mem]);
+       kfree(data->res_bases);
+err_init:
+err_alloc:
+       dev_err(dev, "Failed to initialize\n");
+       return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sysmmu_dt_ids[] = 
+{
+       { .compatible = IEP_SYSMMU_COMPATIBLE_NAME},
+       { .compatible = VIP_SYSMMU_COMPATIBLE_NAME},
+       { .compatible = ISP0_SYSMMU_COMPATIBLE_NAME},
+       { .compatible = ISP1_SYSMMU_COMPATIBLE_NAME},
+       { .compatible = VOPB_SYSMMU_COMPATIBLE_NAME},
+       { .compatible = VOPL_SYSMMU_COMPATIBLE_NAME},
+       { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
+#endif
+
+static struct platform_driver rk_sysmmu_driver = 
+{
+       .probe = rockchip_sysmmu_probe,
+       .remove = NULL,
+       .driver = 
+       {
+                  .name = "rk_sysmmu",
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(sysmmu_dt_ids),
+       },
+};
+
+#if 0
+/* module_platform_driver() did not work here for reasons not yet understood,
+ * so the driver is registered explicitly from an initcall below.
+ */
+#ifdef CONFIG_OF
+module_platform_driver(rk_sysmmu_driver);
+#endif
+#endif
+static int __init rockchip_sysmmu_init_driver(void)
+{
+       return platform_driver_register(&rk_sysmmu_driver);
+}
+
+core_initcall(rockchip_sysmmu_init_driver);
+
diff --git a/drivers/iommu/rockchip-iommu.h b/drivers/iommu/rockchip-iommu.h
new file mode 100755 (executable)
index 0000000..1354f89
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Data structure definition for Rockchip IOMMU driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/genalloc.h>
+#include <linux/iommu.h>
+
+#include <linux/rockchip/sysmmu.h>
+
+#ifdef CONFIG_ROCKCHIP_IOVMM
+
+#define IOVA_START 0x10000000
+#define IOVM_SIZE (SZ_1G - SZ_4K) /* last 4K is for error values */
+
+struct rk_vm_region {
+       struct list_head node;
+       dma_addr_t start;
+       size_t size;
+};
+
+struct rk_iovmm {
+       struct iommu_domain *domain; /* iommu domain for this iovmm */
+       struct gen_pool *vmm_pool;
+       struct list_head regions_list;  /* list of rk_vm_region */
+       spinlock_t lock; /* lock for updating regions_list */
+};
+#endif
+
+
+struct sysmmu_drvdata {
+       struct list_head node; /* entry of rk_iommu_domain.clients */
+       struct device *sysmmu;  /* System MMU's device descriptor */
+       struct device *dev;     /* Owner of system MMU */
+       int num_res_mem;
+       int num_res_irq;        
+       const char *dbgname;
+       void __iomem **res_bases;
+       int activations;
+       rwlock_t lock;
+       struct iommu_domain *domain; /* domain given to iommu_attach_device() */
+       sysmmu_fault_handler_t fault_handler;
+       unsigned long pgtable;
+#ifdef CONFIG_ROCKCHIP_IOVMM
+       struct rk_iovmm vmm;
+#endif
+};
+
+#ifdef CONFIG_ROCKCHIP_IOVMM
+static inline struct rk_iovmm *rockchip_get_iovmm(struct device *dev)
+{
+       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+       BUG_ON(!dev->archdata.iommu || !data);
+
+       return &data->vmm;
+}
+
+int rockchip_init_iovmm(struct device *sysmmu, struct rk_iovmm *vmm);
+#else
+#define rockchip_init_iovmm(sysmmu, vmm) 0
+#endif
diff --git a/drivers/iommu/rockchip-iovmm.c b/drivers/iommu/rockchip-iovmm.c
new file mode 100755 (executable)
index 0000000..c43f347
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "rockchip-iommu.h"
+
+static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
+{
+       struct rk_vm_region *region;
+
+       list_for_each_entry(region, &vmm->regions_list, node)
+               if (region->start == iova)
+                       return region;
+
+       return NULL;
+}
+
+int iovmm_activate(struct device *dev)
+{
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+
+       return iommu_attach_device(vmm->domain, dev);
+}
+
+void iovmm_deactivate(struct device *dev)
+{
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+
+       iommu_detach_device(vmm->domain, dev);
+}
+
+dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset, size_t size)
+{
+       off_t start_off;
+       dma_addr_t addr, start = 0;
+       size_t mapped_size = 0;
+       struct rk_vm_region *region;
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+       int order;
+       int ret;
+
+       for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
+               offset -= sg_dma_len(sg);
+
+       start_off = offset_in_page(sg_phys(sg) + offset);
+       size = PAGE_ALIGN(size + start_off);
+
+       order = __fls(min_t(size_t, size, SZ_1M));
+
+       region = kmalloc(sizeof(*region), GFP_KERNEL);
+       if (!region) 
+       {
+               ret = -ENOMEM;
+               goto err_map_nomem;
+       }
+
+       //start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, size, order);
+       
+       start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
+       if (!start) 
+       {
+               ret = -ENOMEM;
+               goto err_map_noiomem;
+       }
+
+       addr = start;
+       do {
+               phys_addr_t phys;
+               size_t len;
+
+               phys = sg_phys(sg);
+               len = sg_dma_len(sg);
+
+               /* if back to back sg entries are contiguous consolidate them */
+               while (sg_next(sg) && sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) 
+               {
+                       len += sg_dma_len(sg_next(sg));
+                       sg = sg_next(sg);
+               }
+
+               if (offset > 0) 
+               {
+                       len -= offset;
+                       phys += offset;
+                       offset = 0;
+               }
+
+               if (offset_in_page(phys))
+               {
+                       len += offset_in_page(phys);
+                       phys = round_down(phys, PAGE_SIZE);
+               }
+
+               len = PAGE_ALIGN(len);
+
+               if (len > (size - mapped_size))
+                       len = size - mapped_size;
+
+               ret = iommu_map(vmm->domain, addr, phys, len, 0);
+               if (ret)
+                       break;
+
+               addr += len;
+               mapped_size += len;
+       } while ((sg = sg_next(sg)) && (mapped_size < size));
+
+       BUG_ON(mapped_size > size);
+
+       if (mapped_size < size)
+               goto err_map_map;
+
+       region->start = start + start_off;
+       region->size = size;
+
+       INIT_LIST_HEAD(&region->node);
+
+       spin_lock(&vmm->lock);
+
+       list_add(&region->node, &vmm->regions_list);
+
+       spin_unlock(&vmm->lock);
+
+       dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",region->start, region->size);
+
+       return region->start;
+
+err_map_map:
+       iommu_unmap(vmm->domain, start, mapped_size);
+       gen_pool_free(vmm->vmm_pool, start, size);
+err_map_noiomem:
+       kfree(region);
+err_map_nomem:
+       dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",size);
+       return (dma_addr_t)ret;
+}
+
+void iovmm_unmap(struct device *dev, dma_addr_t iova)
+{
+       struct rk_vm_region *region;
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+       size_t unmapped_size;
+
+       /* This function must not be called in IRQ handlers */
+       BUG_ON(in_irq());
+
+       spin_lock(&vmm->lock);
+
+       region = find_region(vmm, iova);
+       if (WARN_ON(!region)) 
+       {
+               spin_unlock(&vmm->lock);
+               return;
+       }
+
+       list_del(&region->node);
+
+       spin_unlock(&vmm->lock);
+
+       region->start = round_down(region->start, PAGE_SIZE);
+
+       unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
+
+       rockchip_sysmmu_tlb_invalidate(dev);
+
+       gen_pool_free(vmm->vmm_pool, region->start, region->size);
+
+       WARN_ON(unmapped_size != region->size);
+       dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
+
+       kfree(region);
+}
+
+int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
+{
+       struct rk_vm_region *region;
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+       int ret;
+
+       if (WARN_ON((phys + size) >= IOVA_START)) 
+       {
+               dev_err(dev,"Unable to create one to one mapping for %#x @ %#x\n",size, phys);
+               return -EINVAL;
+       }
+
+       region = kmalloc(sizeof(*region), GFP_KERNEL);
+       if (!region)
+               return -ENOMEM;
+
+       if (WARN_ON(phys & ~PAGE_MASK))
+               phys = round_down(phys, PAGE_SIZE);
+
+
+       ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
+       if (ret < 0) 
+       {
+               kfree(region);
+               return ret;
+       }
+
+       region->start = (dma_addr_t)phys;
+       region->size = size;
+       INIT_LIST_HEAD(&region->node);
+
+       spin_lock(&vmm->lock);
+
+       list_add(&region->node, &vmm->regions_list);
+
+       spin_unlock(&vmm->lock);
+
+       return 0;
+}
+
+void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
+{
+       struct rk_vm_region *region;
+       struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
+       size_t unmapped_size;
+
+       /* This function must not be called in IRQ handlers */
+       BUG_ON(in_irq());
+
+       if (WARN_ON(phys & ~PAGE_MASK))
+               phys = round_down(phys, PAGE_SIZE);
+
+       spin_lock(&vmm->lock);
+
+       region = find_region(vmm, (dma_addr_t)phys);
+       if (WARN_ON(!region)) 
+       {
+               spin_unlock(&vmm->lock);
+               return;
+       }
+
+       list_del(&region->node);
+
+       spin_unlock(&vmm->lock);
+
+       unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
+       rockchip_sysmmu_tlb_invalidate(dev);
+       WARN_ON(unmapped_size != region->size);
+       dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
+
+       kfree(region);
+}
+
+int rockchip_init_iovmm(struct device *sysmmu, struct rk_iovmm *vmm)
+{
+       int ret = 0;
+
+       vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
+       if (!vmm->vmm_pool) 
+       {
+               ret = -ENOMEM;
+               goto err_setup_genalloc;
+       }
+
+       /* (1 GiB - 4 KiB) of address space starting at IOVA_START, i.e.
+        * iovas 0x10000000..0x4FFFEFFF; the last 4 KiB page is left out so
+        * that valid iovas can be distinguished from error values. */
+       ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
+       if (ret)
+               goto err_setup_domain;
+
+       vmm->domain = iommu_domain_alloc(&platform_bus_type);
+       if (!vmm->domain) 
+       {
+               ret = -ENOMEM;
+               goto err_setup_domain;
+       }
+
+       spin_lock_init(&vmm->lock);
+
+       INIT_LIST_HEAD(&vmm->regions_list);
+       
+       pr_info("IOVMM: Created %#x B IOVMM from %#x.\n",IOVM_SIZE, IOVA_START);
+       dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",IOVM_SIZE, IOVA_START);
+       return 0;
+err_setup_domain:
+       gen_pool_destroy(vmm->vmm_pool);
+err_setup_genalloc:
+       dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);
+
+       return ret;
+}
+
+/*
+ * rockchip_get_sysmmu_device_by_compatible()
+ * On success: returns a pointer to the device embedded in the matching
+ *             platform device.
+ * On failure: returns NULL.
+ */
+struct device *rockchip_get_sysmmu_device_by_compatible(const char *compt)
+{
+       struct device_node *dn;
+       struct platform_device *pd;
+
+       dn = of_find_compatible_node(NULL, NULL, compt);
+       if (!dn)
+       {
+               pr_err("can't find device node %s\n", compt);
+               return NULL;
+       }
+
+       pd = of_find_device_by_node(dn);
+       if (!pd)
+       {
+               pr_err("can't find platform device in device node %s\n", compt);
+               return NULL;
+       }
+
+       return &pd->dev;
+}
+
diff --git a/include/linux/rockchip/iovmm.h b/include/linux/rockchip/iovmm.h
new file mode 100755 (executable)
index 0000000..48e212d
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_PLAT_IOVMM_H
+#define __ASM_PLAT_IOVMM_H
+
+#ifdef CONFIG_ROCKCHIP_IOVMM
+struct scatterlist;
+struct device;
+
+int iovmm_activate(struct device *dev);
+void iovmm_deactivate(struct device *dev);
+
+/* iovmm_map() - Maps a list of physical memory chunks
+ * @dev: the owner of the IO address space where the mapping is created
+ * @sg: list of physical memory chunks to map
+ * @offset: length in bytes where the mapping starts
+ * @size: how much memory to map in bytes. @offset + @size must not exceed
+ *        total size of @sg
+ *
+ * This function returns the mapped IO address in the address space of @dev.
+ * It returns a negative error number if the mapping fails, so the caller
+ * must check the return value with IS_ERR_VALUE() to find out whether the
+ * function succeeded.
+ *
+ * The caller of this function must ensure that iovmm_cleanup() is not called
+ * while this function is running.
+ */
+dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
+                                                               size_t size);
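+
+/*
+ * A minimal usage sketch (illustrative only; "my_dev", "my_sgt" and
+ * "buf_size" are hypothetical):
+ *
+ *     dma_addr_t iova = iovmm_map(my_dev, my_sgt->sgl, 0, buf_size);
+ *     if (IS_ERR_VALUE(iova))
+ *             return (int)iova;
+ *     ...program the device with iova...
+ *     iovmm_unmap(my_dev, iova);
+ */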
+
+/* iovmm_unmap() - unmaps the given IO address
+ * @dev: the owner of the IO address space where @iova belongs
+ * @iova: IO address that needs to be unmapped and freed.
+ *
+ * The caller of this function must ensure that iovmm_cleanup() is not called
+ * while this function is called.
+ */
+void iovmm_unmap(struct device *dev, dma_addr_t iova);
+
+/* iovmm_map_oto - create one to one mapping for the given physical address
+ * @dev: the owner of the IO address space to map
+ * @phys: physical address to map
+ * @size: size of the mapping to create
+ *
+ * This function returns 0 if the mapping is successful; otherwise, a
+ * negative error value.
+ */
+int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size);
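+
+/*
+ * For example (illustrative; "my_dev", "fw_phys" and "fw_size" are
+ * hypothetical), a device that must access a buffer at its physical
+ * address would call:
+ *
+ *     ret = iovmm_map_oto(my_dev, fw_phys, fw_size);
+ */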
+
+/* iovmm_unmap_oto - remove a one to one mapping
+ * @dev: the owner of the IO address space
+ * @phys: physical address whose mapping is to be removed
+ */
+void iovmm_unmap_oto(struct device *dev, phys_addr_t phys);
+
+struct device *rockchip_get_sysmmu_device_by_compatible(const char *compt);
+
+
+#else
+#define iovmm_activate(dev)            (-ENOSYS)
+#define iovmm_deactivate(dev)          do { } while (0)
+#define iovmm_map(dev, sg, offset, size) (-ENOSYS)
+#define iovmm_unmap(dev, iova)         do { } while (0)
+#define iovmm_map_oto(dev, phys, size) (-ENOSYS)
+#define iovmm_unmap_oto(dev, phys)     do { } while (0)
+#define rockchip_get_sysmmu_device_by_compatible(compt) (NULL)
+
+#endif /* CONFIG_ROCKCHIP_IOVMM */
+
+#endif /*__ASM_PLAT_IOVMM_H*/
diff --git a/include/linux/rockchip/sysmmu.h b/include/linux/rockchip/sysmmu.h
new file mode 100755 (executable)
index 0000000..bcf09d9
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Rockchip - System MMU support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM_MACH_RK_SYSMMU_H_
+#define _ARM_MACH_RK_SYSMMU_H_
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+
+#define IEP_SYSMMU_COMPATIBLE_NAME "iommu,iep_mmu"
+#define VIP_SYSMMU_COMPATIBLE_NAME "iommu,vip_mmu"
+
+#define ISP0_SYSMMU_COMPATIBLE_NAME "iommu,isp0_mmu"
+#define ISP1_SYSMMU_COMPATIBLE_NAME "iommu,isp1_mmu"
+
+#define VOPB_SYSMMU_COMPATIBLE_NAME "iommu,vopb_mmu"
+#define VOPL_SYSMMU_COMPATIBLE_NAME "iommu,vopl_mmu"
+
+
+
+enum rk_sysmmu_inttype {
+       SYSMMU_PAGEFAULT,
+       SYSMMU_BUSERROR,
+       SYSMMU_FAULT_UNKNOWN,
+       SYSMMU_FAULTS_NUM
+};
+
+struct sysmmu_drvdata;
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of the page table base. This is 0 if
+ *                @itype is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(struct device *dev,
+                                         enum rk_sysmmu_inttype itype,
+                                         unsigned long pgtable_base,
+                                         unsigned long fault_addr,
+                                         unsigned int status);
+       
+#ifdef CONFIG_ROCKCHIP_IOMMU
+/**
+* rockchip_sysmmu_enable() - enable the System MMU
+* @owner: The device whose System MMU is about to be enabled.
+* @pgd: Base physical address of the first-level page table
+*
+* This function enables the System MMU so that it translates device virtual
+* addresses to physical addresses.
+* Returns non-zero if it fails to enable the System MMU.
+*/
+int rockchip_sysmmu_enable(struct device *owner, unsigned long pgd);
+
+/**
+* rockchip_sysmmu_disable() - disable the System MMU of an IP block
+* @owner: The device whose System MMU is about to be disabled.
+*
+* This function disables the System MMU so that device addresses are no
+* longer translated.
+*/
+bool rockchip_sysmmu_disable(struct device *owner);
+
+/**
+ * rockchip_sysmmu_tlb_invalidate() - flush all TLB entries in the System MMU
+ * @owner: The device whose System MMU is to be flushed.
+ *
+ * This function flushes all TLB entries in the System MMU.
+ */
+void rockchip_sysmmu_tlb_invalidate(struct device *owner);
+
+/** rockchip_sysmmu_set_fault_handler() - Fault handler for System MMUs
+ * Called when an interrupt is raised by a System MMU.
+ * Device drivers of peripherals that have a System MMU can implement a fault
+ * handler to resolve address translation faults.
+ * The meanings of the return value and parameters are described below.
+ *
+ * return value: non-zero if the fault is correctly resolved.
+ *               zero if the fault is not handled.
+ */
+void rockchip_sysmmu_set_fault_handler(struct device *dev, sysmmu_fault_handler_t handler);
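+
+/*
+ * A minimal sketch of a custom handler (illustrative only; the recovery
+ * policy is up to the client driver):
+ *
+ *     static int my_fault_handler(struct device *dev,
+ *                                 enum rk_sysmmu_inttype itype,
+ *                                 unsigned long pgtable_base,
+ *                                 unsigned long fault_addr,
+ *                                 unsigned int status)
+ *     {
+ *             dev_err(dev, "sysmmu fault %d at %#lx\n", itype, fault_addr);
+ *             return 0;
+ *     }
+ *
+ *     rockchip_sysmmu_set_fault_handler(dev, my_fault_handler);
+ */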
+
+/** rockchip_sysmmu_set_prefbuf() - Initialize prefetch buffers of System MMU v3
+ *     @owner: The device which needs to set the prefetch buffers
+ *     @base0: The start virtual address of the area of the @owner device that the
+ *                     first prefetch buffer loads translation descriptors from
+ *     @size0: The size of the area of the @owner device that the first prefetch
+ *                     buffer loads translation descriptors from
+ *     @base1: The start virtual address of the area of the @owner device that the
+ *                     second prefetch buffer loads translation descriptors from. This is
+ *                     ignored if @size1 is 0, in which case the two prefetch buffers are
+ *                     each assigned half of the area specified by @base0 and @size0
+ *     @size1: The size of the area of the @owner device that the second prefetch
+ *                     buffer loads translation descriptors from. This can be 0. See the
+ *                     description of @base1 for the meaning of @size1 = 0
+ */
+void rockchip_sysmmu_set_prefbuf(struct device *owner,
+                               unsigned long base0, unsigned long size0,
+                               unsigned long base1, unsigned long size1);
+#else /* CONFIG_ROCKCHIP_IOMMU */
+#define rockchip_sysmmu_enable(owner, pgd) (-ENOSYS)
+#define rockchip_sysmmu_disable(owner) (false)
+#define rockchip_sysmmu_tlb_invalidate(owner) do { } while (0)
+#define rockchip_sysmmu_set_fault_handler(dev, handler) do { } while (0)
+#define rockchip_sysmmu_set_prefbuf(owner, b0, s0, b1, s1) do { } while (0)
+#endif
+
+#ifdef CONFIG_IOMMU_API
+#include <linux/device.h>
+static inline void platform_set_sysmmu(struct device *sysmmu, struct device *dev)
+{
+       dev->archdata.iommu = sysmmu;
+}
+#else
+#define platform_set_sysmmu(sysmmu, dev) do { } while (0)
+#endif
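+
+/*
+ * Illustrative wiring (hypothetical client code): look up the System MMU
+ * device by its compatible string (helper declared in
+ * <linux/rockchip/iovmm.h>) and attach it to the client device:
+ *
+ *     struct device *mmu =
+ *             rockchip_get_sysmmu_device_by_compatible(VOPB_SYSMMU_COMPATIBLE_NAME);
+ *     if (mmu)
+ *             platform_set_sysmmu(mmu, my_dev);
+ */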
+
+#endif /* _ARM_MACH_RK_SYSMMU_H_ */