drm/gma500: Make SGX MMU driver actually do something
author	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Thu, 2 Jan 2014 00:27:00 +0000 (01:27 +0100)
committer	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Mon, 17 Mar 2014 19:04:01 +0000 (20:04 +0100)
The old MMU code never wrote the page directory or page tables to any
hardware registers. Now the PD base is programmed into the SGX BIF
directory-list base registers and the TLB/data cache is invalidated on
flush, which is a good start.
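
A condensed sketch of the new register traffic, pulled together from
psb_mmu_set_pd_context() and psb_mmu_flush_pd_locked() below (the helper
name sgx_mmu_load_pd is illustrative only and not part of the patch; the
PSB_WSGX32/PSB_RSGX32 accessors pick up dev_priv from scope, as in the
driver):

	static void sgx_mmu_load_pd(struct drm_psb_private *dev_priv,
				    struct page *pd_page, int hw_context)
	{
		uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
				  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
		uint32_t val;

		/* Point the SGX bus interface at the page directory */
		PSB_WSGX32(page_to_pfn(pd_page) << PAGE_SHIFT, offset);
		wmb();

		/* Pulse the invalidate bit so stale translations are dropped */
		val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL); /* posting read */
	}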

Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
drivers/gpu/drm/gma500/mmu.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h

index 49bac41beefb3251ecc6a63f04e86d0d36de9974..0bfc9f7b343b2d039ec9b3b82297ec54a75bfe20 100644 (file)
@@ -59,15 +59,14 @@ struct psb_mmu_driver {
        spinlock_t lock;
 
        atomic_t needs_tlbflush;
-
-       uint8_t __iomem *register_map;
+       atomic_t *msvdx_mmu_invaldc;
        struct psb_mmu_pd *default_pd;
-       /*uint32_t bif_ctrl;*/
+       uint32_t bif_ctrl;
        int has_clflush;
        int clflush_add;
        unsigned long clflush_mask;
 
-       struct drm_psb_private *dev_priv;
+       struct drm_device *dev;
 };
 
 struct psb_mmu_pd;
@@ -102,13 +101,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
        return offset >> PSB_PDE_SHIFT;
 }
 
+#if defined(CONFIG_X86)
 static inline void psb_clflush(void *addr)
 {
        __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
 }
 
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
-                                  void *addr)
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
 {
        if (!driver->has_clflush)
                return;
@@ -117,62 +116,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
        psb_clflush(addr);
        mb();
 }
+#else
 
-static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
-{
-       uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
-       uint32_t clflush_count = PAGE_SIZE / clflush_add;
-       int i;
-       uint8_t *clf;
-
-       clf = kmap_atomic(page);
-       mb();
-       for (i = 0; i < clflush_count; ++i) {
-               psb_clflush(clf);
-               clf += clflush_add;
-       }
-       mb();
-       kunmap_atomic(clf);
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{
 }
 
-static void psb_pages_clflush(struct psb_mmu_driver *driver,
-                               struct page *page[], unsigned long num_pages)
-{
-       int i;
-
-       if (!driver->has_clflush)
-               return ;
-
-       for (i = 0; i < num_pages; i++)
-               psb_page_clflush(driver, *page++);
-}
+#endif
 
-static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
-                                   int force)
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
 {
+       struct drm_device *dev = driver->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (atomic_read(&driver->needs_tlbflush) || force) {
+               uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+               PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+
+       /* Let the invalidate land before re-enabling the data cache */
+               wmb();
+               PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+               (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+               if (driver->msvdx_mmu_invaldc)
+                       atomic_set(driver->msvdx_mmu_invaldc, 1);
+       }
        atomic_set(&driver->needs_tlbflush, 0);
 }
 
+#if 0
 static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
 {
        down_write(&driver->sem);
        psb_mmu_flush_pd_locked(driver, force);
        up_write(&driver->sem);
 }
+#endif
 
-void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+void psb_mmu_flush(struct psb_mmu_driver *driver)
 {
-       if (rc_prot)
-               down_write(&driver->sem);
-       if (rc_prot)
-               up_write(&driver->sem);
+       struct drm_device *dev = driver->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       uint32_t val;
+
+       down_write(&driver->sem);
+       val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+       if (atomic_read(&driver->needs_tlbflush))
+               PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+       else
+               PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
+
+       /* Make sure data cache is turned off and MMU is flushed before
+          restoring bank interface control register */
+       wmb();
+       PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
+                  PSB_CR_BIF_CTRL);
+       (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+       atomic_set(&driver->needs_tlbflush, 0);
+       if (driver->msvdx_mmu_invaldc)
+               atomic_set(driver->msvdx_mmu_invaldc, 1);
+       up_write(&driver->sem);
 }
 
 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
 {
-       /*ttm_tt_cache_flush(&pd->p, 1);*/
-       psb_pages_clflush(pd->driver, &pd->p, 1);
+       struct drm_device *dev = pd->driver->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
+                         PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
+
        down_write(&pd->driver->sem);
+       PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
@@ -183,7 +197,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
 static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                            unsigned long end)
 {
-
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
 }
@@ -223,12 +236,10 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                goto out_err3;
 
        if (!trap_pagefaults) {
-               pd->invalid_pde =
-                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
-                                    invalid_type);
-               pd->invalid_pte =
-                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
-                                    invalid_type);
+               pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                                  invalid_type);
+               pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                                  invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
@@ -279,12 +290,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
 {
        struct psb_mmu_driver *driver = pd->driver;
+       struct drm_device *dev = driver->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_mmu_pt *pt;
        int i;
 
        down_write(&driver->sem);
-       if (pd->hw_context != -1)
+       if (pd->hw_context != -1) {
+               PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);
+       }
 
        /* Should take the spinlock here, but we don't need to do that
           since we have the semaphore in write mode. */
@@ -331,7 +346,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;
 
-
+#if defined(CONFIG_X86)
        if (pd->driver->has_clflush && pd->hw_context != -1) {
                mb();
                for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
                }
                mb();
        }
-
+#endif
        kunmap_atomic(v);
        spin_unlock(lock);
 
@@ -351,7 +366,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
        return pt;
 }
 
-static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                             unsigned long addr)
 {
        uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +398,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                kunmap_atomic((void *) v);
 
                if (pd->hw_context != -1) {
-                       psb_mmu_clflush(pd->driver, (void *) &v[index]);
+                       psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
@@ -420,8 +435,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
                pd->tables[pt->index] = NULL;
 
                if (pd->hw_context != -1) {
-                       psb_mmu_clflush(pd->driver,
-                                       (void *) &v[pt->index]);
+                       psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(pt->v);
@@ -432,8 +446,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
        spin_unlock(&pd->driver->lock);
 }
 
-static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
-                                  unsigned long addr, uint32_t pte)
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
+                                  uint32_t pte)
 {
        pt->v[psb_mmu_pt_index(addr)] = pte;
 }
@@ -444,69 +458,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
 }
 
-
-void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
-                       uint32_t mmu_offset, uint32_t gtt_start,
-                       uint32_t gtt_pages)
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
 {
-       uint32_t *v;
-       uint32_t start = psb_mmu_pd_index(mmu_offset);
-       struct psb_mmu_driver *driver = pd->driver;
-       int num_pages = gtt_pages;
+       struct psb_mmu_pd *pd;
 
        down_read(&driver->sem);
-       spin_lock(&driver->lock);
-
-       v = kmap_atomic(pd->p);
-       v += start;
-
-       while (gtt_pages--) {
-               *v++ = gtt_start | pd->pd_mask;
-               gtt_start += PAGE_SIZE;
-       }
-
-       /*ttm_tt_cache_flush(&pd->p, num_pages);*/
-       psb_pages_clflush(pd->driver, &pd->p, num_pages);
-       kunmap_atomic(v);
-       spin_unlock(&driver->lock);
-
-       if (pd->hw_context != -1)
-               atomic_set(&pd->driver->needs_tlbflush, 1);
+       pd = driver->default_pd;
+       up_read(&driver->sem);
 
-       up_read(&pd->driver->sem);
-       psb_mmu_flush_pd(pd->driver, 0);
+       return pd;
 }
 
-struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
 {
        struct psb_mmu_pd *pd;
 
-       /* down_read(&driver->sem); */
-       pd = driver->default_pd;
-       /* up_read(&driver->sem); */
-
-       return pd;
+       pd = psb_mmu_get_default_pd(driver);
+       return page_to_pfn(pd->p) << PAGE_SHIFT;
 }
 
 void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
 {
+       struct drm_device *dev = driver->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
        psb_mmu_free_pagedir(driver->default_pd);
        kfree(driver);
 }
 
-struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-                                       int trap_pagefaults,
-                                       int invalid_type,
-                                       struct drm_psb_private *dev_priv)
+struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+                                          int trap_pagefaults,
+                                          int invalid_type,
+                                          atomic_t *msvdx_mmu_invaldc)
 {
        struct psb_mmu_driver *driver;
+       struct drm_psb_private *dev_priv = dev->dev_private;
 
        driver = kmalloc(sizeof(*driver), GFP_KERNEL);
 
        if (!driver)
                return NULL;
-       driver->dev_priv = dev_priv;
 
+       driver->dev = dev;
        driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                              invalid_type);
        if (!driver->default_pd)
@@ -515,17 +510,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
        spin_lock_init(&driver->lock);
        init_rwsem(&driver->sem);
        down_write(&driver->sem);
-       driver->register_map = registers;
        atomic_set(&driver->needs_tlbflush, 1);
+       driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
+
+       driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+       PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
 
        driver->has_clflush = 0;
 
+#if defined(CONFIG_X86)
        if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;
 
                /*
-                * clflush size is determined at kernel setup for x86_64
-                *  but not for i386. We have to do it here.
+                * clflush size is determined at kernel setup for x86_64 but not
+                * for i386. We have to do it here.
                 */
 
                cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +538,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
                driver->clflush_mask = driver->clflush_add - 1;
                driver->clflush_mask = ~driver->clflush_mask;
        }
+#endif
 
        up_write(&driver->sem);
        return driver;
@@ -545,9 +548,9 @@ out_err1:
        return NULL;
 }
 
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
-                              unsigned long address, uint32_t num_pages,
-                              uint32_t desired_tile_stride,
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                              uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
 {
        struct psb_mmu_pt *pt;
@@ -561,11 +564,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;
 
-       if (!pd->driver->has_clflush) {
-               /*ttm_tt_cache_flush(&pd->p, num_pages);*/
-               psb_pages_clflush(pd->driver, &pd->p, num_pages);
+       if (!pd->driver->has_clflush)
                return;
-       }
 
        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
@@ -586,10 +586,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
                        if (!pt)
                                continue;
                        do {
-                               psb_clflush(&pt->v
-                                           [psb_mmu_pt_index(addr)]);
-                       } while (addr +=
-                                clflush_add,
+                               psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+                       } while (addr += clflush_add,
                                 (addr & clflush_mask) < next);
 
                        psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +596,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
        }
        mb();
 }
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                              uint32_t num_pages, uint32_t desired_tile_stride,
+                              uint32_t hw_tile_stride)
+{
+       drm_ttm_cache_flush();
+}
+#endif
 
 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
@@ -633,7 +639,7 @@ out:
        up_read(&pd->driver->sem);
 
        if (pd->hw_context != -1)
-               psb_mmu_flush(pd->driver, 0);
+               psb_mmu_flush(pd->driver);
 
        return;
 }
@@ -660,7 +666,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;
 
-       /* down_read(&pd->driver->sem); */
+       down_read(&pd->driver->sem);
 
        /* Make sure we only need to flush this processor's cache */
 
@@ -688,10 +694,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);
 
-       /* up_read(&pd->driver->sem); */
+       up_read(&pd->driver->sem);
 
        if (pd->hw_context != -1)
-               psb_mmu_flush(pd->driver, 0);
+               psb_mmu_flush(pd->driver);
 }
 
 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +710,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        down_read(&pd->driver->sem);
 
@@ -726,6 +732,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                psb_mmu_pt_unmap_unlock(pt);
 
        } while (addr = next, next != end);
+       ret = 0;
 
 out:
        if (pd->hw_context != -1)
@@ -734,15 +741,15 @@ out:
        up_read(&pd->driver->sem);
 
        if (pd->hw_context != -1)
-               psb_mmu_flush(pd->driver, 1);
+               psb_mmu_flush(pd->driver);
 
        return ret;
 }
 
 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
-                        uint32_t desired_tile_stride,
-                        uint32_t hw_tile_stride, int type)
+                        uint32_t desired_tile_stride, uint32_t hw_tile_stride,
+                        int type)
 {
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
@@ -754,7 +761,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
@@ -777,14 +784,11 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-                       if (!pt) {
-                               ret = -ENOMEM;
+                       if (!pt)
                                goto out;
-                       }
                        do {
-                               pte =
-                                   psb_mmu_mask_pte(page_to_pfn(*pages++),
-                                                    type);
+                               pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+                                                      type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
@@ -794,6 +798,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
 
                address += row_add;
        }
+
+       ret = 0;
 out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +808,7 @@ out:
        up_read(&pd->driver->sem);
 
        if (pd->hw_context != -1)
-               psb_mmu_flush(pd->driver, 1);
+               psb_mmu_flush(pd->driver);
 
        return ret;
 }
index 1199180667c98af81093ff073948f2bf52aacbbc..55eef4d6cef8f3861b655407f8f7b992cccaba3b 100644 (file)
@@ -347,9 +347,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
        if (ret)
                goto out_err;
 
-       dev_priv->mmu = psb_mmu_driver_init((void *)0,
-                                       drm_psb_trap_pagefaults, 0,
-                                       dev_priv);
+       dev_priv->mmu = psb_mmu_driver_init(dev, drm_psb_trap_pagefaults,
+                                           0, NULL);
        if (!dev_priv->mmu)
                goto out_err;
 
index 5ad6a03e477eae63aa8cc635981887febca71ac1..ac8cc1989b94e33c88c2a10af6b64ad83a246c6c 100644 (file)
@@ -727,10 +727,10 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
  * MMU stuff.
  */
 
-extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-                                       int trap_pagefaults,
-                                       int invalid_type,
-                                       struct drm_psb_private *dev_priv);
+extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
+                                                 int trap_pagefaults,
+                                                 int invalid_type,
+                                                 atomic_t *msvdx_mmu_invaldc);
 extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
 extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
                                                 *driver);
@@ -740,7 +740,7 @@ extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                           int trap_pagefaults,
                                           int invalid_type);
 extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
 extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                        unsigned long address,
                                        uint32_t num_pages);
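
For reference, a minimal caller sketch against the new prototypes above
(assuming the usual gma500 load path; msvdx_mmu_invaldc may be NULL when
no video decode engine needs invalidation notifications):

	dev_priv->mmu = psb_mmu_driver_init(dev, drm_psb_trap_pagefaults,
					    0 /* invalid_type */, NULL);
	if (!dev_priv->mmu)
		goto out_err;

	/* psb_mmu_flush() no longer takes an rc_prot argument; it always
	 * takes the driver semaphore itself. */
	psb_mmu_flush(dev_priv->mmu);

	/* On unload, takedown restores the PSB_CR_BIF_CTRL value saved
	 * at init time. */
	psb_mmu_driver_takedown(dev_priv->mmu);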