ARM: kill off arch_is_coherent
author Rob Herring <rob.herring@calxeda.com>
Tue, 21 Aug 2012 10:26:24 +0000 (12:26 +0200)
committer Marek Szyprowski <m.szyprowski@samsung.com>
Tue, 2 Oct 2012 06:58:07 +0000 (08:58 +0200)
With ixp2xxx removed, there are no platforms that define arch_is_coherent,
so the last occurrences of arch_is_coherent can be removed. Any new
platform with coherent I/O should use the coherent DMA mapping functions.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
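
For context, the replacement the commit message points to is per-device coherent DMA ops rather than a global arch_is_coherent() flag. Below is a minimal sketch of how a platform with coherent I/O might install coherent ops on its devices, assuming the arm_coherent_dma_ops introduced by the related "coherent dma ops" work and a platform-bus notifier; the notifier and function names here are illustrative and are not part of this patch:

	/*
	 * Hypothetical platform code: mark every platform device as
	 * cache-coherent by installing coherent DMA ops per device,
	 * instead of testing a global arch_is_coherent().
	 *
	 * Assumes arm_coherent_dma_ops (from the related ARM coherent
	 * dma ops work) and set_dma_ops() are available.
	 */
	#include <linux/init.h>
	#include <linux/device.h>
	#include <linux/notifier.h>
	#include <linux/platform_device.h>
	#include <linux/dma-mapping.h>

	static int coherent_platform_notifier(struct notifier_block *nb,
					      unsigned long event, void *data)
	{
		struct device *dev = data;

		if (event != BUS_NOTIFY_ADD_DEVICE)
			return NOTIFY_DONE;

		/* Devices on this (coherent) interconnect get coherent ops. */
		set_dma_ops(dev, &arm_coherent_dma_ops);
		return NOTIFY_OK;
	}

	static struct notifier_block coherent_platform_nb = {
		.notifier_call = coherent_platform_notifier,
	};

	static int __init coherent_io_init(void)
	{
		return bus_register_notifier(&platform_bus_type,
					     &coherent_platform_nb);
	}
	arch_initcall(coherent_io_init);

On device-tree based platforms the same intent is typically expressed with the standard "dma-coherent" property rather than open-coded platform notifiers.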
arch/arm/include/asm/barrier.h
arch/arm/include/asm/memory.h
arch/arm/mm/mmu.c

diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 05112380dc5398dd47cbd9fb6adf564f96a4c94c..8dcd9c702d90c9c352d85595d0ea8f83a42f215e 100644
 #define rmb()          dsb()
 #define wmb()          mb()
 #else
-#include <asm/memory.h>
-#define mb()   do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb()  do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb()  do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define mb()           barrier()
+#define rmb()          barrier()
+#define wmb()          barrier()
 #endif
 
 #ifndef CONFIG_SMP
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5f6ddcc56452998f40b1c16d7e20a0ff1ec010bd..73cf03aa981e1b665d9a40f4f80847335ca49441 100644
@@ -275,14 +275,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
-/*
- * Optional coherency support.  Currently used only by selected
- * Intel XSC3-based systems.
- */
-#ifndef arch_is_coherent
-#define arch_is_coherent()             0
-#endif
-
 #endif
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2fa21d0103e0348f2b1f48c886aa11d63809dad..8fd039929ae83bedbffb5563fd61e262ea5bf971 100644
@@ -216,7 +216,7 @@ static struct mem_type mem_types[] = {
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
-       },      
+       },
        [MT_DEVICE_WC] = {      /* ioremap_wc */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1        = PMD_TYPE_TABLE,
@@ -421,17 +421,6 @@ static void __init build_mem_type_table(void)
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-       /*
-        * Enable CPU-specific coherency if supported.
-        * (Only available on XSC3 at the moment.)
-        */
-       if (arch_is_coherent() && cpu_is_xsc3()) {
-               mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-               mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-       }
        /*
         * ARMv6 and above have extended page tables.
         */
@@ -777,8 +766,8 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
                create_mapping(md);
                vm->addr = (void *)(md->virtual & PAGE_MASK);
                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-               vm->phys_addr = __pfn_to_phys(md->pfn); 
-               vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 
+               vm->phys_addr = __pfn_to_phys(md->pfn);
+               vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
                vm->flags |= VM_ARM_MTYPE(md->type);
                vm->caller = iotable_init;
                vm_area_add_early(vm++);