1 /*
2  *  linux/arch/arm/mm/mmu.c
3  *
4  *  Copyright (C) 1995-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/mman.h>
15 #include <linux/nodemask.h>
16 #include <linux/memblock.h>
17 #include <linux/fs.h>
18 #include <linux/vmalloc.h>
19 #include <linux/sizes.h>
20
21 #include <asm/cp15.h>
22 #include <asm/cputype.h>
23 #include <asm/sections.h>
24 #include <asm/cachetype.h>
25 #include <asm/setup.h>
26 #include <asm/smp_plat.h>
27 #include <asm/tlb.h>
28 #include <asm/highmem.h>
29 #include <asm/system_info.h>
30 #include <asm/traps.h>
31
32 #include <asm/mach/arch.h>
33 #include <asm/mach/map.h>
34 #include <asm/mach/pci.h>
35
36 #include "mm.h"
37
38 /*
39  * empty_zero_page is a special page that is used for
40  * zero-initialized data and COW.
41  */
42 struct page *empty_zero_page;
43 EXPORT_SYMBOL(empty_zero_page);
44
45 /*
46  * The pmd table for the upper-most set of pages.
47  */
48 pmd_t *top_pmd;
49
50 #define CPOLICY_UNCACHED        0
51 #define CPOLICY_BUFFERED        1
52 #define CPOLICY_WRITETHROUGH    2
53 #define CPOLICY_WRITEBACK       3
54 #define CPOLICY_WRITEALLOC      4
55
56 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
57 static unsigned int ecc_mask __initdata = 0;
58 pgprot_t pgprot_user;
59 pgprot_t pgprot_kernel;
60 pgprot_t pgprot_hyp_device;
61 pgprot_t pgprot_s2;
62 pgprot_t pgprot_s2_device;
63
64 EXPORT_SYMBOL(pgprot_user);
65 EXPORT_SYMBOL(pgprot_kernel);
66
67 struct cachepolicy {
68         const char      policy[16];
69         unsigned int    cr_mask;
70         pmdval_t        pmd;
71         pteval_t        pte;
72         pteval_t        pte_s2;
73 };
74
75 #ifdef CONFIG_ARM_LPAE
76 #define s2_policy(policy)       policy
77 #else
78 #define s2_policy(policy)       0
79 #endif
80
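/*
 * Boot-time cache policy table: the "cachepolicy=" early parameter
 * (handled by early_cachepolicy() below) selects one of these entries
 * by name, and build_mem_type_table() applies the chosen PMD/PTE
 * attributes to the kernel's memory types.
 */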
81 static struct cachepolicy cache_policies[] __initdata = {
82         {
83                 .policy         = "uncached",
84                 .cr_mask        = CR_W|CR_C,
85                 .pmd            = PMD_SECT_UNCACHED,
86                 .pte            = L_PTE_MT_UNCACHED,
87                 .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
88         }, {
89                 .policy         = "buffered",
90                 .cr_mask        = CR_C,
91                 .pmd            = PMD_SECT_BUFFERED,
92                 .pte            = L_PTE_MT_BUFFERABLE,
93                 .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
94         }, {
95                 .policy         = "writethrough",
96                 .cr_mask        = 0,
97                 .pmd            = PMD_SECT_WT,
98                 .pte            = L_PTE_MT_WRITETHROUGH,
99                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
100         }, {
101                 .policy         = "writeback",
102                 .cr_mask        = 0,
103                 .pmd            = PMD_SECT_WB,
104                 .pte            = L_PTE_MT_WRITEBACK,
105                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
106         }, {
107                 .policy         = "writealloc",
108                 .cr_mask        = 0,
109                 .pmd            = PMD_SECT_WBWA,
110                 .pte            = L_PTE_MT_WRITEALLOC,
111                 .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
112         }
113 };
114
115 /*
116  * These are useful for identifying cache coherency
117  * problems by allowing the cache or the cache and
118  * writebuffer to be turned off.  (Note: the write
119  * buffer should not be enabled while the cache is off).
120  */
121 static int __init early_cachepolicy(char *p)
122 {
123         int i;
124
125         for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
126                 int len = strlen(cache_policies[i].policy);
127
128                 if (memcmp(p, cache_policies[i].policy, len) == 0) {
129                         cachepolicy = i;
130                         cr_alignment &= ~cache_policies[i].cr_mask;
131                         cr_no_alignment &= ~cache_policies[i].cr_mask;
132                         break;
133                 }
134         }
135         if (i == ARRAY_SIZE(cache_policies))
136                 printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
137         /*
138          * This restriction is partly to do with the way we boot; it is
139          * unpredictable to have memory mapped using two different sets of
140          * memory attributes (shared, type, and cache attributes).  We cannot
141          * change these attributes once the initial assembly code has set up
142          * the page tables.
143          */
144         if (cpu_architecture() >= CPU_ARCH_ARMv6) {
145                 printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
146                 cachepolicy = CPOLICY_WRITEBACK;
147         }
148         flush_cache_all();
149         set_cr(cr_alignment);
150         return 0;
151 }
152 early_param("cachepolicy", early_cachepolicy);
153
154 static int __init early_nocache(char *__unused)
155 {
156         char *p = "buffered";
157         printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
158         early_cachepolicy(p);
159         return 0;
160 }
161 early_param("nocache", early_nocache);
162
163 static int __init early_nowrite(char *__unused)
164 {
165         char *p = "uncached";
166         printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
167         early_cachepolicy(p);
168         return 0;
169 }
170 early_param("nowb", early_nowrite);
171
172 #ifndef CONFIG_ARM_LPAE
173 static int __init early_ecc(char *p)
174 {
175         if (memcmp(p, "on", 2) == 0)
176                 ecc_mask = PMD_PROTECTION;
177         else if (memcmp(p, "off", 3) == 0)
178                 ecc_mask = 0;
179         return 0;
180 }
181 early_param("ecc", early_ecc);
182 #endif
183
184 static int __init noalign_setup(char *__unused)
185 {
186         cr_alignment &= ~CR_A;
187         cr_no_alignment &= ~CR_A;
188         set_cr(cr_alignment);
189         return 1;
190 }
191 __setup("noalign", noalign_setup);
192
193 #ifndef CONFIG_SMP
194 void adjust_cr(unsigned long mask, unsigned long set)
195 {
196         unsigned long flags;
197
198         mask &= ~CR_A;
199
200         set &= mask;
201
202         local_irq_save(flags);
203
204         cr_no_alignment = (cr_no_alignment & ~mask) | set;
205         cr_alignment = (cr_alignment & ~mask) | set;
206
207         set_cr((get_cr() & ~mask) | set);
208
209         local_irq_restore(flags);
210 }
211 #endif
212
213 #define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
214 #define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE
215
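/*
 * Baseline attributes for each mapping type.  These are the generic
 * defaults; build_mem_type_table() fixes them up at boot according to
 * the CPU architecture, SMP/UP configuration and the selected cache
 * policy.
 */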
216 static struct mem_type mem_types[] = {
217         [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
218                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
219                                   L_PTE_SHARED,
220                 .prot_l1        = PMD_TYPE_TABLE,
221                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
222                 .domain         = DOMAIN_IO,
223         },
224         [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
225                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
226                 .prot_l1        = PMD_TYPE_TABLE,
227                 .prot_sect      = PROT_SECT_DEVICE,
228                 .domain         = DOMAIN_IO,
229         },
230         [MT_DEVICE_CACHED] = {    /* ioremap_cached */
231                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
232                 .prot_l1        = PMD_TYPE_TABLE,
233                 .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
234                 .domain         = DOMAIN_IO,
235         },
236         [MT_DEVICE_WC] = {      /* ioremap_wc */
237                 .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
238                 .prot_l1        = PMD_TYPE_TABLE,
239                 .prot_sect      = PROT_SECT_DEVICE,
240                 .domain         = DOMAIN_IO,
241         },
242         [MT_UNCACHED] = {
243                 .prot_pte       = PROT_PTE_DEVICE,
244                 .prot_l1        = PMD_TYPE_TABLE,
245                 .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
246                 .domain         = DOMAIN_IO,
247         },
248         [MT_CACHECLEAN] = {
249                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
250                 .domain    = DOMAIN_KERNEL,
251         },
252 #ifndef CONFIG_ARM_LPAE
253         [MT_MINICLEAN] = {
254                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
255                 .domain    = DOMAIN_KERNEL,
256         },
257 #endif
258         [MT_LOW_VECTORS] = {
259                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
260                                 L_PTE_RDONLY,
261                 .prot_l1   = PMD_TYPE_TABLE,
262                 .domain    = DOMAIN_USER,
263         },
264         [MT_HIGH_VECTORS] = {
265                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
266                                 L_PTE_USER | L_PTE_RDONLY,
267                 .prot_l1   = PMD_TYPE_TABLE,
268                 .domain    = DOMAIN_USER,
269         },
270         [MT_MEMORY] = {
271                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
272                 .prot_l1   = PMD_TYPE_TABLE,
273                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
274                 .domain    = DOMAIN_KERNEL,
275         },
276         [MT_ROM] = {
277                 .prot_sect = PMD_TYPE_SECT,
278                 .domain    = DOMAIN_KERNEL,
279         },
280         [MT_MEMORY_NONCACHED] = {
281                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
282                                 L_PTE_MT_BUFFERABLE,
283                 .prot_l1   = PMD_TYPE_TABLE,
284                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
285                 .domain    = DOMAIN_KERNEL,
286         },
287         [MT_MEMORY_DTCM] = {
288                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
289                                 L_PTE_XN,
290                 .prot_l1   = PMD_TYPE_TABLE,
291                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
292                 .domain    = DOMAIN_KERNEL,
293         },
294         [MT_MEMORY_ITCM] = {
295                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
296                 .prot_l1   = PMD_TYPE_TABLE,
297                 .domain    = DOMAIN_KERNEL,
298         },
299         [MT_MEMORY_SO] = {
300                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
301                                 L_PTE_MT_UNCACHED | L_PTE_XN,
302                 .prot_l1   = PMD_TYPE_TABLE,
303                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
304                                 PMD_SECT_UNCACHED | PMD_SECT_XN,
305                 .domain    = DOMAIN_KERNEL,
306         },
307         [MT_MEMORY_DMA_READY] = {
308                 .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
309                 .prot_l1   = PMD_TYPE_TABLE,
310                 .domain    = DOMAIN_KERNEL,
311         },
312 };
313
314 const struct mem_type *get_mem_type(unsigned int type)
315 {
316         return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
317 }
318 EXPORT_SYMBOL(get_mem_type);
319
320 /*
321  * Adjust the PMD section entries according to the CPU in use.
322  */
323 static void __init build_mem_type_table(void)
324 {
325         struct cachepolicy *cp;
326         unsigned int cr = get_cr();
327         pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
328         pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
329         int cpu_arch = cpu_architecture();
330         int i;
331
332         if (cpu_arch < CPU_ARCH_ARMv6) {
333 #if defined(CONFIG_CPU_DCACHE_DISABLE)
334                 if (cachepolicy > CPOLICY_BUFFERED)
335                         cachepolicy = CPOLICY_BUFFERED;
336 #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
337                 if (cachepolicy > CPOLICY_WRITETHROUGH)
338                         cachepolicy = CPOLICY_WRITETHROUGH;
339 #endif
340         }
341         if (cpu_arch < CPU_ARCH_ARMv5) {
342                 if (cachepolicy >= CPOLICY_WRITEALLOC)
343                         cachepolicy = CPOLICY_WRITEBACK;
344                 ecc_mask = 0;
345         }
346         if (is_smp())
347                 cachepolicy = CPOLICY_WRITEALLOC;
348
349         /*
350          * Strip out features not present on earlier architectures.
351          * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
352          * without extended page tables don't have the 'Shared' bit.
353          */
354         if (cpu_arch < CPU_ARCH_ARMv5)
355                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
356                         mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
357         if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
358                 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
359                         mem_types[i].prot_sect &= ~PMD_SECT_S;
360
361         /*
362          * On ARMv5 and earlier, bit 4 must be set for page tables (it was the
363          * cache "updateable on write" bit on ARM610).  However, XScale and
364          * XScale3 require this bit to be cleared.
365          */
366         if (cpu_is_xscale() || cpu_is_xsc3()) {
367                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
368                         mem_types[i].prot_sect &= ~PMD_BIT4;
369                         mem_types[i].prot_l1 &= ~PMD_BIT4;
370                 }
371         } else if (cpu_arch < CPU_ARCH_ARMv6) {
372                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
373                         if (mem_types[i].prot_l1)
374                                 mem_types[i].prot_l1 |= PMD_BIT4;
375                         if (mem_types[i].prot_sect)
376                                 mem_types[i].prot_sect |= PMD_BIT4;
377                 }
378         }
379
380         /*
381          * Mark the device areas according to the CPU/architecture.
382          */
383         if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
384                 if (!cpu_is_xsc3()) {
385                         /*
386                          * Mark device regions on ARMv6+ as execute-never
387                          * to prevent speculative instruction fetches.
388                          */
389                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
390                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
391                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
392                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
393                 }
394                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
395                         /*
396                          * For ARMv7 with TEX remapping,
397                          * - shared device is SXCB=1100
398                          * - nonshared device is SXCB=0100
399                          * - write combine device mem is SXCB=0001
400                          * (Uncached Normal memory)
401                          */
402                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
403                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
404                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
405                 } else if (cpu_is_xsc3()) {
406                         /*
407                          * For Xscale3,
408                          * - shared device is TEXCB=00101
409                          * - nonshared device is TEXCB=01000
410                          * - write combine device mem is TEXCB=00100
411                          * (Inner/Outer Uncacheable in xsc3 parlance)
412                          */
413                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
414                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
415                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
416                 } else {
417                         /*
418                          * For ARMv6 and ARMv7 without TEX remapping,
419                          * - shared device is TEXCB=00001
420                          * - nonshared device is TEXCB=01000
421                          * - write combine device mem is TEXCB=00100
422                          * (Uncached Normal in ARMv6 parlance).
423                          */
424                         mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
425                         mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
426                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
427                 }
428         } else {
429                 /*
430                  * On others, write combining is "Uncached/Buffered"
431                  */
432                 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
433         }
434
435         /*
436          * Now deal with the memory-type mappings
437          */
438         cp = &cache_policies[cachepolicy];
439         vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
440         s2_pgprot = cp->pte_s2;
441         hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
442
443         /*
444          * ARMv6 and above have extended page tables.
445          */
446         if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
447 #ifndef CONFIG_ARM_LPAE
448                 /*
449                  * Mark cache clean areas and XIP ROM read only
450                  * from SVC mode and no access from userspace.
451                  */
452                 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
453                 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
454                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
455 #endif
456
457                 if (is_smp()) {
458                         /*
459                          * Mark memory with the "shared" attribute
460                          * for SMP systems
461                          */
462                         user_pgprot |= L_PTE_SHARED;
463                         kern_pgprot |= L_PTE_SHARED;
464                         vecs_pgprot |= L_PTE_SHARED;
465                         s2_pgprot |= L_PTE_SHARED;
466                         mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
467                         mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
468                         mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
469                         mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
470                         mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
471                         mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
472                         mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
473                         mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
474                         mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
475                 }
476         }
477
478         /*
479          * Non-cacheable Normal - intended for memory areas that must
480          * not cause dirty cache line writebacks when used
481          */
482         if (cpu_arch >= CPU_ARCH_ARMv6) {
483                 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
484                         /* Non-cacheable Normal is XCB = 001 */
485                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
486                                 PMD_SECT_BUFFERED;
487                 } else {
488                         /* For both ARMv6 and non-TEX-remapping ARMv7 */
489                         mem_types[MT_MEMORY_NONCACHED].prot_sect |=
490                                 PMD_SECT_TEX(1);
491                 }
492         } else {
493                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
494         }
495
496 #ifdef CONFIG_ARM_LPAE
497         /*
498          * Do not generate access flag faults for the kernel mappings.
499          */
500         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
501                 mem_types[i].prot_pte |= PTE_EXT_AF;
502                 if (mem_types[i].prot_sect)
503                         mem_types[i].prot_sect |= PMD_SECT_AF;
504         }
505         kern_pgprot |= PTE_EXT_AF;
506         vecs_pgprot |= PTE_EXT_AF;
507 #endif
508
509         for (i = 0; i < 16; i++) {
510                 pteval_t v = pgprot_val(protection_map[i]);
511                 protection_map[i] = __pgprot(v | user_pgprot);
512         }
513
514         mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
515         mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
516
517         pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
518         pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
519                                  L_PTE_DIRTY | kern_pgprot);
520         pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
521         pgprot_s2_device  = __pgprot(s2_device_pgprot);
522         pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
523
524         mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
525         mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
526         mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
527         mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
528         mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
529         mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
530         mem_types[MT_ROM].prot_sect |= cp->pmd;
531
532         switch (cp->pmd) {
533         case PMD_SECT_WT:
534                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
535                 break;
536         case PMD_SECT_WB:
537         case PMD_SECT_WBWA:
538                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
539                 break;
540         }
541         printk("Memory policy: ECC %sabled, Data cache %s\n",
542                 ecc_mask ? "en" : "dis", cp->policy);
543
544         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
545                 struct mem_type *t = &mem_types[i];
546                 if (t->prot_l1)
547                         t->prot_l1 |= PMD_DOMAIN(t->domain);
548                 if (t->prot_sect)
549                         t->prot_sect |= PMD_DOMAIN(t->domain);
550         }
551 }
552
553 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
554 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
555                               unsigned long size, pgprot_t vma_prot)
556 {
557         if (!pfn_valid(pfn))
558                 return pgprot_noncached(vma_prot);
559         else if (file->f_flags & O_SYNC)
560                 return pgprot_writecombine(vma_prot);
561         return vma_prot;
562 }
563 EXPORT_SYMBOL(phys_mem_access_prot);
564 #endif
565
566 #define vectors_base()  (vectors_high() ? 0xffff0000 : 0)
567
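/*
 * Early boot allocators: hand out zeroed memory taken directly from
 * memblock, for page tables and other structures needed before the
 * page allocator is up.
 */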
568 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
569 {
570         void *ptr = __va(memblock_alloc(sz, align));
571         memset(ptr, 0, sz);
572         return ptr;
573 }
574
575 static void __init *early_alloc(unsigned long sz)
576 {
577         return early_alloc_aligned(sz, sz);
578 }
579
580 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
581 {
582         if (pmd_none(*pmd)) {
583                 pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
584                 __pmd_populate(pmd, __pa(pte), prot);
585         }
586         BUG_ON(pmd_bad(*pmd));
587         return pte_offset_kernel(pmd, addr);
588 }
589
590 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
591                                   unsigned long end, unsigned long pfn,
592                                   const struct mem_type *type)
593 {
594         pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
595         do {
596                 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
597                 pfn++;
598         } while (pte++, addr += PAGE_SIZE, addr != end);
599 }
600
601 static void __init map_init_section(pmd_t *pmd, unsigned long addr,
602                         unsigned long end, phys_addr_t phys,
603                         const struct mem_type *type)
604 {
605 #ifndef CONFIG_ARM_LPAE
606         /*
607          * In the classic MMU format, puds and pmds are folded into
608          * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
609          * group of L1 entries making up one logical pointer to
610          * an L2 table (2MB), whereas PMDs refer to the individual
611          * L1 entries (1MB). Hence we increment pmd to get the correct
612          * offset for odd 1MB sections.
613          * (See arch/arm/include/asm/pgtable-2level.h)
614          */
615         if (addr & SECTION_SIZE)
616                 pmd++;
617 #endif
618         do {
619                 *pmd = __pmd(phys | type->prot_sect);
620                 phys += SECTION_SIZE;
621         } while (pmd++, addr += SECTION_SIZE, addr != end);
622
623         flush_pmd_entry(pmd);
624 }
625
626 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
627                                       unsigned long end, phys_addr_t phys,
628                                       const struct mem_type *type)
629 {
630         pmd_t *pmd = pmd_offset(pud, addr);
631         unsigned long next;
632
633         do {
634                 /*
635                  * With LPAE, we must loop over to map
636                  * all the pmds for the given range.
637                  */
638                 next = pmd_addr_end(addr, end);
639
640                 /*
641                  * Try a section mapping - addr, next and phys must all be
642                  * aligned to a section boundary.
643                  */
644                 if (type->prot_sect &&
645                                 ((addr | next | phys) & ~SECTION_MASK) == 0) {
646                         map_init_section(pmd, addr, next, phys, type);
647                 } else {
648                         alloc_init_pte(pmd, addr, next,
649                                                 __phys_to_pfn(phys), type);
650                 }
651
652                 phys += next - addr;
653
654         } while (pmd++, addr = next, addr != end);
655 }
656
657 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
658         unsigned long end, unsigned long phys, const struct mem_type *type)
659 {
660         pud_t *pud = pud_offset(pgd, addr);
661         unsigned long next;
662
663         do {
664                 next = pud_addr_end(addr, end);
665                 alloc_init_pmd(pud, addr, next, phys, type);
666                 phys += next - addr;
667         } while (pud++, addr = next, addr != end);
668 }
669
670 #ifndef CONFIG_ARM_LPAE
671 static void __init create_36bit_mapping(struct map_desc *md,
672                                         const struct mem_type *type)
673 {
674         unsigned long addr, length, end;
675         phys_addr_t phys;
676         pgd_t *pgd;
677
678         addr = md->virtual;
679         phys = __pfn_to_phys(md->pfn);
680         length = PAGE_ALIGN(md->length);
681
682         if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
683                 printk(KERN_ERR "MM: CPU does not support supersection "
684                        "mapping for 0x%08llx at 0x%08lx\n",
685                        (long long)__pfn_to_phys((u64)md->pfn), addr);
686                 return;
687         }
688
689         /* N.B. ARMv6 supersections are only defined to work with domain 0.
690          *      Since domain assignments can in fact be arbitrary, the
691          *      'domain == 0' check below is required to ensure that ARMv6
692          *      supersections are only allocated for domain 0 regardless
693          *      of the actual domain assignments in use.
694          */
695         if (type->domain) {
696                 printk(KERN_ERR "MM: invalid domain in supersection "
697                        "mapping for 0x%08llx at 0x%08lx\n",
698                        (long long)__pfn_to_phys((u64)md->pfn), addr);
699                 return;
700         }
701
702         if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
703                 printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
704                        " at 0x%08lx invalid alignment\n",
705                        (long long)__pfn_to_phys((u64)md->pfn), addr);
706                 return;
707         }
708
709         /*
710          * Shift bits [35:32] of address into bits [23:20] of PMD
711          * (See ARMv6 spec).
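         * E.g. with 4K pages, pfn 0x110000 (phys 0x1_10000000) puts
         * the value 0x1 into descriptor bits [23:20].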
712          */
713         phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
714
715         pgd = pgd_offset_k(addr);
716         end = addr + length;
717         do {
718                 pud_t *pud = pud_offset(pgd, addr);
719                 pmd_t *pmd = pmd_offset(pud, addr);
720                 int i;
721
722                 for (i = 0; i < 16; i++)
723                         *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
724
725                 addr += SUPERSECTION_SIZE;
726                 phys += SUPERSECTION_SIZE;
727                 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
728         } while (addr != end);
729 }
730 #endif  /* !CONFIG_ARM_LPAE */
731
732 /*
733  * Create the page directory entries and any necessary
734  * page tables for the mapping specified by `md'.  We
735  * are able to cope here with varying sizes and address
736  * offsets, and we take full advantage of sections and
737  * supersections.
738  */
739 static void __init create_mapping(struct map_desc *md)
740 {
741         unsigned long addr, length, end;
742         phys_addr_t phys;
743         const struct mem_type *type;
744         pgd_t *pgd;
745
746         if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
747                 printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
748                        " at 0x%08lx in user region\n",
749                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
750                 return;
751         }
752
753         if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
754             md->virtual >= PAGE_OFFSET &&
755             (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
756                 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
757                        " at 0x%08lx out of vmalloc space\n",
758                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
759         }
760
761         type = &mem_types[md->type];
762
763 #ifndef CONFIG_ARM_LPAE
764         /*
765          * Catch 36-bit addresses
766          */
767         if (md->pfn >= 0x100000) {
768                 create_36bit_mapping(md, type);
769                 return;
770         }
771 #endif
772
773         addr = md->virtual & PAGE_MASK;
774         phys = __pfn_to_phys(md->pfn);
775         length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
776
777         if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
778                 printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
779                        "be mapped using pages, ignoring.\n",
780                        (long long)__pfn_to_phys(md->pfn), addr);
781                 return;
782         }
783
784         pgd = pgd_offset_k(addr);
785         end = addr + length;
786         do {
787                 unsigned long next = pgd_addr_end(addr, end);
788
789                 alloc_init_pud(pgd, addr, next, phys, type);
790
791                 phys += next - addr;
792                 addr = next;
793         } while (pgd++, addr != end);
794 }
795
796 /*
797  * Create the architecture specific mappings
798  */
799 void __init iotable_init(struct map_desc *io_desc, int nr)
800 {
801         struct map_desc *md;
802         struct vm_struct *vm;
803         struct static_vm *svm;
804
805         if (!nr)
806                 return;
807
808         svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
809
810         for (md = io_desc; nr; md++, nr--) {
811                 create_mapping(md);
812
813                 vm = &svm->vm;
814                 vm->addr = (void *)(md->virtual & PAGE_MASK);
815                 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
816                 vm->phys_addr = __pfn_to_phys(md->pfn);
817                 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
818                 vm->flags |= VM_ARM_MTYPE(md->type);
819                 vm->caller = iotable_init;
820                 add_static_vm_early(svm++);
821         }
822 }
823
824 void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
825                                   void *caller)
826 {
827         struct vm_struct *vm;
828         struct static_vm *svm;
829
830         svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
831
832         vm = &svm->vm;
833         vm->addr = (void *)addr;
834         vm->size = size;
835         vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
836         vm->caller = caller;
837         add_static_vm_early(svm);
838 }
839
840 #ifndef CONFIG_ARM_LPAE
841
842 /*
843  * The Linux PMD is made of two consecutive section entries covering 2MB
844  * (see definition in include/asm/pgtable-2level.h).  However a call to
845  * create_mapping() may optimize static mappings by using individual
846  * 1MB section mappings.  This leaves the actual PMD potentially half
847  * initialized if the top or bottom section entry isn't used, leaving it
848  * open to problems if a subsequent ioremap() or vmalloc() tries to use
849  * the virtual space left free by that unused section entry.
850  *
851  * Let's avoid the issue by inserting dummy vm entries covering the unused
852  * PMD halves once the static mappings are in place.
853  */
854
855 static void __init pmd_empty_section_gap(unsigned long addr)
856 {
857         vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
858 }
859
860 static void __init fill_pmd_gaps(void)
861 {
862         struct static_vm *svm;
863         struct vm_struct *vm;
864         unsigned long addr, next = 0;
865         pmd_t *pmd;
866
867         list_for_each_entry(svm, &static_vmlist, list) {
868                 vm = &svm->vm;
869                 addr = (unsigned long)vm->addr;
870                 if (addr < next)
871                         continue;
872
873                 /*
874                  * Check if this vm starts on an odd section boundary.
875                  * If so and the first section entry for this PMD is free
876                  * then we block the corresponding virtual address.
877                  */
878                 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
879                         pmd = pmd_off_k(addr);
880                         if (pmd_none(*pmd))
881                                 pmd_empty_section_gap(addr & PMD_MASK);
882                 }
883
884                 /*
885                  * Then check if this vm ends on an odd section boundary.
886                  * If so and the second section entry for this PMD is empty
887                  * then we block the corresponding virtual address.
888                  */
889                 addr += vm->size;
890                 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
891                         pmd = pmd_off_k(addr) + 1;
892                         if (pmd_none(*pmd))
893                                 pmd_empty_section_gap(addr);
894                 }
895
896                 /* no need to look at any vm entry until we hit the next PMD */
897                 next = (addr + PMD_SIZE - 1) & PMD_MASK;
898         }
899 }
900
901 #else
902 #define fill_pmd_gaps() do { } while (0)
903 #endif
904
905 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
906 static void __init pci_reserve_io(void)
907 {
908         struct static_vm *svm;
909
910         svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
911         if (svm)
912                 return;
913
914         vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
915 }
916 #else
917 #define pci_reserve_io() do { } while (0)
918 #endif
919
920 #ifdef CONFIG_DEBUG_LL
921 void __init debug_ll_io_init(void)
922 {
923         struct map_desc map;
924
925         debug_ll_addr(&map.pfn, &map.virtual);
926         if (!map.pfn || !map.virtual)
927                 return;
928         map.pfn = __phys_to_pfn(map.pfn);
929         map.virtual &= PAGE_MASK;
930         map.length = PAGE_SIZE;
931         map.type = MT_DEVICE;
932         create_mapping(&map);
933 }
934 #endif
935
936 static void * __initdata vmalloc_min =
937         (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
938
939 /*
940  * vmalloc=size forces the vmalloc area to be exactly 'size'
941  * bytes. This can be used to increase (or decrease) the vmalloc
942  * area - the default is 240m.
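 * For example, "vmalloc=512M" grows the area from the default 240MB to
 * 512MB, shrinking lowmem by the difference.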
943  */
944 static int __init early_vmalloc(char *arg)
945 {
946         unsigned long vmalloc_reserve = memparse(arg, NULL);
947
948         if (vmalloc_reserve < SZ_16M) {
949                 vmalloc_reserve = SZ_16M;
950                 printk(KERN_WARNING
951                         "vmalloc area too small, limiting to %luMB\n",
952                         vmalloc_reserve >> 20);
953         }
954
955         if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
956                 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
957                 printk(KERN_WARNING
958                         "vmalloc area is too big, limiting to %luMB\n",
959                         vmalloc_reserve >> 20);
960         }
961
962         vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
963         return 0;
964 }
965 early_param("vmalloc", early_vmalloc);
966
967 phys_addr_t arm_lowmem_limit __initdata = 0;
968
969 void __init sanity_check_meminfo(void)
970 {
971         int i, j, highmem = 0;
972
973         for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
974                 struct membank *bank = &meminfo.bank[j];
975                 *bank = meminfo.bank[i];
976
977                 if (bank->start > ULONG_MAX)
978                         highmem = 1;
979
980 #ifdef CONFIG_HIGHMEM
981                 if (__va(bank->start) >= vmalloc_min ||
982                     __va(bank->start) < (void *)PAGE_OFFSET)
983                         highmem = 1;
984
985                 bank->highmem = highmem;
986
987                 /*
988                  * Split those memory banks which partially overlap
989                  * the vmalloc area, greatly simplifying things later.
990                  */
991                 if (!highmem && __va(bank->start) < vmalloc_min &&
992                     bank->size > vmalloc_min - __va(bank->start)) {
993                         if (meminfo.nr_banks >= NR_BANKS) {
994                                 printk(KERN_CRIT "NR_BANKS too low, "
995                                                  "ignoring high memory\n");
996                         } else {
997                                 memmove(bank + 1, bank,
998                                         (meminfo.nr_banks - i) * sizeof(*bank));
999                                 meminfo.nr_banks++;
1000                                 i++;
1001                                 bank[1].size -= vmalloc_min - __va(bank->start);
1002                                 bank[1].start = __pa(vmalloc_min - 1) + 1;
1003                                 bank[1].highmem = highmem = 1;
1004                                 j++;
1005                         }
1006                         bank->size = vmalloc_min - __va(bank->start);
1007                 }
1008 #else
1009                 bank->highmem = highmem;
1010
1011                 /*
1012                  * Highmem banks not allowed with !CONFIG_HIGHMEM.
1013                  */
1014                 if (highmem) {
1015                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1016                                "(!CONFIG_HIGHMEM).\n",
1017                                (unsigned long long)bank->start,
1018                                (unsigned long long)bank->start + bank->size - 1);
1019                         continue;
1020                 }
1021
1022                 /*
1023                  * Check whether this memory bank would entirely overlap
1024                  * the vmalloc area.
1025                  */
1026                 if (__va(bank->start) >= vmalloc_min ||
1027                     __va(bank->start) < (void *)PAGE_OFFSET) {
1028                         printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1029                                "(vmalloc region overlap).\n",
1030                                (unsigned long long)bank->start,
1031                                (unsigned long long)bank->start + bank->size - 1);
1032                         continue;
1033                 }
1034
1035                 /*
1036                  * Check whether this memory bank would partially overlap
1037                  * the vmalloc area.
1038                  */
1039                 if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
1040                     __va(bank->start + bank->size - 1) <= __va(bank->start)) {
1041                         unsigned long newsize = vmalloc_min - __va(bank->start);
1042                         printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
1043                                "to -%.8llx (vmalloc region overlap).\n",
1044                                (unsigned long long)bank->start,
1045                                (unsigned long long)bank->start + bank->size - 1,
1046                                (unsigned long long)bank->start + newsize - 1);
1047                         bank->size = newsize;
1048                 }
1049 #endif
1050                 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
1051                         arm_lowmem_limit = bank->start + bank->size;
1052
1053                 j++;
1054         }
1055 #ifdef CONFIG_HIGHMEM
1056         if (highmem) {
1057                 const char *reason = NULL;
1058
1059                 if (cache_is_vipt_aliasing()) {
1060                         /*
1061                          * Interactions between kmap and other mappings
1062                          * make highmem support with aliasing VIPT caches
1063                          * rather difficult.
1064                          */
1065                         reason = "with VIPT aliasing cache";
1066                 }
1067                 if (reason) {
1068                         printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
1069                                 reason);
1070                         while (j > 0 && meminfo.bank[j - 1].highmem)
1071                                 j--;
1072                 }
1073         }
1074 #endif
1075         meminfo.nr_banks = j;
1076         high_memory = __va(arm_lowmem_limit - 1) + 1;
1077         memblock_set_current_limit(arm_lowmem_limit);
1078 }
1079
1080 static inline void prepare_page_table(void)
1081 {
1082         unsigned long addr;
1083         phys_addr_t end;
1084
1085         /*
1086          * Clear out all the mappings below the kernel image.
1087          */
1088         for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1089                 pmd_clear(pmd_off_k(addr));
1090
1091 #ifdef CONFIG_XIP_KERNEL
1092         /* The XIP kernel is mapped in the module area -- skip over it */
1093         addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
1094 #endif
1095         for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1096                 pmd_clear(pmd_off_k(addr));
1097
1098         /*
1099          * Find the end of the first block of lowmem.
1100          */
1101         end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1102         if (end >= arm_lowmem_limit)
1103                 end = arm_lowmem_limit;
1104
1105         /*
1106          * Clear out all the kernel space mappings, except for the first
1107          * memory bank, up to the vmalloc region.
1108          */
1109         for (addr = __phys_to_virt(end);
1110              addr < VMALLOC_START; addr += PMD_SIZE)
1111                 pmd_clear(pmd_off_k(addr));
1112 }
1113
1114 #ifdef CONFIG_ARM_LPAE
1115 /* the first page is reserved for pgd */
1116 #define SWAPPER_PG_DIR_SIZE     (PAGE_SIZE + \
1117                                  PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1118 #else
1119 #define SWAPPER_PG_DIR_SIZE     (PTRS_PER_PGD * sizeof(pgd_t))
1120 #endif
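/*
 * With 4K pages this works out to 16K for the classic 2-level tables
 * (2048 pgd entries of 8 bytes each), or 20K under LPAE (one pgd page
 * plus four 4K pmd tables).
 */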
1121
1122 /*
1123  * Reserve the special regions of memory
1124  */
1125 void __init arm_mm_memblock_reserve(void)
1126 {
1127         /*
1128          * Reserve the page tables.  These are already in use,
1129          * and can only be in node 0.
1130          */
1131         memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1132
1133 #ifdef CONFIG_SA1111
1134         /*
1135          * Because of the SA1111 DMA bug, we want to preserve our
1136          * precious DMA-able memory...
1137          */
1138         memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1139 #endif
1140 }
1141
1142 /*
1143  * Set up the device mappings.  Since we clear out the page tables for all
1144  * mappings above VMALLOC_START, we will remove any debug device mappings.
1145  * This means you have to be careful how you debug this function, or any
1146  * function it calls: you cannot use any function or debugging method
1147  * which may touch any device, otherwise the kernel _will_ crash.
1148  */
1149 static void __init devicemaps_init(struct machine_desc *mdesc)
1150 {
1151         struct map_desc map;
1152         unsigned long addr;
1153         void *vectors;
1154
1155         /*
1156          * Allocate the vector page early.
1157          */
1158         vectors = early_alloc(PAGE_SIZE);
1159
1160         early_trap_init(vectors);
1161
1162         for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
1163                 pmd_clear(pmd_off_k(addr));
1164
1165         /*
1166          * Map the kernel if it is XIP.
1167          * It is always first in the module area.
1168          */
1169 #ifdef CONFIG_XIP_KERNEL
1170         map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1171         map.virtual = MODULES_VADDR;
1172         map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1173         map.type = MT_ROM;
1174         create_mapping(&map);
1175 #endif
1176
1177         /*
1178          * Map the cache flushing regions.
1179          */
1180 #ifdef FLUSH_BASE
1181         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1182         map.virtual = FLUSH_BASE;
1183         map.length = SZ_1M;
1184         map.type = MT_CACHECLEAN;
1185         create_mapping(&map);
1186 #endif
1187 #ifdef FLUSH_BASE_MINICACHE
1188         map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1189         map.virtual = FLUSH_BASE_MINICACHE;
1190         map.length = SZ_1M;
1191         map.type = MT_MINICLEAN;
1192         create_mapping(&map);
1193 #endif
1194
1195         /*
1196          * Create a mapping for the machine vectors at the high-vectors
1197          * location (0xffff0000).  If we aren't using high-vectors, also
1198          * create a mapping at the low-vectors virtual address.
1199          */
1200         map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1201         map.virtual = 0xffff0000;
1202         map.length = PAGE_SIZE;
1203         map.type = MT_HIGH_VECTORS;
1204         create_mapping(&map);
1205
1206         if (!vectors_high()) {
1207                 map.virtual = 0;
1208                 map.type = MT_LOW_VECTORS;
1209                 create_mapping(&map);
1210         }
1211
1212         /*
1213          * Ask the machine support to map in the statically mapped devices.
1214          */
1215         if (mdesc->map_io)
1216                 mdesc->map_io();
1217         fill_pmd_gaps();
1218
1219         /* Reserve fixed i/o space in VMALLOC region */
1220         pci_reserve_io();
1221
1222         /*
1223          * Finally flush the caches and tlb to ensure that we're in a
1224          * consistent state wrt the writebuffer.  This also ensures that
1225          * any write-allocated cache lines in the vector page are written
1226          * back.  After this point, we can start to touch devices again.
1227          */
1228         local_flush_tlb_all();
1229         flush_cache_all();
1230 }
1231
1232 static void __init kmap_init(void)
1233 {
1234 #ifdef CONFIG_HIGHMEM
1235         pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1236                 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1237 #endif
1238 }
1239
1240 static void __init map_lowmem(void)
1241 {
1242         struct memblock_region *reg;
1243
1244         /* Map all the lowmem memory banks. */
1245         for_each_memblock(memory, reg) {
1246                 phys_addr_t start = reg->base;
1247                 phys_addr_t end = start + reg->size;
1248                 struct map_desc map;
1249
1250                 if (end > arm_lowmem_limit)
1251                         end = arm_lowmem_limit;
1252                 if (start >= end)
1253                         break;
1254
1255                 map.pfn = __phys_to_pfn(start);
1256                 map.virtual = __phys_to_virt(start);
1257                 map.length = end - start;
1258                 map.type = MT_MEMORY;
1259
1260                 create_mapping(&map);
1261         }
1262 }
1263
1264 /*
1265  * paging_init() sets up the page tables, initialises the zone memory
1266  * maps, and sets up the zero page, bad page and bad page tables.
1267  */
1268 void __init paging_init(struct machine_desc *mdesc)
1269 {
1270         void *zero_page;
1271
1272         memblock_set_current_limit(arm_lowmem_limit);
1273
1274         build_mem_type_table();
1275         prepare_page_table();
1276         map_lowmem();
1277         dma_contiguous_remap();
1278         devicemaps_init(mdesc);
1279         kmap_init();
1280
1281         top_pmd = pmd_off_k(0xffff0000);
1282
1283         /* allocate the zero page. */
1284         zero_page = early_alloc(PAGE_SIZE);
1285
1286         bootmem_init();
1287
1288         empty_zero_page = virt_to_page(zero_page);
1289         __flush_dcache_page(NULL, empty_zero_page);
1290 }