arm64: mm: place empty_zero_page in bss
arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW. Placing it in .bss makes it part of the kernel image, so it is
 * mapped (and usable) from early boot rather than set up in paging_init().
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

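/*
 * Pick the memory attributes for a physical-address mapping (e.g. a
 * userspace mmap() of /dev/mem): Device attributes when there is no
 * struct page backing the pfn, a write-combining mapping when O_SYNC
 * was requested on RAM, and the caller's protections otherwise.
 */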
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

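/*
 * Allocate a zeroed page for use as a translation table before the core
 * allocators are up; the memory is taken straight from memblock.
 */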
static void __init *early_pgtable_alloc(void)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);
        ptr = __va(phys);
        memset(ptr, 0, PAGE_SIZE);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

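/*
 * Install PTE-level entries covering [addr, end). If the pmd is empty,
 * or holds a section mapping that must first be split, a new pte table
 * is allocated and hooked in before the individual ptes are written.
 */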
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
                                  void *(*pgtable_alloc)(void))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = pgtable_alloc();
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

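/*
 * Remap a 1GB pud block as a table of pmd blocks covering the same
 * range. The attributes are recovered by XOR-ing the output address out
 * of the pud value, and reapplied to each of the PTRS_PER_PMD entries.
 */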
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*pgtable_alloc)(void))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                pmd = pgtable_alloc();
                if (pud_sect(*pud)) {
                        /*
                         * Need to keep the existing 1G block mapping
                         * present while it is being split.
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}

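/*
 * A 1GB block mapping can only be used with the 4K granule (where
 * PUD_SIZE is 1GB) and when the virtual range and the physical address
 * are all 1GB-aligned. For example (assuming 4K pages), mapping phys
 * 0x40000000 at a 1GB-aligned VA for at least 1GB satisfies the check,
 * whereas phys 0x40200000 does not.
 */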
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                                  unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  void *(*pgtable_alloc)(void))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = pgtable_alloc();
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot,
                                       pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by the (phys, virt, size) triple.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                                    phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
                                    void *(*pgtable_alloc)(void))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

static void *late_pgtable_alloc(void)
{
        void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return ptr;
}

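/*
 * Create a kernel mapping in init_mm while only the memblock allocator
 * is available; translation table pages come from early_pgtable_alloc().
 * Virtual addresses below VMALLOC_START are rejected with a warning.
 */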
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
                         size, prot, early_pgtable_alloc);
}

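/*
 * Like create_mapping(), but for an arbitrary mm and with table pages
 * taken from the page allocator, so it is only usable once the core
 * allocators are up (used, e.g., by the EFI runtime setup code for its
 * own mm - an assumption about callers, not something enforced here).
 */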
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                         late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_mapping(&init_mm, pgd_offset_k(virt),
                         phys, virt, size, prot, late_pgtable_alloc);
}

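/*
 * Map a single memblock region. With DEBUG_RODATA, the region is split
 * around the kernel's executable image, [_stext, __init_end), so that
 * only that window is mapped PAGE_KERNEL_EXEC and the rest of the
 * region stays non-executable.
 */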
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine-grained later once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                        end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                kernel_x_start - start,
                                PAGE_KERNEL);
                create_mapping(kernel_x_start,
                                __phys_to_virt(kernel_x_start),
                                kernel_x_end - kernel_x_start,
                                PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                __phys_to_virt(kernel_x_end),
                                end - kernel_x_end,
                                PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                        PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
         * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
         * as per Documentation/arm64/booting.txt).
         */
        limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

                if (ARM64_SWAPPER_USES_SECTION_MAPS) {
                        /*
                         * For the first memory bank, align the start address
                         * and the current memblock limit to prevent
                         * create_mapping() from allocating pte page tables
                         * from unmapped memory. With section maps, if the
                         * first block doesn't end on a section-size boundary,
                         * create_mapping() will try to allocate a pte page,
                         * which may be returned from an unmapped area.
                         * When section maps are not used, the pte page table
                         * for the current limit is already present in
                         * swapper_pg_dir.
                         */
                        if (start < limit)
                                start = ALIGN(start, SECTION_SIZE);
                        if (end < limit) {
                                limit = end & SECTION_MASK;
                                memblock_set_current_limit(limit);
                        }
                }
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine-grained */
        if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SWAPPER_BLOCK_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                                __pa(_stext) - aligned_start,
                                PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                          SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                                aligned_end - __pa(__init_end),
                                PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                                (unsigned long)_etext - (unsigned long)_stext,
                                PAGE_KERNEL_ROX);
}
#endif

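/*
 * Remap the __init region with non-executable PAGE_KERNEL permissions
 * once its code will never run again; presumably called when the
 * initmem is freed (free_initmem()), though nothing here enforces that.
 */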
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                        (unsigned long)__init_end - (unsigned long)__init_begin,
                        PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables and initialises the zone memory
 * maps. (The zero page now lives in .bss, so it needs no setup here.)
 */
void __init paging_init(void)
{
        map_mem();
        fixup_executable();

        bootmem_init();

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

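/*
 * Statically allocated translation table pages for the fixmap, sized so
 * that early_fixmap_init() never needs a dynamic allocation. Only the
 * levels that exist for the configured page-table depth are defined.
 */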
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

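/*
 * Install (or, for an empty pgprot, tear down) the mapping for a single
 * fixmap slot. For instance, the earlycon code maps its UART page via
 * set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr), which ends up here with
 * Device I/O attributes.
 */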
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

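/*
 * Map the FDT through the fixmap so that it can be parsed before the
 * linear mapping is up. Returns the virtual address of the FDT header,
 * or NULL if the blob is absent, misaligned or larger than MAX_FDT_SIZE.
 */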
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL_RO;
        int size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. We rely on MIN_FDT_ALIGN being at least
         * 8 bytes so that we can always access the magic and size fields
         * of the FDT header after mapping the first chunk; double check
         * here that this is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                       SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > SWAPPER_BLOCK_SIZE)
                create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}