#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
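
/*
 * Illustrative note (editor's sketch, not a definition): ASM_CONST(),
 * from the asm-compat.h included above, appends UL in C but leaves the
 * constant bare for assembly, so with the default 4K configuration
 * PAGE_SIZE is 0x1000UL in C code and a plain (1 << 12) in .S files.
 */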
40 #ifdef CONFIG_HUGETLB_PAGE
41 extern unsigned int HPAGE_SHIFT;
43 #define HPAGE_SHIFT PAGE_SHIFT
45 #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
46 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
47 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
48 #define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
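
/*
 * Worked example (editor's illustration, not a definition): assuming
 * 16M hugepages on a 4K base-page kernel, HPAGE_SHIFT is 24, so
 * HPAGE_SIZE = 1UL << 24, HPAGE_MASK = ~0xffffff, and
 * HUGETLB_PAGE_ORDER = 24 - 12 = 12.
 */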

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long.  So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
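
/*
 * Sketch of the subtlety (illustrative only): with PAGE_SHIFT = 12,
 * PAGE_MASK is the int 0xfffff000, which is negative, so widening it
 * sign-extends:
 *
 *	unsigned long long mask = PAGE_MASK;	// 0xfffffffffffff000
 *
 * Writing ~((1UL << PAGE_SHIFT) - 1) on a 32-bit kernel would instead
 * produce the unsigned 0xfffff000UL, which widens with 0s.
 */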

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and, based on how they are set, we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine a virtual address from a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
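
/*
 * Worked example (hypothetical values, for illustration only): a kdump
 * kernel built with KERNELBASE = 0xc4000000, PAGE_OFFSET = 0xc0000000,
 * PHYSICAL_START = 0x04000000 and MEMORY_START = 0 satisfies the
 * equation above:
 *
 *	KERNELBASE - PAGE_OFFSET      = 0x04000000
 *	PHYSICAL_START - MEMORY_START = 0x04000000
 *
 * and both translations agree: va = pa + 0xc0000000.  LOAD_OFFSET here
 * would also be 0xc0000000.
 */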

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#ifdef CONFIG_RELOCATABLE_PPC32
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See description below for VIRT_PHYS_OFFSET */
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
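
/*
 * Usage sketch (editor's illustration; assumes a 4K-page kernel with
 * MEMORY_START = 0, so lowmem starts at PAGE_OFFSET):
 *
 *	void *kaddr = __va(0x100000);		// pa 1M -> lowmem va
 *	struct page *pg = virt_to_page(kaddr);	// struct page for pfn 0x100
 *
 *	// pfn_to_kaddr(page_to_pfn(pg)) == kaddr round-trips.
 */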

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
 *
 *   With RELOCATABLE_PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(_stext.run, 256M)
 *
 *   and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 *   PAGE_OFFSET).  In this case, we would be mapping 0 to 0xc0000000, and
 *   kernstart_addr = 64M.
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		   = 0xbc100000, which is wrong.
 *
 *   Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *   where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *					MODULO(PHYSICAL_START, 256M)
 *
 *   To make __va() / __pa() more lightweight, we introduce a new variable,
 *   virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *				ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 *   Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 *   and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#ifdef CONFIG_BOOKE
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
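
/*
 * Why | and & are equivalent to + and - here (illustrative, assuming
 * the usual 64-bit PAGE_OFFSET of 0xc000000000000000 and physical
 * addresses that never set the top four bits):
 *
 *	__va(0x100000) == (void *)0xc000000000100000	// x | PAGE_OFFSET
 *	__pa(0xc000000000100000) == 0x100000		// keep low 60 bits
 */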

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
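
/*
 * Worked example for the relocatable 32-bit case (illustrative numbers
 * taken from the big comment above): a kernel loaded at 64M with
 * KERNELBASE = 0xc0000000 gives
 *
 *	virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M)
 *				- ALIGN_DOWN(0x04000000, 256M)
 *			 = 0xc0000000 - 0 = 0xc0000000
 *
 *	__va(0x100000)   = 0x100000 + 0xc0000000 = 0xc0100000
 *	__pa(0xc0100000) = 0xc0100000 - 0xc0000000 = 0x100000
 *
 * matching the "rather, it should be" result derived earlier.
 */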

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed;
 * size must be a power of 2 */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
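
/*
 * Quick check (illustrative; size must be a power of 2):
 *
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	_ALIGN_UP(0x1000, 0x1000)   == 0x1000	// already aligned
 */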

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 */
#define HUGEPD_SHIFT_MASK	0x3f

#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

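/*
 * What the struct wrappers buy us (editor's sketch, not part of the
 * header): with STRICT_MM_TYPECHECKS the compiler rejects accidental
 * mixing of page-table types, e.g.
 *
 *	pte_t pte = __pte(0);
 *	pgprot_t prot = pte;	// compile error here; silently accepted
 *				// by the plain-integer variants below
 */
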
#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif

#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif /* STRICT_MM_TYPECHECKS */

typedef struct { signed long pd; } hugepd_t;

#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
static inline int hugepd_ok(hugepd_t hpd)
{
	/*
	 * hugepd pointer: bottom two bits == 00 and the next 4 bits
	 * indicate the size of the table
	 */
	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
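
/*
 * Illustrative reading of the check above (an assumption for exposition,
 * not a new definition): a valid hugepd has its low two bits clear and a
 * nonzero size encoding somewhere in the low HUGEPD_SHIFT_MASK bits, so
 *
 *	(pd & 0x3) == 0x0  &&  (pd & 0x3f) != 0
 *
 * distinguishes it from both a normal table pointer and an empty entry.
 */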
#else
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}
#endif

#define is_hugepd(pdep)		(hugepd_ok(*((hugepd_t *)(pdep))))
int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep)		0
#define pgd_huge(pgd)		0
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */