/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>
#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include "pgtable-nommu.h"

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

#include <asm/pgtable-2level.h>
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
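/*
 * Worked example (illustrative, not part of the original header): if
 * lowmem ends at high_memory == 0xc8000000, then
 *
 *	(0xc8000000 + 0x00800000) & ~0x007fffff == 0xc8800000
 *
 * so vmalloc space starts 8MB above the end of physical memory, and any
 * stray access into that guard hole faults.
 */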
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE
/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cachable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
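/*
 * A minimal sketch of how these compose (illustrative, not part of the
 * original header): pgprot_user supplies the runtime-chosen memory type
 * and architecture bits, and _MOD_PROT() ORs in the per-mapping
 * permissions, e.g.
 *
 *	PAGE_READONLY == __pgprot(pgprot_val(pgprot_user) |
 *				  L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
 *
 * i.e. a user-readable, non-writable, non-executable page.
 */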
#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
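/*
 * Example usage (illustrative): a driver mapping a framebuffer into
 * userspace would typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * which replaces only the L_PTE_MT_MASK memory-type field with
 * L_PTE_MT_BUFFERABLE, leaving the permission bits untouched.
 */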
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
#endif /* __ASSEMBLY__ */
/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
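/*
 * Worked example (illustrative): a PROT_READ|PROT_WRITE private mapping
 * indexes __P011 == __PAGE_COPY, which is user + read-only + non-exec.
 * The write permission is deliberately withheld so the first store
 * faults and the fault handler can perform copy-on-write; the shared
 * equivalent, __S011 == __PAGE_SHARED, is directly writable.
 */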
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
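/*
 * A minimal kernel page-table walk sketched with the accessors above
 * (illustrative only; a real caller must also hold the appropriate
 * locks and check pte_present()):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	if (!pmd_none(*pmd)) {
 *		pte_t *pte = pte_offset_kernel(pmd, addr);
 *		unsigned long pfn = pte_pfn(*pte);
 *	}
 */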
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}
#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
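/*
 * For instance, PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY) above expands
 * to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) |= L_PTE_RDONLY;
 *		return pte;
 *	}
 */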
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
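/*
 * Worked example (illustrative): __swp_entry(1, 2) encodes to
 * (1 << 3) | (2 << 9) == 0x408.  With 6 type bits and 32 - 9 == 23
 * offset bits, each swap file can span 2^23 pages of 4kB == 32GB,
 * matching the comment above.
 */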
/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
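/*
 * Worked example (illustrative): pgoff_to_pte(5) yields
 * __pte((5 << 3) | L_PTE_FILE), and pte_to_pgoff() recovers 5 by
 * shifting back.  PTE_FILE_MAX_BITS == 29 allows offsets of up to
 * 2^29 pages, i.e. 2TB with 4kB pages.
 */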
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>
/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)
#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */