/*
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *	      Ulrich Weigand (weigand@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
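
/*
 * A minimal usage sketch (illustration, not part of the original header):
 * with __HAVE_COLOR_ZERO_PAGE there is not one zero page but a small,
 * contiguous set of them, and the low bits of the user address select
 * the colour:
 *
 *	struct page *zp0 = ZERO_PAGE(0x00000000UL);
 *	struct page *zp1 = ZERO_PAGE(0x00001000UL);
 *
 * Assuming zero_page_mask covers bit 12, zp0 and zp1 are distinct pages,
 * so read faults on zero-mapped areas spread across cache colours.
 */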
/* TODO: s390 cannot support io_remap_pfn_range... */

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
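
/*
 * A worked example of the resulting geometry (sketch derived from the
 * shift values above): with PMD_SHIFT == 20 a segment (pmd) entry maps
 * 1 MB; on 64 bit, PUD_SHIFT == 31 gives 2 GB per region-third entry
 * and PGDIR_SHIFT == 42 gives 4 TB per pgd entry:
 *
 *	PMD_SIZE   == 1UL << 20		// 0x0000000000100000, 1 MB
 *	PUD_SIZE   == 1UL << 31		// 0x0000000080000000, 2 GB
 *	PGDIR_SIZE == 1UL << 42		// 0x0000040000000000, 4 TB
 */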
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PMD	1
#define __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__

/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 *
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 *  |        P-table origin                              |      TT
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 *  |        S-table origin                             |   TF  TTTL
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has the following format:
 *  |      region table origin                          |       DTTL
 *
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
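
/*
 * A worked decode, as a sketch from the table above: the empty pte is
 * _PAGE_INVALID, i.e. 0x400 == .10...000000, so pte_none() is true and
 * pte_present() is false; a read-write, dirty, young pte is
 * .00...111101 == _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY | _PAGE_YOUNG |
 * _PAGE_PRESENT == 0x03d, for which pte_present() is true and both
 * pte_none() and pte_swap() are false.
 */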
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
#define _SEGMENT_ENTRY_BITS_LARGE	0
#define _SEGMENT_ENTRY_ORIGIN_LARGE	0

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *		..R...I.....
 * prot-none	..1...1.....
 * read-only	..1...0.....
 * read-write	..0...0.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UC_BIT	0x00008000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00004000UL	/* IPTE notify bit */
#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */
/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 *
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0).
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
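
/*
 * A hedged reading of the tables above: __Pxxx is the protection used
 * for private (copy-on-write) mappings and __Sxxx for shared ones,
 * indexed by the xwr permission bits. A write-permission private mapping
 * never gets a writable pte up front, e.g.
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *
 * starts out with __P110 == PAGE_READ, so the first store faults and
 * lets the kernel perform copy-on-write; the shared variant maps to
 * __S110 == PAGE_WRITE instead.
 */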
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }
#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}
/**
 * struct gmap_struct - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
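
/*
 * A hedged note on the attach_count handshake used by the two flush
 * helpers above (a reading of the code, not an authoritative statement):
 * the low 16 bits of mm->context.attach_count track how many CPUs have
 * the mm attached, while the upper half counts flushers in flight. When
 * (count & 0xffff) <= active, no CPU other than (possibly) the current
 * one can see the pte, so marking it _PAGE_INVALID and deferring the
 * real invalidation to a later flush_mm is safe; otherwise the pte must
 * be invalidated with an ipte right away.
 */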
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * and flush_tlb_mm are empty functions.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
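
/*
 * An illustrative sketch of the common-code sequence described in the
 * comment before ptep_get_and_clear (a hypothetical caller, not part of
 * this header):
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_range(vma, start, end);	// may then be a nop on s390
 */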
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
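
/*
 * A worked decomposition under the 64 bit shifts defined above (an
 * illustrative sketch; the address is arbitrary): with PAGE_SHIFT == 12
 * and address 0x0000040080112000,
 *
 *	pgd_index(addr) = (addr >> 42) & 2047 = 1
 *	pud_index(addr) = (addr >> 31) & 2047 = 1
 *	pmd_index(addr) = (addr >> 20) & 2047 = 1
 *	pte_index(addr) = (addr >> 12) & 255  = 0x12
 *
 * i.e. 11 + 11 + 11 + 8 index bits plus a 12 bit page offset cover a
 * 53 bit address space.
 */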
#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */
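
/*
 * A hedged note on the dynamic page table levels visible above: a task
 * may run with a 2, 3 or 4 level table, and *pgd is then really a
 * segment, region-third or region-second table entry. With e.g. a three
 * level table the entry type is _REGION_ENTRY_TYPE_R3, so the R2 check
 * in pud_offset() fails and the pgd pointer is reused unchanged as the
 * pud, folding the upper level away at run time.
 */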
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
	       (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp, int full)
{
	pmd_t pmd = *pmdp;

	if (!full)
		pmdp_flush_lazy(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
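
/*
 * A worked encoding, as a sketch of the format above: mk_swap_pte(3, 5)
 * masks the offset, then builds
 *
 *	0x400 | 0x002 | (3 << 2) | ((5 & 1UL) << 7) | ((5 & ~1UL) << 11)
 *	    == 0x400 | 0x002 | 0x00c | 0x080 | 0x2000 == 0x248e
 *
 * which satisfies the swap pattern (pte & 0x603) == 0x402 and decodes
 * back with __swp_type() == 3 and __swp_offset() == 5.
 */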
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */