/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) using cpa_lock, so that no other CPU with stale large TLB
 * entries can change page attributes in parallel while another CPU is
 * splitting a large page entry and changing its attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        /* Protect against CPA */
        spin_lock(&pgd_lock);
        direct_pages_count[level] += pages;
        spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
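
/*
 * For illustration only: on a 64-bit machine with gbpages enabled, the
 * above shows up in /proc/meminfo roughly as (values hypothetical):
 *
 *      DirectMap4k:       10240 kB
 *      DirectMap2M:     4184064 kB
 *      DirectMap1G:     4194304 kB
 *
 * The shifts convert a page count to kB: 4K pages shift by 2, 2M pages
 * by 11, 4M pages by 12 and 1G pages by 20.
 */
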
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
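
/*
 * Illustrative use (not part of this file): a driver that has written
 * through a cached alias and needs the data visible through an
 * uncached mapping could flush just that buffer. 'buf' and 'len' are
 * hypothetical driver-side names:
 *
 *      clflush_cache_range(buf, len);
 *
 * The mb() fences above make the flush ordered with respect to
 * surrounding stores, so the caller does not need its own barriers.
 */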

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large-page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize this further and do individual per-page
         * TLB invalidates for a small number of pages. Caveat: we must
         * flush the high aliases on 64-bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU: clflush is a
         * MESI-coherent instruction that causes all other CPUs to
         * flush the same cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU: clflush is a
         * MESI-coherent instruction that causes all other CPUs to
         * flush the same cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (e.g. ioremap() on BIOS memory is not uncommon), so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                   unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640K and 1MB needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
#ifdef CONFIG_PCI_BIOS
        if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;
#endif

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone after init.
         * On 64-bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
         * Once the kernel maps the text as RO (kernel_set_to_readonly is
         * set), the kernel text mappings for the large-page-aligned text
         * and rodata sections will always be read-only. The kernel
         * identity mappings covering the holes caused by this alignment
         * can be anything the user asks for.
         *
         * This preserves the large page mappings for kernel text/data
         * at no extra cost.
         */
        if (kernel_set_to_readonly &&
            within(address, (unsigned long)_text,
                   (unsigned long)__end_rodata_hpage_align)) {
                unsigned int level;

                /*
                 * Don't enforce the !RW mapping for the kernel text mapping
                 * if the current mapping is already using a small page
                 * mapping. There is no need to work hard to preserve large
                 * page mappings in this case.
                 *
                 * This also fixes the Linux Xen paravirt guest boot failure
                 * caused by unexpected read-only mappings for kernel
                 * identity mappings. In the paravirt guest case, the kernel
                 * text mapping and the kernel identity mapping share the
                 * same page-table pages, so we cannot use different
                 * protections for the kernel text and identity mappings.
                 * These shared mappings are also made of small page
                 * mappings, so not enforcing the !RW mapping for small-page
                 * kernel text mappings helps the Linux Xen paravirt guest
                 * boot as well.
                 */
                if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
                        pgprot_val(forbidden) |= _PAGE_RW;
        }
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

/*
 * Look up the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);
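
/*
 * Illustrative use (not part of this file): checking whether a kernel
 * virtual address is currently backed by a large mapping. 'addr' is a
 * hypothetical caller-side name:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && (pte_val(*pte) & _PAGE_PRESENT) &&
 *          level != PG_LEVEL_4K)
 *              // addr is covered by a 2M or 1G mapping
 *
 * Note that for large or non-present entries the returned pointer is
 * really a pud/pmd cast to pte_t *, as described above.
 */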

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages that fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = req_prot = pte_pgprot(old_pte);

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(req_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
        pfn = pte_pfn(old_pte);
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check whether we can
         * change the large page in one go. We request a split when
         * the address is not aligned or the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock(&pgd_lock);

        return do_split;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
         * If we ever want to utilize the PAT bit, we need to
         * update this function to make sure it's converted from
         * bit 12 to bit 7 when we cross from the 2MB level to
         * the 4K level:
         */
        WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
                address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);

#ifdef CONFIG_X86_64
        if (address >= (unsigned long)__va(1UL<<32) &&
                address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);
#endif

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();

        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base)
                __free_page(base);
        spin_unlock(&pgd_lock);

        return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non-primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1', indicating that we processed the cpa
         * request for one virtual address page and its pfn. TBD: numpages
         * can be set based on the initial value and the level returned by
         * lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                address = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE;
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and the CPA_FLUSHTLB flag have been
         * updated in try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                /*
                 * Do a global TLB flush after splitting the large page
                 * and before we do the actual change of the page attribute
                 * in the PTE.
                 *
                 * Without this, we violate the TLB application note, which
                 * says "The TLBs may contain both ordinary and large-page
                 *  translations for a 4-KByte range of linear addresses. This
                 *  may occur if software modifies the paging structures so that
                 *  the page size used for the address range changes. If the two
                 *  translations differ with respect to page frame or attributes
                 *  (e.g., permissions), processor behavior is undefined and may
                 *  be implementation-specific."
                 *
                 * We do this global TLB flush inside the cpa_lock, so that no
                 * other CPU with stale TLB entries can, in parallel, change a
                 * page attribute that falls into the just-split large page
                 * entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (cpa->pfn >= max_pfn_mapped)
                return 0;

#ifdef CONFIG_X86_64
        if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
                return 0;
#endif
        /*
         * No need to redo when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large pages */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;

        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;
        unsigned long baddr = 0;

        /*
         * Check if we are requested to change an unsupported feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * An in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case.
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
                /*
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
                baddr = *addr;
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, in
         * the error case, and during early boot (for EFI) we fall
         * back to cpa_flush_all() (which uses wbinvd):
         */
        if (early_boot_irqs_disabled)
                __cpa_flush_all((void *)(long)cache);
        else if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
                        cpa_flush_range(baddr, numpages, cache);
        } else
                cpa_flush_all(cache);

out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * For now UC-; see comments in ioremap_nocache().
         */
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * For now UC-; see comments in ioremap_nocache().
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                            _PAGE_CACHE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);
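
/*
 * Illustrative use (not part of this file): a driver mapping a
 * device-shared buffer uncached for its lifetime, then restoring
 * write-back on teardown. 'vaddr' and 'nr_pages' are hypothetical:
 *
 *      if (set_memory_uc(vaddr, nr_pages))
 *              goto fail;
 *      ...
 *      set_memory_wb(vaddr, nr_pages);
 *
 * set_memory_uc() reserves the memtype via PAT before changing the
 * attribute, so the matching set_memory_wb() is what releases that
 * reservation (via free_memtype()).
 */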

static int _set_memory_array(unsigned long *addr, int addrinarray,
                unsigned long new_type)
{
        int i, j;
        int ret;

        /*
         * For now UC-; see comments in ioremap_nocache().
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                        new_type, NULL);
                if (ret)
                        goto out_free;
        }

        ret = change_page_attr_set(addr, addrinarray,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 1);

        if (!ret && new_type == _PAGE_CACHE_WC)
                ret = change_page_attr_set_clr(addr, addrinarray,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_ARRAY, NULL);
        if (ret)
                goto out_free;

        return 0;

out_free:
        for (j = 0; j < i; j++)
                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

        return ret;
}

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_memory_array_uc);

int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_memory_array_wc);

int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;
        unsigned long addr_copy = addr;

        ret = change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
        if (!ret) {
                ret = change_page_attr_set_clr(&addr_copy, numpages,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
        return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        if (!pat_enabled)
                return set_memory_uc(addr, numpages);

        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                _PAGE_CACHE_WC, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_wc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_wc);
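
/*
 * Illustrative use (not part of this file): write-combining is a
 * typical choice for a framebuffer aperture, where streaming stores
 * should coalesce. 'fb_vaddr' and 'fb_pages' are hypothetical:
 *
 *      err = set_memory_wc(fb_vaddr, fb_pages);
 *      if (err)
 *              // handle the error; the range stays write-back
 *
 * Note that set_memory_wc() itself degrades to set_memory_uc() when
 * PAT is disabled, so callers get UC- semantics in that case.
 */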

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
        int ret;

        ret = _set_memory_wb(addr, numpages);
        if (ret)
                return ret;

        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
        int i;
        int ret;

        ret = change_page_attr_clear(addr, addrinarray,
                                      __pgprot(_PAGE_CACHE_MASK), 1);
        if (ret)
                return ret;

        for (i = 0; i < addrinarray; i++)
                free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

        return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);
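
/*
 * Illustrative use (not part of this file): write-protecting a data
 * structure after init and lifting the protection only around updates.
 * 'table' and 'nr' are hypothetical:
 *
 *      set_memory_ro((unsigned long)table, nr);
 *      ...
 *      set_memory_rw((unsigned long)table, nr);
 *      // update table
 *      set_memory_ro((unsigned long)table, nr);
 */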

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

static int _set_pages_array(struct page **pages, int addrinarray,
                unsigned long new_type)
{
        unsigned long start;
        unsigned long end;
        int i;
        int free_idx;
        int ret;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, new_type, NULL))
                        goto err_out;
        }

        ret = cpa_set_pages_array(pages, addrinarray,
                        __pgprot(_PAGE_CACHE_UC_MINUS));
        if (!ret && new_type == _PAGE_CACHE_WC)
                ret = change_page_attr_set_clr(NULL, addrinarray,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_PAGES_ARRAY, pages);
        if (ret)
                goto err_out;
        return 0; /* Success */
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
        return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        int retval;
        unsigned long start;
        unsigned long end;
        int i;

        retval = cpa_clear_pages_array(pages, addrinarray,
                        __pgprot(_PAGE_CACHE_MASK));
        if (retval)
                return retval;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }

        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0),
                                .flags = 0};

        /*
         * No alias checking needed for setting the present flag.
         * Otherwise, we may need to break large pages for 64-bit
         * kernel text mappings (this adds to complexity if we want
         * to do this from atomic context especially). Let's keep it
         * simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .flags = 0};

        /*
         * No alias checking needed for clearing the present flag.
         * Otherwise, we may need to break large pages for 64-bit
         * kernel text mappings (this adds to complexity if we want
         * to do this from atomic context especially). Let's keep it
         * simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages for identity mappings are not used at boot time
         * and hence there are no memory allocations during large page
         * splits.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock, so flush only the current CPU:
         */
        __flush_tlb_all();
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif