mm: numa: do not clear PTE for pte_numa update
[firefly-linux-kernel-4.4.55.git] mm/mprotect.c
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

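/*
 * Walk the ptes mapped by one pmd and apply the new protection under the
 * page table lock.  For a regular mprotect() update (!prot_numa) each pte
 * is cleared and rewritten via ptep_modify_prot_start()/commit().  For a
 * NUMA hinting update (prot_numa) the pte is only marked pte_numa in place
 * with set_pte_at(), so it is never transiently cleared; that is the point
 * of the "do not clear PTE for pte_numa update" change.  Returns the
 * number of entries that were actually updated.
 */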
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				ptent = *pte;
				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					if (!pte_numa(oldpte)) {
						ptent = pte_mknuma(ptent);
						set_pte_at(mm, addr, pte, ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;

			/* Only !prot_numa always clears the pte */
			if (!prot_numa)
				ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

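/*
 * Walk the pmds covering [addr, end).  A transparent huge pmd that only
 * partially overlaps the range is split; otherwise change_huge_pmd()
 * handles the whole huge entry and a full-size update is counted towards
 * NUMA_HUGE_PTE_UPDATES.  Everything else falls through to
 * change_pte_range().
 */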
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}
					continue;
				}
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

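/* Walk the puds covering [addr, end) and descend into each usable pmd. */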
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

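/*
 * Top of the page table walk for one vma: iterate the pgd entries covering
 * [addr, end), then flush the TLB afterwards, but only if at least one
 * entry was actually modified.
 */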
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

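/*
 * Apply newprot to [start, end) of a vma.  hugetlb vmas take their own
 * path; everything else goes through the generic page table walk above.
 * MMU notifiers bracket the update so that secondary MMUs are invalidated.
 * Returns the number of base pages whose protection changed.
 */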
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

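/*
 * Apply newflags to [start, end), which must lie within vma.  Charges
 * commit for a private mapping that becomes writable, tries to merge with
 * the neighbouring vmas, splits the vma if the range covers only part of
 * it, and then rewrites the page protections.
 */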
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

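/*
 * mprotect(2): validate the request, translate PROT_* into VM_* flags
 * (honouring READ_IMPLIES_EXEC), then walk the vmas covering
 * [start, start + len) and fix each one up in turn, stopping at the first
 * hole or error.
 */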
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}