mm,numa: reorganize change_pmd_range()
[firefly-linux-kernel-4.4.55.git] mm/mprotect.c
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

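/*
 * Fallback for architectures that do not provide their own
 * pgprot_modify(): adopt the new protection bits wholesale.
 */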
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif

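/*
 * Walk the ptes mapped by one pmd entry and apply the new protection.
 * In prot_numa mode, present pages (other than KSM pages) are instead
 * marked pte_numa so that later accesses trap NUMA hinting faults.
 * Write migration entries are downgraded to read.  Returns the number
 * of entries that were actually updated.
 */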
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool updated = false;

                        if (!prot_numa) {
                                ptent = ptep_modify_prot_start(mm, addr, pte);
                                if (pte_numa(ptent))
                                        ptent = pte_mknonnuma(ptent);
                                ptent = pte_modify(ptent, newprot);
                                /*
                                 * Avoid taking write faults for pages we
                                 * know to be dirty.
                                 */
                                if (dirty_accountable && pte_dirty(ptent))
                                        ptent = pte_mkwrite(ptent);
                                ptep_modify_prot_commit(mm, addr, pte, ptent);
                                updated = true;
                        } else {
                                struct page *page;

                                page = vm_normal_page(vma, addr, oldpte);
                                if (page && !PageKsm(page)) {
                                        if (!pte_numa(oldpte)) {
                                                ptep_set_numa(mm, addr, pte);
                                                updated = true;
                                        }
                                }
                        }
                        if (updated)
                                pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

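/*
 * Walk the pmd entries under one pud.  A transparent huge pmd that
 * covers the whole range is updated in place by change_huge_pmd();
 * one that is only partially covered is split first and then handled
 * pte by pte.  Whole-pmd updates are counted separately so the
 * NUMA_HUGE_PTE_UPDATES vmstat can be bumped once at the end.
 */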
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
                        continue;
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }
                                        continue;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                VM_BUG_ON(pmd_trans_huge(*pmd));
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

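/* Walk the pud entries under one pgd, recursing into each pmd range. */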
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}

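/*
 * Top-level page table walk for a protection change: flush the cache
 * for the range, mark a TLB flush as pending on the mm, walk the pgd
 * entries, and flush the TLB only if any entries were actually
 * modified.
 */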
static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}

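/*
 * Apply the new protection to every page mapped in [start, end),
 * bracketed by mmu notifier invalidation.  hugetlbfs vmas take their
 * own path; everything else goes through the generic walk above.
 */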
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
                       int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long pages;

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot,
                                dirty_accountable, prot_numa);
        mmu_notifier_invalidate_range_end(mm, start, end);

        return pages;
}

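/*
 * Apply newflags to the range [start, end) of vma: charge any new
 * commit for a private mapping that becomes writable, merge or split
 * the vma as needed, then rewrite vm_page_prot and the underlying
 * page tables.
 */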
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again. hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));

        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

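/*
 * mprotect(2): validate the arguments, then walk the vmas covering
 * [start, start + len) and apply the new protection one vma at a
 * time via mprotect_fixup().
 */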
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags;
                newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts the VM_MAY% bits into the VM_% positions */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}