/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing.  Others, which simply traverse vmas, need
 * only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
                return 0;
        default:
                /* be safe, default to 1. list exceptions explicitly */
                return 1;
        }
}
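
/*
 * For illustration, this rule is applied in sys_madvise() below, which
 * picks the mmap_sem mode from this helper before walking any vmas:
 *
 *	if (madvise_need_mmap_write(behavior))
 *		down_write(&current->mm->mmap_sem);
 *	else
 *		down_read(&current->mm->mmap_sem);
 */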

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
                     struct vm_area_struct **prev,
                     unsigned long start, unsigned long end, int behavior)
{
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;

        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                if (vma->vm_flags & VM_IO) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTCOPY;
                break;
        case MADV_DONTDUMP:
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
                if (new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTDUMP;
                break;
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, start, end, behavior, &new_flags);
                if (error)
                        goto out;
                break;
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
                error = hugepage_madvise(vma, &new_flags, behavior);
                if (error)
                        goto out;
                break;
        }

        if (new_flags == vma->vm_flags) {
                *prev = vma;
                goto out;
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                                vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        *prev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto out;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto out;
        }

success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;

out:
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}
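
/*
 * Worked example (hypothetical addresses): advising the middle page of a
 * three-page vma [0x1000, 0x4000) with MADV_DONTFORK first fails to merge
 * with its neighbours, then split_vma() runs twice -- at 0x2000 and again
 * at 0x3000 -- leaving three vmas, of which only [0x2000, 0x3000) gets
 * VM_DONTCOPY set.
 */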

#ifdef CONFIG_SWAP
/*
 * Walk the ptes under one pmd and start asynchronous swap-in for every
 * swap entry found, so the pages are resident by the time the
 * application touches them.
 */
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
        unsigned long end, struct mm_walk *walk)
{
        pte_t *orig_pte;
        struct vm_area_struct *vma = walk->private;
        unsigned long index;

        if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                return 0;

        for (index = start; index != end; index += PAGE_SIZE) {
                pte_t pte;
                swp_entry_t entry;
                struct page *page;
                spinlock_t *ptl;

                /* take the pte lock only long enough to snapshot one pte */
                orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
                pte = *(orig_pte + ((index - start) / PAGE_SIZE));
                pte_unmap_unlock(orig_pte, ptl);

                if (pte_present(pte) || pte_none(pte) || pte_file(pte))
                        continue;
                entry = pte_to_swp_entry(pte);
                if (unlikely(non_swap_entry(entry)))
                        continue;

                page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                                                vma, index);
                if (page)
                        page_cache_release(page);
        }

        return 0;
}

/* Fault in swapped-out anonymous pages over [start, end) ahead of use. */
static void force_swapin_readahead(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_walk walk = {
                .mm = vma->vm_mm,
                .pmd_entry = swapin_walk_pmd_entry,
                .private = vma,
        };

        walk_page_range(start, end, &walk);

        lru_add_drain();        /* Push any new pages onto the LRU now */
}

/*
 * Like force_swapin_readahead(), but for shmem/tmpfs mappings: look up
 * swap entries via the mapping's radix tree rather than the page tables.
 */
static void force_shm_swapin_readahead(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct address_space *mapping)
{
        pgoff_t index;
        struct page *page;
        swp_entry_t swap;

        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

                page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        /* a real page is already resident; nothing to do */
                        if (page)
                                page_cache_release(page);
                        continue;
                }
                swap = radix_to_swp_entry(page);
                page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
                                                                NULL, 0);
                if (page)
                        page_cache_release(page);
        }

        lru_add_drain();        /* Push any new pages onto the LRU now */
}
#endif          /* CONFIG_SWAP */
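
/*
 * Worked example of the index arithmetic above (made-up numbers, assuming
 * PAGE_SHIFT == 12): for a vma mapped at 0x7f0000000000 with vm_pgoff 3,
 * address 0x7f0000002000 resolves to file page index
 * (0x2000 >> PAGE_SHIFT) + 3 = 5.
 */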

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
        if (!file || mapping_cap_swap_backed(file->f_mapping)) {
                *prev = vma;
                if (!file)
                        force_swapin_readahead(vma, start, end);
                else
                        force_shm_swapin_readahead(vma, start, end,
                                                file->f_mapping);
                return 0;
        }
#endif

        if (!file)
                return -EBADF;

        if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }

        *prev = vma;
        /* from here on, start and end are file page offsets, not addresses */
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
}
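
/*
 * Userspace sketch (not part of this file): prefetching a file-backed
 * mapping so the readahead scheduled above overlaps with computation.
 * The fd and length are made up for illustration.
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_WILLNEED);  (advice only; may be ignored)
 */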

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
                struct zap_details details = {
                        .nonlinear_vma = vma,
                        .last_index = ULONG_MAX,
                };
                zap_page_range(vma, start, end - start, &details);
        } else
                zap_page_range(vma, start, end - start, NULL);
        return 0;
}
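
/*
 * Userspace sketch (not part of this file): the allocator pattern the
 * comment above alludes to -- discard page contents without unmapping,
 * so the next touch refaults zero-filled pages.  The helper name is
 * made up for illustration.
 *
 *	void arena_trim(void *chunk, size_t len)
 *	{
 *		madvise(chunk, len, MADV_DONTNEED);
 *	}
 */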

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
                                struct vm_area_struct **prev,
                                unsigned long start, unsigned long end)
{
        loff_t offset;
        int error;
        struct file *f;

        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */

        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;

        f = vma->vm_file;

        if (!f || !f->f_mapping || !f->f_mapping->host)
                return -EINVAL;

        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /*
         * Filesystem's fallocate may need to take i_mutex.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_sem.
         */
        get_file(f);
        up_read(&current->mm->mmap_sem);
        error = do_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
        fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
}
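
/*
 * Userspace sketch (not part of this file): MADV_REMOVE punching a hole
 * in a shared, writable tmpfs mapping.  The fd and sizes are made up;
 * the range must cover whole pages.
 *
 *	char *map = mmap(NULL, 8 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, tmpfs_fd, 0);
 *	if (map != MAP_FAILED)
 *		madvise(map + 2 * 4096, 2 * 4096, MADV_REMOVE);
 */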

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
        struct page *p;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        /*
         * The loop step is evaluated at the end of each iteration, once p
         * has been set by get_user_pages_fast() below, so a compound page
         * is skipped in a single stride.
         */
        for (; start < end; start += PAGE_SIZE <<
                                compound_order(compound_head(p))) {
                int ret;

                ret = get_user_pages_fast(start, 1, 0, &p);
                if (ret != 1)
                        return ret;

                if (PageHWPoison(p)) {
                        put_page(p);
                        continue;
                }
                if (bhv == MADV_SOFT_OFFLINE) {
                        pr_info("Soft offlining page %#lx at %#lx\n",
                                page_to_pfn(p), start);
                        ret = soft_offline_page(p, MF_COUNT_INCREASED);
                        if (ret)
                                return ret;
                        continue;
                }
                pr_info("Injecting memory failure for page %#lx at %#lx\n",
                       page_to_pfn(p), start);
                /* Ignore return value for now */
                memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
        }
        return 0;
}
#endif
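
/*
 * Userspace sketch (not part of this file): injecting a memory failure
 * into one page of an anonymous mapping, for testing only.  Requires
 * CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE.
 *
 *	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (page != MAP_FAILED)
 *		madvise(page, 4096, MADV_HWPOISON);
 */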

/*
 * Apply one behavior to a single vma.  The three behaviors that never
 * modify vm_flags dispatch to their own helpers; everything else updates
 * vm_flags via madvise_behavior().
 */
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                unsigned long start, unsigned long end, int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
                return madvise_remove(vma, prev, start, end);
        case MADV_WILLNEED:
                return madvise_willneed(vma, prev, start, end);
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
}

static int
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
#ifdef CONFIG_KSM
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
#endif
        case MADV_DONTDUMP:
        case MADV_DODUMP:
                return 1;

        default:
                return 0;
        }
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *              results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *              on any access, since it is unlikely that the appli-
 *              cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *              once, so they can be aggressively read ahead, and
 *              can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *              some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *              so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *              pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *              typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *              this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by
 *              transparent huge pages in the future.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *              transparent huge pages.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *              from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: include the given range in a core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *              "behavior" is not a valid value, or application
 *              is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *              mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
        unsigned long end, tmp;
        struct vm_area_struct *vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
        int write;
        size_t len;
        struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
        if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
                return madvise_hwpoison(behavior, start, start+len_in);
#endif
        if (!madvise_behavior_valid(behavior))
                return error;

        if (start & ~PAGE_MASK)
                return error;
        len = (len_in + ~PAGE_MASK) & PAGE_MASK;

        /* Check to see whether len was rounded up from small -ve to zero */
        if (len_in && !len)
                return error;

        end = start + len;
        if (end < start)
                return error;

        error = 0;
        if (end == start)
                return error;

        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
        else
                down_read(&current->mm->mmap_sem);

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         * - different from the way of handling in mlock etc.
         */
        vma = find_vma_prev(current->mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;

        blk_start_plug(&plug);
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                        if (start >= end)
                                goto out;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = madvise_vma(vma, &prev, start, tmp, behavior);
                if (error)
                        goto out;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                error = unmapped_error;
                if (start >= end)
                        goto out;
                if (prev)
                        vma = prev->vm_next;
                else    /* madvise_remove dropped mmap_sem */
                        vma = find_vma(current->mm, start);
        }
out:
        blk_finish_plug(&plug);
        if (write)
                up_write(&current->mm->mmap_sem);
        else
                up_read(&current->mm->mmap_sem);

        return error;
}
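
/*
 * Userspace sketch (not part of this file): a complete madvise(2) call
 * with the error handling implied by the return values documented above.
 * The mapping parameters are made up for illustration.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16 * 4096;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		if (madvise(p, len, MADV_SEQUENTIAL) != 0)
 *			perror("madvise");	(EINVAL, ENOMEM, EAGAIN, ...)
 *		munmap(p, len);
 *		return 0;
 *	}
 */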