hugetlb: make some static variables global
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/tlb.h>
28
29 #include <linux/io.h>
30 #include <linux/hugetlb.h>
31 #include <linux/node.h>
32 #include "internal.h"
33
34 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
35 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
36 unsigned long hugepages_treat_as_movable;
37
38 int hugetlb_max_hstate __read_mostly;
39 unsigned int default_hstate_idx;
40 struct hstate hstates[HUGE_MAX_HSTATE];
41
42 __initdata LIST_HEAD(huge_boot_pages);
43
44 /* for command line parsing */
45 static struct hstate * __initdata parsed_hstate;
46 static unsigned long __initdata default_hstate_max_huge_pages;
47 static unsigned long __initdata default_hstate_size;
48
49 /*
50  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
51  */
52 DEFINE_SPINLOCK(hugetlb_lock);
53
54 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
55 {
56         bool free = (spool->count == 0) && (spool->used_hpages == 0);
57
58         spin_unlock(&spool->lock);
59
60         /* If no pages are used, and no other handles to the subpool
61          * remain, free the subpool. */
62         if (free)
63                 kfree(spool);
64 }
65
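/*
 * Create a new subpool limited to nr_blocks huge pages, holding one
 * initial reference for the caller.
 */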
66 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
67 {
68         struct hugepage_subpool *spool;
69
70         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
71         if (!spool)
72                 return NULL;
73
74         spin_lock_init(&spool->lock);
75         spool->count = 1;
76         spool->max_hpages = nr_blocks;
77         spool->used_hpages = 0;
78
79         return spool;
80 }
81
82 void hugepage_put_subpool(struct hugepage_subpool *spool)
83 {
84         spin_lock(&spool->lock);
85         BUG_ON(!spool->count);
86         spool->count--;
87         unlock_or_release_subpool(spool);
88 }
89
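/*
 * Charge 'delta' huge pages against the subpool limit.  Returns 0 on
 * success or -ENOMEM if the limit would be exceeded; a NULL subpool is
 * treated as having no limit.
 */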
90 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
91                                       long delta)
92 {
93         int ret = 0;
94
95         if (!spool)
96                 return 0;
97
98         spin_lock(&spool->lock);
99         if ((spool->used_hpages + delta) <= spool->max_hpages) {
100                 spool->used_hpages += delta;
101         } else {
102                 ret = -ENOMEM;
103         }
104         spin_unlock(&spool->lock);
105
106         return ret;
107 }
108
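/*
 * Return 'delta' huge pages to the subpool.  The subpool is freed here if
 * the last reference has already been dropped and no pages remain in use.
 */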
109 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
110                                        long delta)
111 {
112         if (!spool)
113                 return;
114
115         spin_lock(&spool->lock);
116         spool->used_hpages -= delta;
117         /* If hugetlbfs_put_super couldn't free spool due to
118          * an outstanding quota reference, free it now. */
119         unlock_or_release_subpool(spool);
120 }
121
122 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
123 {
124         return HUGETLBFS_SB(inode->i_sb)->spool;
125 }
126
127 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
128 {
129         return subpool_inode(vma->vm_file->f_dentry->d_inode);
130 }
131
132 /*
133  * Region tracking -- allows tracking of reservations and instantiated pages
134  *                    across the pages in a mapping.
135  *
136  * The region data structures are protected by a combination of the mmap_sem
137  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
138  * must either hold the mmap_sem for write, or the mmap_sem for read and
139  * the hugetlb_instantiation_mutex:
140  *
141  *      down_write(&mm->mmap_sem);
142  * or
143  *      down_read(&mm->mmap_sem);
144  *      mutex_lock(&hugetlb_instantiation_mutex);
145  */
146 struct file_region {
147         struct list_head link;
148         long from;
149         long to;
150 };
151
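/*
 * Add the range [f, t) to the reserve map, merging it with any existing
 * regions it overlaps or abuts.  The caller is expected to have called
 * region_chg() beforehand so that a region covering the start of the
 * range already exists.
 */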
152 static long region_add(struct list_head *head, long f, long t)
153 {
154         struct file_region *rg, *nrg, *trg;
155
156         /* Locate the region we are either in or before. */
157         list_for_each_entry(rg, head, link)
158                 if (f <= rg->to)
159                         break;
160
161         /* Round our left edge to the current segment if it encloses us. */
162         if (f > rg->from)
163                 f = rg->from;
164
165         /* Check for and consume any regions we now overlap with. */
166         nrg = rg;
167         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
168                 if (&rg->link == head)
169                         break;
170                 if (rg->from > t)
171                         break;
172
173                 /* If this area reaches higher, then extend our area to
174                  * include it completely.  If this is not the first area
175                  * which we intend to reuse, free it. */
176                 if (rg->to > t)
177                         t = rg->to;
178                 if (rg != nrg) {
179                         list_del(&rg->link);
180                         kfree(rg);
181                 }
182         }
183         nrg->from = f;
184         nrg->to = t;
185         return 0;
186 }
187
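/*
 * Return the number of huge pages in [f, t) that are not yet covered by
 * the reserve map.  If the range starts below every existing region, a
 * zero-sized placeholder is inserted so that a subsequent region_add()
 * for the same range cannot fail.
 */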
188 static long region_chg(struct list_head *head, long f, long t)
189 {
190         struct file_region *rg, *nrg;
191         long chg = 0;
192
193         /* Locate the region we are before or in. */
194         list_for_each_entry(rg, head, link)
195                 if (f <= rg->to)
196                         break;
197
198         /* If we are below the current region then a new region is required.
199          * Subtle: allocate a new region at this position but make it zero
200          * size so that we are guaranteed to be able to record the reservation. */
201         if (&rg->link == head || t < rg->from) {
202                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
203                 if (!nrg)
204                         return -ENOMEM;
205                 nrg->from = f;
206                 nrg->to   = f;
207                 INIT_LIST_HEAD(&nrg->link);
208                 list_add(&nrg->link, rg->link.prev);
209
210                 return t - f;
211         }
212
213         /* Round our left edge to the current segment if it encloses us. */
214         if (f > rg->from)
215                 f = rg->from;
216         chg = t - f;
217
218         /* Check for and consume any regions we now overlap with. */
219         list_for_each_entry(rg, rg->link.prev, link) {
220                 if (&rg->link == head)
221                         break;
222                 if (rg->from > t)
223                         return chg;
224
225                 /* We overlap with this area; if it extends further than
226                  * us then we must extend ourselves.  Account for its
227                  * existing reservation. */
228                 if (rg->to > t) {
229                         chg += rg->to - t;
230                         t = rg->to;
231                 }
232                 chg -= rg->to - rg->from;
233         }
234         return chg;
235 }
236
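/*
 * Trim the reserve map so that no region extends beyond 'end', freeing
 * any regions that lie entirely above it.  Returns the number of huge
 * pages removed from the map.
 */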
237 static long region_truncate(struct list_head *head, long end)
238 {
239         struct file_region *rg, *trg;
240         long chg = 0;
241
242         /* Locate the region we are either in or before. */
243         list_for_each_entry(rg, head, link)
244                 if (end <= rg->to)
245                         break;
246         if (&rg->link == head)
247                 return 0;
248
249         /* If we are in the middle of a region then adjust it. */
250         if (end > rg->from) {
251                 chg = rg->to - end;
252                 rg->to = end;
253                 rg = list_entry(rg->link.next, typeof(*rg), link);
254         }
255
256         /* Drop any remaining regions. */
257         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
258                 if (&rg->link == head)
259                         break;
260                 chg += rg->to - rg->from;
261                 list_del(&rg->link);
262                 kfree(rg);
263         }
264         return chg;
265 }
266
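/*
 * Count the huge pages in [f, t) that are covered by the reserve map,
 * i.e. the sum of the overlaps with existing regions.
 */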
267 static long region_count(struct list_head *head, long f, long t)
268 {
269         struct file_region *rg;
270         long chg = 0;
271
272         /* Locate each segment we overlap with, and count that overlap. */
273         list_for_each_entry(rg, head, link) {
274                 long seg_from;
275                 long seg_to;
276
277                 if (rg->to <= f)
278                         continue;
279                 if (rg->from >= t)
280                         break;
281
282                 seg_from = max(rg->from, f);
283                 seg_to = min(rg->to, t);
284
285                 chg += seg_to - seg_from;
286         }
287
288         return chg;
289 }
290
291 /*
292  * Convert the address within this vma to the page offset within
293  * the mapping, in pagecache page units; huge pages here.
294  */
295 static pgoff_t vma_hugecache_offset(struct hstate *h,
296                         struct vm_area_struct *vma, unsigned long address)
297 {
298         return ((address - vma->vm_start) >> huge_page_shift(h)) +
299                         (vma->vm_pgoff >> huge_page_order(h));
300 }
301
302 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
303                                      unsigned long address)
304 {
305         return vma_hugecache_offset(hstate_vma(vma), vma, address);
306 }
307
308 /*
309  * Return the size of the pages allocated when backing a VMA. In the majority
310  * of cases this will be the same size as used by the page table entries.
311  */
312 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
313 {
314         struct hstate *hstate;
315
316         if (!is_vm_hugetlb_page(vma))
317                 return PAGE_SIZE;
318
319         hstate = hstate_vma(vma);
320
321         return 1UL << (hstate->order + PAGE_SHIFT);
322 }
323 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
324
325 /*
326  * Return the page size being used by the MMU to back a VMA. In the majority
327  * of cases, the page size used by the kernel matches the MMU size. On
328  * architectures where it differs, an architecture-specific version of this
329  * function is required.
330  */
331 #ifndef vma_mmu_pagesize
332 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
333 {
334         return vma_kernel_pagesize(vma);
335 }
336 #endif
337
338 /*
339  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
340  * bits of the reservation map pointer, which are always clear due to
341  * alignment.
342  */
343 #define HPAGE_RESV_OWNER    (1UL << 0)
344 #define HPAGE_RESV_UNMAPPED (1UL << 1)
345 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
346
347 /*
348  * These helpers are used to track how many pages are reserved for
349  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
350  * is guaranteed to have its future faults succeed.
351  *
352  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
353  * the reserve counters are updated with the hugetlb_lock held. It is safe
354  * to reset the VMA at fork() time as it is not in use yet and there is no
355  * chance of the global counters getting corrupted as a result of the values.
356  *
357  * The private mapping reservation is represented in a subtly different
358  * manner to a shared mapping.  A shared mapping has a region map associated
359  * with the underlying file; this region map represents the backing file
360  * pages which have ever had a reservation assigned, and this persists even
361  * after the page is instantiated.  A private mapping has a region map
362  * associated with the original mmap which is attached to all VMAs which
363  * reference it; this region map represents those offsets which have consumed
364  * a reservation, i.e. where pages have been instantiated.
365  */
366 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
367 {
368         return (unsigned long)vma->vm_private_data;
369 }
370
371 static void set_vma_private_data(struct vm_area_struct *vma,
372                                                         unsigned long value)
373 {
374         vma->vm_private_data = (void *)value;
375 }
376
377 struct resv_map {
378         struct kref refs;
379         struct list_head regions;
380 };
381
382 static struct resv_map *resv_map_alloc(void)
383 {
384         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
385         if (!resv_map)
386                 return NULL;
387
388         kref_init(&resv_map->refs);
389         INIT_LIST_HEAD(&resv_map->regions);
390
391         return resv_map;
392 }
393
394 static void resv_map_release(struct kref *ref)
395 {
396         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
397
398         /* Clear out any active regions before we release the map. */
399         region_truncate(&resv_map->regions, 0);
400         kfree(resv_map);
401 }
402
403 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
404 {
405         VM_BUG_ON(!is_vm_hugetlb_page(vma));
406         if (!(vma->vm_flags & VM_MAYSHARE))
407                 return (struct resv_map *)(get_vma_private_data(vma) &
408                                                         ~HPAGE_RESV_MASK);
409         return NULL;
410 }
411
412 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
413 {
414         VM_BUG_ON(!is_vm_hugetlb_page(vma));
415         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
416
417         set_vma_private_data(vma, (get_vma_private_data(vma) &
418                                 HPAGE_RESV_MASK) | (unsigned long)map);
419 }
420
421 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
422 {
423         VM_BUG_ON(!is_vm_hugetlb_page(vma));
424         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
425
426         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
427 }
428
429 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
430 {
431         VM_BUG_ON(!is_vm_hugetlb_page(vma));
432
433         return (get_vma_private_data(vma) & flag) != 0;
434 }
435
436 /* Decrement the reserved pages in the hugepage pool by one */
437 static void decrement_hugepage_resv_vma(struct hstate *h,
438                         struct vm_area_struct *vma)
439 {
440         if (vma->vm_flags & VM_NORESERVE)
441                 return;
442
443         if (vma->vm_flags & VM_MAYSHARE) {
444                 /* Shared mappings always use reserves */
445                 h->resv_huge_pages--;
446         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
447                 /*
448                  * Only the process that called mmap() has reserves for
449                  * private mappings.
450                  */
451                 h->resv_huge_pages--;
452         }
453 }
454
455 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
456 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
457 {
458         VM_BUG_ON(!is_vm_hugetlb_page(vma));
459         if (!(vma->vm_flags & VM_MAYSHARE))
460                 vma->vm_private_data = (void *)0;
461 }
462
463 /* Returns true if the VMA has associated reserve pages */
464 static int vma_has_reserves(struct vm_area_struct *vma)
465 {
466         if (vma->vm_flags & VM_MAYSHARE)
467                 return 1;
468         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
469                 return 1;
470         return 0;
471 }
472
473 static void copy_gigantic_page(struct page *dst, struct page *src)
474 {
475         int i;
476         struct hstate *h = page_hstate(src);
477         struct page *dst_base = dst;
478         struct page *src_base = src;
479
480         for (i = 0; i < pages_per_huge_page(h); ) {
481                 cond_resched();
482                 copy_highpage(dst, src);
483
484                 i++;
485                 dst = mem_map_next(dst, dst_base, i);
486                 src = mem_map_next(src, src_base, i);
487         }
488 }
489
490 void copy_huge_page(struct page *dst, struct page *src)
491 {
492         int i;
493         struct hstate *h = page_hstate(src);
494
495         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
496                 copy_gigantic_page(dst, src);
497                 return;
498         }
499
500         might_sleep();
501         for (i = 0; i < pages_per_huge_page(h); i++) {
502                 cond_resched();
503                 copy_highpage(dst + i, src + i);
504         }
505 }
506
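/* Return a huge page to its node's free list.  Called with hugetlb_lock held. */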
507 static void enqueue_huge_page(struct hstate *h, struct page *page)
508 {
509         int nid = page_to_nid(page);
510         list_move(&page->lru, &h->hugepage_freelists[nid]);
511         h->free_huge_pages++;
512         h->free_huge_pages_node[nid]++;
513 }
514
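/*
 * Take a huge page off the given node's free list, move it to the active
 * list and give it an initial reference.  Called with hugetlb_lock held.
 */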
515 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
516 {
517         struct page *page;
518
519         if (list_empty(&h->hugepage_freelists[nid]))
520                 return NULL;
521         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
522         list_move(&page->lru, &h->hugepage_activelist);
523         set_page_refcounted(page);
524         h->free_huge_pages--;
525         h->free_huge_pages_node[nid]--;
526         return page;
527 }
528
529 static struct page *dequeue_huge_page_vma(struct hstate *h,
530                                 struct vm_area_struct *vma,
531                                 unsigned long address, int avoid_reserve)
532 {
533         struct page *page = NULL;
534         struct mempolicy *mpol;
535         nodemask_t *nodemask;
536         struct zonelist *zonelist;
537         struct zone *zone;
538         struct zoneref *z;
539         unsigned int cpuset_mems_cookie;
540
541 retry_cpuset:
542         cpuset_mems_cookie = get_mems_allowed();
543         zonelist = huge_zonelist(vma, address,
544                                         htlb_alloc_mask, &mpol, &nodemask);
545         /*
546          * A child process with MAP_PRIVATE mappings created by its parent
547          * has no page reserves. This check ensures that reservations are
548          * not "stolen". The child may still get SIGKILLed.
549          */
550         if (!vma_has_reserves(vma) &&
551                         h->free_huge_pages - h->resv_huge_pages == 0)
552                 goto err;
553
554         /* If reserves cannot be used, ensure enough pages are in the pool */
555         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
556                 goto err;
557
558         for_each_zone_zonelist_nodemask(zone, z, zonelist,
559                                                 MAX_NR_ZONES - 1, nodemask) {
560                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
561                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
562                         if (page) {
563                                 if (!avoid_reserve)
564                                         decrement_hugepage_resv_vma(h, vma);
565                                 break;
566                         }
567                 }
568         }
569
570         mpol_cond_put(mpol);
571         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
572                 goto retry_cpuset;
573         return page;
574
575 err:
576         mpol_cond_put(mpol);
577         return NULL;
578 }
579
580 static void update_and_free_page(struct hstate *h, struct page *page)
581 {
582         int i;
583
584         VM_BUG_ON(h->order >= MAX_ORDER);
585
586         h->nr_huge_pages--;
587         h->nr_huge_pages_node[page_to_nid(page)]--;
588         for (i = 0; i < pages_per_huge_page(h); i++) {
589                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
590                                 1 << PG_referenced | 1 << PG_dirty |
591                                 1 << PG_active | 1 << PG_reserved |
592                                 1 << PG_private | 1 << PG_writeback);
593         }
594         set_compound_page_dtor(page, NULL);
595         set_page_refcounted(page);
596         arch_release_hugepage(page);
597         __free_pages(page, huge_page_order(h));
598 }
599
600 struct hstate *size_to_hstate(unsigned long size)
601 {
602         struct hstate *h;
603
604         for_each_hstate(h) {
605                 if (huge_page_size(h) == size)
606                         return h;
607         }
608         return NULL;
609 }
610
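/*
 * Compound page destructor for huge pages: if the page's node has surplus
 * pages, release the page back to the buddy allocator, otherwise return it
 * to the hstate free list; in either case uncharge the owning subpool.
 */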
611 static void free_huge_page(struct page *page)
612 {
613         /*
614          * Can't pass hstate in here because it is called from the
615          * compound page destructor.
616          */
617         struct hstate *h = page_hstate(page);
618         int nid = page_to_nid(page);
619         struct hugepage_subpool *spool =
620                 (struct hugepage_subpool *)page_private(page);
621
622         set_page_private(page, 0);
623         page->mapping = NULL;
624         BUG_ON(page_count(page));
625         BUG_ON(page_mapcount(page));
626
627         spin_lock(&hugetlb_lock);
628         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
629                 /* remove the page from active list */
630                 list_del(&page->lru);
631                 update_and_free_page(h, page);
632                 h->surplus_huge_pages--;
633                 h->surplus_huge_pages_node[nid]--;
634         } else {
635                 enqueue_huge_page(h, page);
636         }
637         spin_unlock(&hugetlb_lock);
638         hugepage_subpool_put_pages(spool, 1);
639 }
640
641 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
642 {
643         INIT_LIST_HEAD(&page->lru);
644         set_compound_page_dtor(page, free_huge_page);
645         spin_lock(&hugetlb_lock);
646         h->nr_huge_pages++;
647         h->nr_huge_pages_node[nid]++;
648         spin_unlock(&hugetlb_lock);
649         put_page(page); /* free it into the hugepage allocator */
650 }
651
652 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
653 {
654         int i;
655         int nr_pages = 1 << order;
656         struct page *p = page + 1;
657
658         /* we rely on prep_new_huge_page to set the destructor */
659         set_compound_order(page, order);
660         __SetPageHead(page);
661         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
662                 __SetPageTail(p);
663                 set_page_count(p, 0);
664                 p->first_page = page;
665         }
666 }
667
668 int PageHuge(struct page *page)
669 {
670         compound_page_dtor *dtor;
671
672         if (!PageCompound(page))
673                 return 0;
674
675         page = compound_head(page);
676         dtor = get_compound_page_dtor(page);
677
678         return dtor == free_huge_page;
679 }
680 EXPORT_SYMBOL_GPL(PageHuge);
681
682 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
683 {
684         struct page *page;
685
686         if (h->order >= MAX_ORDER)
687                 return NULL;
688
689         page = alloc_pages_exact_node(nid,
690                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
691                                                 __GFP_REPEAT|__GFP_NOWARN,
692                 huge_page_order(h));
693         if (page) {
694                 if (arch_prepare_hugepage(page)) {
695                         __free_pages(page, huge_page_order(h));
696                         return NULL;
697                 }
698                 prep_new_huge_page(h, page, nid);
699         }
700
701         return page;
702 }
703
704 /*
705  * common helper functions for hstate_next_node_to_{alloc|free}.
706  * We may have allocated or freed a huge page based on a different
707  * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
708  * be outside of *nodes_allowed.  Ensure that we use an allowed
709  * node for alloc or free.
710  */
711 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
712 {
713         nid = next_node(nid, *nodes_allowed);
714         if (nid == MAX_NUMNODES)
715                 nid = first_node(*nodes_allowed);
716         VM_BUG_ON(nid >= MAX_NUMNODES);
717
718         return nid;
719 }
720
721 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
722 {
723         if (!node_isset(nid, *nodes_allowed))
724                 nid = next_node_allowed(nid, nodes_allowed);
725         return nid;
726 }
727
728 /*
729  * returns the previously saved node ["this node"] from which to
730  * allocate a persistent huge page for the pool and advances the
731  * next node from which to allocate, handling wrap at the end of the node
732  * mask.
733  */
734 static int hstate_next_node_to_alloc(struct hstate *h,
735                                         nodemask_t *nodes_allowed)
736 {
737         int nid;
738
739         VM_BUG_ON(!nodes_allowed);
740
741         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
742         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
743
744         return nid;
745 }
746
747 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
748 {
749         struct page *page;
750         int start_nid;
751         int next_nid;
752         int ret = 0;
753
754         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
755         next_nid = start_nid;
756
757         do {
758                 page = alloc_fresh_huge_page_node(h, next_nid);
759                 if (page) {
760                         ret = 1;
761                         break;
762                 }
763                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
764         } while (next_nid != start_nid);
765
766         if (ret)
767                 count_vm_event(HTLB_BUDDY_PGALLOC);
768         else
769                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
770
771         return ret;
772 }
773
774 /*
775  * helper for free_pool_huge_page() - return the previously saved
776  * node ["this node"] from which to free a huge page.  Advance the
777  * next node id whether or not we find a free huge page to free so
778  * that the next attempt to free addresses the next node.
779  */
780 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
781 {
782         int nid;
783
784         VM_BUG_ON(!nodes_allowed);
785
786         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
787         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
788
789         return nid;
790 }
791
792 /*
793  * Free a huge page from the pool, taken from the next node to free.
794  * Attempt to keep persistent huge pages more or less
795  * balanced over allowed nodes.
796  * Called with hugetlb_lock locked.
797  */
798 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
799                                                          bool acct_surplus)
800 {
801         int start_nid;
802         int next_nid;
803         int ret = 0;
804
805         start_nid = hstate_next_node_to_free(h, nodes_allowed);
806         next_nid = start_nid;
807
808         do {
809                 /*
810                  * If we're returning unused surplus pages, only examine
811                  * nodes with surplus pages.
812                  */
813                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
814                     !list_empty(&h->hugepage_freelists[next_nid])) {
815                         struct page *page =
816                                 list_entry(h->hugepage_freelists[next_nid].next,
817                                           struct page, lru);
818                         list_del(&page->lru);
819                         h->free_huge_pages--;
820                         h->free_huge_pages_node[next_nid]--;
821                         if (acct_surplus) {
822                                 h->surplus_huge_pages--;
823                                 h->surplus_huge_pages_node[next_nid]--;
824                         }
825                         update_and_free_page(h, page);
826                         ret = 1;
827                         break;
828                 }
829                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
830         } while (next_nid != start_nid);
831
832         return ret;
833 }
834
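/*
 * Allocate a surplus huge page directly from the buddy allocator, on the
 * requested node or anywhere for NUMA_NO_NODE.  Fails if the surplus
 * would exceed nr_overcommit_huge_pages.
 */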
835 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
836 {
837         struct page *page;
838         unsigned int r_nid;
839
840         if (h->order >= MAX_ORDER)
841                 return NULL;
842
843         /*
844          * Assume we will successfully allocate the surplus page to
845          * prevent racing processes from causing the surplus to exceed
846          * overcommit
847          *
848          * This however introduces a different race, where a process B
849          * tries to grow the static hugepage pool while alloc_pages() is
850          * called by process A. B will only examine the per-node
851          * counters in determining if surplus huge pages can be
852          * converted to normal huge pages in adjust_pool_surplus(). A
853          * won't be able to increment the per-node counter, until the
854          * lock is dropped by B, but B doesn't drop hugetlb_lock until
855          * no more huge pages can be converted from surplus to normal
856          * state (and doesn't try to convert again). Thus, we have a
857          * case where a surplus huge page exists, the pool is grown, and
858          * the surplus huge page still exists after, even though it
859          * should just have been converted to a normal huge page. This
860          * does not leak memory, though, as the hugepage will be freed
861          * once it is out of use. It also does not allow the counters to
862          * go out of whack in adjust_pool_surplus() as we don't modify
863          * the node values until we've gotten the hugepage and only the
864          * per-node value is checked there.
865          */
866         spin_lock(&hugetlb_lock);
867         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
868                 spin_unlock(&hugetlb_lock);
869                 return NULL;
870         } else {
871                 h->nr_huge_pages++;
872                 h->surplus_huge_pages++;
873         }
874         spin_unlock(&hugetlb_lock);
875
876         if (nid == NUMA_NO_NODE)
877                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
878                                    __GFP_REPEAT|__GFP_NOWARN,
879                                    huge_page_order(h));
880         else
881                 page = alloc_pages_exact_node(nid,
882                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
883                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
884
885         if (page && arch_prepare_hugepage(page)) {
886                 __free_pages(page, huge_page_order(h));
887                 page = NULL;
888         }
889
890         spin_lock(&hugetlb_lock);
891         if (page) {
892                 INIT_LIST_HEAD(&page->lru);
893                 r_nid = page_to_nid(page);
894                 set_compound_page_dtor(page, free_huge_page);
895                 /*
896                  * We incremented the global counters already
897                  */
898                 h->nr_huge_pages_node[r_nid]++;
899                 h->surplus_huge_pages_node[r_nid]++;
900                 __count_vm_event(HTLB_BUDDY_PGALLOC);
901         } else {
902                 h->nr_huge_pages--;
903                 h->surplus_huge_pages--;
904                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
905         }
906         spin_unlock(&hugetlb_lock);
907
908         return page;
909 }
910
911 /*
912  * This allocation function is useful in the context where vma is irrelevant.
913  * E.g. soft-offlining uses this function because it only cares about the
914  * physical address of the error page.
915  */
916 struct page *alloc_huge_page_node(struct hstate *h, int nid)
917 {
918         struct page *page;
919
920         spin_lock(&hugetlb_lock);
921         page = dequeue_huge_page_node(h, nid);
922         spin_unlock(&hugetlb_lock);
923
924         if (!page)
925                 page = alloc_buddy_huge_page(h, nid);
926
927         return page;
928 }
929
930 /*
931  * Increase the hugetlb pool such that it can accommodate a reservation
932  * of size 'delta'.
933  */
934 static int gather_surplus_pages(struct hstate *h, int delta)
935 {
936         struct list_head surplus_list;
937         struct page *page, *tmp;
938         int ret, i;
939         int needed, allocated;
940         bool alloc_ok = true;
941
942         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
943         if (needed <= 0) {
944                 h->resv_huge_pages += delta;
945                 return 0;
946         }
947
948         allocated = 0;
949         INIT_LIST_HEAD(&surplus_list);
950
951         ret = -ENOMEM;
952 retry:
953         spin_unlock(&hugetlb_lock);
954         for (i = 0; i < needed; i++) {
955                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
956                 if (!page) {
957                         alloc_ok = false;
958                         break;
959                 }
960                 list_add(&page->lru, &surplus_list);
961         }
962         allocated += i;
963
964         /*
965          * After retaking hugetlb_lock, we need to recalculate 'needed'
966          * because either resv_huge_pages or free_huge_pages may have changed.
967          */
968         spin_lock(&hugetlb_lock);
969         needed = (h->resv_huge_pages + delta) -
970                         (h->free_huge_pages + allocated);
971         if (needed > 0) {
972                 if (alloc_ok)
973                         goto retry;
974                 /*
975                  * We were not able to allocate enough pages to
976                  * satisfy the entire reservation so we free what
977                  * we've allocated so far.
978                  */
979                 goto free;
980         }
981         /*
982          * The surplus_list now contains _at_least_ the number of extra pages
983          * needed to accommodate the reservation.  Add the appropriate number
984          * of pages to the hugetlb pool and free the extras back to the buddy
985          * allocator.  Commit the entire reservation here to prevent another
986          * process from stealing the pages as they are added to the pool but
987          * before they are reserved.
988          */
989         needed += allocated;
990         h->resv_huge_pages += delta;
991         ret = 0;
992
993         /* Free the needed pages to the hugetlb pool */
994         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
995                 if ((--needed) < 0)
996                         break;
997                 /*
998                  * This page is now managed by the hugetlb allocator and has
999                  * no users -- drop the buddy allocator's reference.
1000                  */
1001                 put_page_testzero(page);
1002                 VM_BUG_ON(page_count(page));
1003                 enqueue_huge_page(h, page);
1004         }
1005 free:
1006         spin_unlock(&hugetlb_lock);
1007
1008         /* Free unnecessary surplus pages to the buddy allocator */
1009         if (!list_empty(&surplus_list)) {
1010                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1011                         put_page(page);
1012                 }
1013         }
1014         spin_lock(&hugetlb_lock);
1015
1016         return ret;
1017 }
1018
1019 /*
1020  * When releasing a hugetlb pool reservation, any surplus pages that were
1021  * allocated to satisfy the reservation must be explicitly freed if they were
1022  * never used.
1023  * Called with hugetlb_lock held.
1024  */
1025 static void return_unused_surplus_pages(struct hstate *h,
1026                                         unsigned long unused_resv_pages)
1027 {
1028         unsigned long nr_pages;
1029
1030         /* Uncommit the reservation */
1031         h->resv_huge_pages -= unused_resv_pages;
1032
1033         /* Cannot return gigantic pages currently */
1034         if (h->order >= MAX_ORDER)
1035                 return;
1036
1037         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1038
1039         /*
1040          * We want to release as many surplus pages as possible, spread
1041          * evenly across all nodes with memory. Iterate across these nodes
1042          * until we can no longer free unreserved surplus pages. This occurs
1043          * when the nodes with surplus pages have no free pages.
1044          * free_pool_huge_page() will balance the freed pages across the
1045          * on-line nodes with memory and will handle the hstate accounting.
1046          */
1047         while (nr_pages--) {
1048                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1049                         break;
1050         }
1051 }
1052
1053 /*
1054  * Determine if the huge page at addr within the vma has an associated
1055  * reservation.  Where it does not we will need to logically increase
1056  * reservation and actually increase subpool usage before an allocation
1057  * can occur.  Where any new reservation would be required the
1058  * reservation change is prepared, but not committed.  Once the page
1059  * has been allocated from the subpool and instantiated the change should
1060  * be committed via vma_commit_reservation.  No action is required on
1061  * failure.
1062  */
1063 static long vma_needs_reservation(struct hstate *h,
1064                         struct vm_area_struct *vma, unsigned long addr)
1065 {
1066         struct address_space *mapping = vma->vm_file->f_mapping;
1067         struct inode *inode = mapping->host;
1068
1069         if (vma->vm_flags & VM_MAYSHARE) {
1070                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1071                 return region_chg(&inode->i_mapping->private_list,
1072                                                         idx, idx + 1);
1073
1074         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1075                 return 1;
1076
1077         } else  {
1078                 long err;
1079                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1080                 struct resv_map *reservations = vma_resv_map(vma);
1081
1082                 err = region_chg(&reservations->regions, idx, idx + 1);
1083                 if (err < 0)
1084                         return err;
1085                 return 0;
1086         }
1087 }
1088 static void vma_commit_reservation(struct hstate *h,
1089                         struct vm_area_struct *vma, unsigned long addr)
1090 {
1091         struct address_space *mapping = vma->vm_file->f_mapping;
1092         struct inode *inode = mapping->host;
1093
1094         if (vma->vm_flags & VM_MAYSHARE) {
1095                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1096                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1097
1098         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1099                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1100                 struct resv_map *reservations = vma_resv_map(vma);
1101
1102                 /* Mark this page used in the map. */
1103                 region_add(&reservations->regions, idx, idx + 1);
1104         }
1105 }
1106
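/*
 * Allocate a huge page for a fault at 'addr' in 'vma': charge the subpool
 * if no reservation covers the address, take a page from the pool, and
 * fall back to a surplus page from the buddy allocator if the pool is
 * empty.  The reservation is committed once a page has been obtained.
 */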
1107 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1108                                     unsigned long addr, int avoid_reserve)
1109 {
1110         struct hugepage_subpool *spool = subpool_vma(vma);
1111         struct hstate *h = hstate_vma(vma);
1112         struct page *page;
1113         long chg;
1114
1115         /*
1116          * Processes that did not create the mapping will have no
1117          * reserves and will not have accounted against the subpool
1118          * limit. Check that the subpool limit can be met before
1119          * satisfying the allocation. MAP_NORESERVE mappings may also
1120          * need pages and subpool limit allocated if no reserve
1121          * mapping overlaps.
1122          */
1123         chg = vma_needs_reservation(h, vma, addr);
1124         if (chg < 0)
1125                 return ERR_PTR(-ENOMEM);
1126         if (chg)
1127                 if (hugepage_subpool_get_pages(spool, chg))
1128                         return ERR_PTR(-ENOSPC);
1129
1130         spin_lock(&hugetlb_lock);
1131         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1132         spin_unlock(&hugetlb_lock);
1133
1134         if (!page) {
1135                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1136                 if (!page) {
1137                         hugepage_subpool_put_pages(spool, chg);
1138                         return ERR_PTR(-ENOSPC);
1139                 }
1140         }
1141
1142         set_page_private(page, (unsigned long)spool);
1143
1144         vma_commit_reservation(h, vma, addr);
1145
1146         return page;
1147 }
1148
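/*
 * Weak default implementation: allocate a gigantic page from bootmem,
 * trying the nodes with memory in round-robin order, and record it on
 * huge_boot_pages so that gather_bootmem_prealloc() can set it up once
 * the mem_map is available.
 */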
1149 int __weak alloc_bootmem_huge_page(struct hstate *h)
1150 {
1151         struct huge_bootmem_page *m;
1152         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1153
1154         while (nr_nodes) {
1155                 void *addr;
1156
1157                 addr = __alloc_bootmem_node_nopanic(
1158                                 NODE_DATA(hstate_next_node_to_alloc(h,
1159                                                 &node_states[N_HIGH_MEMORY])),
1160                                 huge_page_size(h), huge_page_size(h), 0);
1161
1162                 if (addr) {
1163                         /*
1164                          * Use the beginning of the huge page to store the
1165                          * huge_bootmem_page struct (until gather_bootmem
1166                          * puts them into the mem_map).
1167                          */
1168                         m = addr;
1169                         goto found;
1170                 }
1171                 nr_nodes--;
1172         }
1173         return 0;
1174
1175 found:
1176         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1177         /* Put them into a private list first because mem_map is not up yet */
1178         list_add(&m->list, &huge_boot_pages);
1179         m->hstate = h;
1180         return 1;
1181 }
1182
1183 static void prep_compound_huge_page(struct page *page, int order)
1184 {
1185         if (unlikely(order > (MAX_ORDER - 1)))
1186                 prep_compound_gigantic_page(page, order);
1187         else
1188                 prep_compound_page(page, order);
1189 }
1190
1191 /* Put bootmem huge pages into the standard lists after mem_map is up */
1192 static void __init gather_bootmem_prealloc(void)
1193 {
1194         struct huge_bootmem_page *m;
1195
1196         list_for_each_entry(m, &huge_boot_pages, list) {
1197                 struct hstate *h = m->hstate;
1198                 struct page *page;
1199
1200 #ifdef CONFIG_HIGHMEM
1201                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1202                 free_bootmem_late((unsigned long)m,
1203                                   sizeof(struct huge_bootmem_page));
1204 #else
1205                 page = virt_to_page(m);
1206 #endif
1207                 __ClearPageReserved(page);
1208                 WARN_ON(page_count(page) != 1);
1209                 prep_compound_huge_page(page, h->order);
1210                 prep_new_huge_page(h, page, page_to_nid(page));
1211                 /*
1212                  * If we had gigantic hugepages allocated at boot time, we need
1213                  * to restore the 'stolen' pages to totalram_pages in order to
1214                  * fix confusing memory reports from free(1) and other
1215                  * side-effects, like CommitLimit going negative.
1216                  */
1217                 if (h->order > (MAX_ORDER - 1))
1218                         totalram_pages += 1 << h->order;
1219         }
1220 }
1221
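/*
 * Preallocate max_huge_pages pages for the hstate at boot, using bootmem
 * for gigantic pages and the buddy allocator otherwise.  max_huge_pages
 * is reduced to the number of pages actually obtained.
 */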
1222 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1223 {
1224         unsigned long i;
1225
1226         for (i = 0; i < h->max_huge_pages; ++i) {
1227                 if (h->order >= MAX_ORDER) {
1228                         if (!alloc_bootmem_huge_page(h))
1229                                 break;
1230                 } else if (!alloc_fresh_huge_page(h,
1231                                          &node_states[N_HIGH_MEMORY]))
1232                         break;
1233         }
1234         h->max_huge_pages = i;
1235 }
1236
1237 static void __init hugetlb_init_hstates(void)
1238 {
1239         struct hstate *h;
1240
1241         for_each_hstate(h) {
1242                 /* oversize hugepages were init'ed in early boot */
1243                 if (h->order < MAX_ORDER)
1244                         hugetlb_hstate_alloc_pages(h);
1245         }
1246 }
1247
1248 static char * __init memfmt(char *buf, unsigned long n)
1249 {
1250         if (n >= (1UL << 30))
1251                 sprintf(buf, "%lu GB", n >> 30);
1252         else if (n >= (1UL << 20))
1253                 sprintf(buf, "%lu MB", n >> 20);
1254         else
1255                 sprintf(buf, "%lu KB", n >> 10);
1256         return buf;
1257 }
1258
1259 static void __init report_hugepages(void)
1260 {
1261         struct hstate *h;
1262
1263         for_each_hstate(h) {
1264                 char buf[32];
1265                 printk(KERN_INFO "HugeTLB registered %s page size, "
1266                                  "pre-allocated %ld pages\n",
1267                         memfmt(buf, huge_page_size(h)),
1268                         h->free_huge_pages);
1269         }
1270 }
1271
1272 #ifdef CONFIG_HIGHMEM
1273 static void try_to_free_low(struct hstate *h, unsigned long count,
1274                                                 nodemask_t *nodes_allowed)
1275 {
1276         int i;
1277
1278         if (h->order >= MAX_ORDER)
1279                 return;
1280
1281         for_each_node_mask(i, *nodes_allowed) {
1282                 struct page *page, *next;
1283                 struct list_head *freel = &h->hugepage_freelists[i];
1284                 list_for_each_entry_safe(page, next, freel, lru) {
1285                         if (count >= h->nr_huge_pages)
1286                                 return;
1287                         if (PageHighMem(page))
1288                                 continue;
1289                         list_del(&page->lru);
1290                         update_and_free_page(h, page);
1291                         h->free_huge_pages--;
1292                         h->free_huge_pages_node[page_to_nid(page)]--;
1293                 }
1294         }
1295 }
1296 #else
1297 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1298                                                 nodemask_t *nodes_allowed)
1299 {
1300 }
1301 #endif
1302
1303 /*
1304  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1305  * balanced by operating on them in a round-robin fashion.
1306  * Returns 1 if an adjustment was made.
1307  */
1308 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1309                                 int delta)
1310 {
1311         int start_nid, next_nid;
1312         int ret = 0;
1313
1314         VM_BUG_ON(delta != -1 && delta != 1);
1315
1316         if (delta < 0)
1317                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1318         else
1319                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1320         next_nid = start_nid;
1321
1322         do {
1323                 int nid = next_nid;
1324                 if (delta < 0)  {
1325                         /*
1326                          * To shrink on this node, there must be a surplus page
1327                          */
1328                         if (!h->surplus_huge_pages_node[nid]) {
1329                                 next_nid = hstate_next_node_to_alloc(h,
1330                                                                 nodes_allowed);
1331                                 continue;
1332                         }
1333                 }
1334                 if (delta > 0) {
1335                         /*
1336                          * Surplus cannot exceed the total number of pages
1337                          */
1338                         if (h->surplus_huge_pages_node[nid] >=
1339                                                 h->nr_huge_pages_node[nid]) {
1340                                 next_nid = hstate_next_node_to_free(h,
1341                                                                 nodes_allowed);
1342                                 continue;
1343                         }
1344                 }
1345
1346                 h->surplus_huge_pages += delta;
1347                 h->surplus_huge_pages_node[nid] += delta;
1348                 ret = 1;
1349                 break;
1350         } while (next_nid != start_nid);
1351
1352         return ret;
1353 }
1354
1355 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1356 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1357                                                 nodemask_t *nodes_allowed)
1358 {
1359         unsigned long min_count, ret;
1360
1361         if (h->order >= MAX_ORDER)
1362                 return h->max_huge_pages;
1363
1364         /*
1365          * Increase the pool size
1366          * First take pages out of surplus state.  Then make up the
1367          * remaining difference by allocating fresh huge pages.
1368          *
1369          * We might race with alloc_buddy_huge_page() here and be unable
1370          * to convert a surplus huge page to a normal huge page. That is
1371          * not critical, though, it just means the overall size of the
1372          * pool might be one hugepage larger than it needs to be, but
1373          * within all the constraints specified by the sysctls.
1374          */
1375         spin_lock(&hugetlb_lock);
1376         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1377                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1378                         break;
1379         }
1380
1381         while (count > persistent_huge_pages(h)) {
1382                 /*
1383                  * If this allocation races such that we no longer need the
1384                  * page, free_huge_page will handle it by freeing the page
1385                  * and reducing the surplus.
1386                  */
1387                 spin_unlock(&hugetlb_lock);
1388                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1389                 spin_lock(&hugetlb_lock);
1390                 if (!ret)
1391                         goto out;
1392
1393                 /* Bail for signals. Probably ctrl-c from user */
1394                 if (signal_pending(current))
1395                         goto out;
1396         }
1397
1398         /*
1399          * Decrease the pool size
1400          * First return free pages to the buddy allocator (being careful
1401          * to keep enough around to satisfy reservations).  Then place
1402          * pages into surplus state as needed so the pool will shrink
1403          * to the desired size as pages become free.
1404          *
1405          * By placing pages into the surplus state independent of the
1406          * overcommit value, we are allowing the surplus pool size to
1407          * exceed overcommit. There are few sane options here. Since
1408          * alloc_buddy_huge_page() is checking the global counter,
1409          * though, we'll note that we're not allowed to exceed surplus
1410          * and won't grow the pool anywhere else. Not until one of the
1411          * sysctls are changed, or the surplus pages go out of use.
1412          */
1413         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1414         min_count = max(count, min_count);
1415         try_to_free_low(h, min_count, nodes_allowed);
1416         while (min_count < persistent_huge_pages(h)) {
1417                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1418                         break;
1419         }
1420         while (count < persistent_huge_pages(h)) {
1421                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1422                         break;
1423         }
1424 out:
1425         ret = persistent_huge_pages(h);
1426         spin_unlock(&hugetlb_lock);
1427         return ret;
1428 }
1429
1430 #define HSTATE_ATTR_RO(_name) \
1431         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1432
1433 #define HSTATE_ATTR(_name) \
1434         static struct kobj_attribute _name##_attr = \
1435                 __ATTR(_name, 0644, _name##_show, _name##_store)
1436
1437 static struct kobject *hugepages_kobj;
1438 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1439
1440 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1441
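/*
 * Map a hugepages sysfs kobject back to its hstate.  For the global
 * attributes *nidp is set to NUMA_NO_NODE; per-node attributes are
 * resolved via kobj_to_node_hstate().
 */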
1442 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1443 {
1444         int i;
1445
1446         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1447                 if (hstate_kobjs[i] == kobj) {
1448                         if (nidp)
1449                                 *nidp = NUMA_NO_NODE;
1450                         return &hstates[i];
1451                 }
1452
1453         return kobj_to_node_hstate(kobj, nidp);
1454 }
1455
1456 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1457                                         struct kobj_attribute *attr, char *buf)
1458 {
1459         struct hstate *h;
1460         unsigned long nr_huge_pages;
1461         int nid;
1462
1463         h = kobj_to_hstate(kobj, &nid);
1464         if (nid == NUMA_NO_NODE)
1465                 nr_huge_pages = h->nr_huge_pages;
1466         else
1467                 nr_huge_pages = h->nr_huge_pages_node[nid];
1468
1469         return sprintf(buf, "%lu\n", nr_huge_pages);
1470 }
1471
1472 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1473                         struct kobject *kobj, struct kobj_attribute *attr,
1474                         const char *buf, size_t len)
1475 {
1476         int err;
1477         int nid;
1478         unsigned long count;
1479         struct hstate *h;
1480         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1481
1482         err = strict_strtoul(buf, 10, &count);
1483         if (err)
1484                 goto out;
1485
1486         h = kobj_to_hstate(kobj, &nid);
1487         if (h->order >= MAX_ORDER) {
1488                 err = -EINVAL;
1489                 goto out;
1490         }
1491
1492         if (nid == NUMA_NO_NODE) {
1493                 /*
1494                  * global hstate attribute
1495                  */
1496                 if (!(obey_mempolicy &&
1497                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1498                         NODEMASK_FREE(nodes_allowed);
1499                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1500                 }
1501         } else if (nodes_allowed) {
1502                 /*
1503                  * per node hstate attribute: adjust count to global,
1504                  * but restrict alloc/free to the specified node.
1505                  */
1506                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1507                 init_nodemask_of_node(nodes_allowed, nid);
1508         } else
1509                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1510
1511         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1512
1513         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1514                 NODEMASK_FREE(nodes_allowed);
1515
1516         return len;
1517 out:
1518         NODEMASK_FREE(nodes_allowed);
1519         return err;
1520 }
1521
1522 static ssize_t nr_hugepages_show(struct kobject *kobj,
1523                                        struct kobj_attribute *attr, char *buf)
1524 {
1525         return nr_hugepages_show_common(kobj, attr, buf);
1526 }
1527
1528 static ssize_t nr_hugepages_store(struct kobject *kobj,
1529                struct kobj_attribute *attr, const char *buf, size_t len)
1530 {
1531         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1532 }
1533 HSTATE_ATTR(nr_hugepages);
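
/*
 * The attribute above is typically exposed as, e.g.,
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * (path assumes a 2 MB hstate). A sketch of resizing the pool from
 * userspace, assuming that layout:
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which ends up in nr_hugepages_store() -> nr_hugepages_store_common()
 * with obey_mempolicy == false.
 */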
1534
1535 #ifdef CONFIG_NUMA
1536
1537 /*
1538  * hstate attribute for optionally mempolicy-based constraint on persistent
1539  * huge page alloc/free.
1540  */
1541 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1542                                        struct kobj_attribute *attr, char *buf)
1543 {
1544         return nr_hugepages_show_common(kobj, attr, buf);
1545 }
1546
1547 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1548                struct kobj_attribute *attr, const char *buf, size_t len)
1549 {
1550         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1551 }
1552 HSTATE_ATTR(nr_hugepages_mempolicy);
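
/*
 * A sketch of using the mempolicy-aware variant, assuming the standard
 * sysfs layout and that numactl is available:
 *
 *	numactl -m 0 sh -c \
 *	  'echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * nr_hugepages_store_common(true, ...) then restricts allocation/freeing
 * to the nodes of the writing task's mempolicy via
 * init_nodemask_of_mempolicy().
 */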
1553 #endif
1554
1555
1556 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1557                                         struct kobj_attribute *attr, char *buf)
1558 {
1559         struct hstate *h = kobj_to_hstate(kobj, NULL);
1560         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1561 }
1562
1563 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1564                 struct kobj_attribute *attr, const char *buf, size_t count)
1565 {
1566         int err;
1567         unsigned long input;
1568         struct hstate *h = kobj_to_hstate(kobj, NULL);
1569
1570         if (h->order >= MAX_ORDER)
1571                 return -EINVAL;
1572
1573         err = strict_strtoul(buf, 10, &input);
1574         if (err)
1575                 return err;
1576
1577         spin_lock(&hugetlb_lock);
1578         h->nr_overcommit_huge_pages = input;
1579         spin_unlock(&hugetlb_lock);
1580
1581         return count;
1582 }
1583 HSTATE_ATTR(nr_overcommit_hugepages);
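
/*
 * Example (hypothetical value), assuming a 2 MB hstate: allow up to 16
 * surplus huge pages to be allocated from the buddy allocator on demand:
 *
 *	echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * The store above only updates h->nr_overcommit_huge_pages under
 * hugetlb_lock; the surplus pages themselves are allocated at fault time.
 */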
1584
1585 static ssize_t free_hugepages_show(struct kobject *kobj,
1586                                         struct kobj_attribute *attr, char *buf)
1587 {
1588         struct hstate *h;
1589         unsigned long free_huge_pages;
1590         int nid;
1591
1592         h = kobj_to_hstate(kobj, &nid);
1593         if (nid == NUMA_NO_NODE)
1594                 free_huge_pages = h->free_huge_pages;
1595         else
1596                 free_huge_pages = h->free_huge_pages_node[nid];
1597
1598         return sprintf(buf, "%lu\n", free_huge_pages);
1599 }
1600 HSTATE_ATTR_RO(free_hugepages);
1601
1602 static ssize_t resv_hugepages_show(struct kobject *kobj,
1603                                         struct kobj_attribute *attr, char *buf)
1604 {
1605         struct hstate *h = kobj_to_hstate(kobj, NULL);
1606         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1607 }
1608 HSTATE_ATTR_RO(resv_hugepages);
1609
1610 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1611                                         struct kobj_attribute *attr, char *buf)
1612 {
1613         struct hstate *h;
1614         unsigned long surplus_huge_pages;
1615         int nid;
1616
1617         h = kobj_to_hstate(kobj, &nid);
1618         if (nid == NUMA_NO_NODE)
1619                 surplus_huge_pages = h->surplus_huge_pages;
1620         else
1621                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1622
1623         return sprintf(buf, "%lu\n", surplus_huge_pages);
1624 }
1625 HSTATE_ATTR_RO(surplus_hugepages);
1626
1627 static struct attribute *hstate_attrs[] = {
1628         &nr_hugepages_attr.attr,
1629         &nr_overcommit_hugepages_attr.attr,
1630         &free_hugepages_attr.attr,
1631         &resv_hugepages_attr.attr,
1632         &surplus_hugepages_attr.attr,
1633 #ifdef CONFIG_NUMA
1634         &nr_hugepages_mempolicy_attr.attr,
1635 #endif
1636         NULL,
1637 };
1638
1639 static struct attribute_group hstate_attr_group = {
1640         .attrs = hstate_attrs,
1641 };
1642
1643 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1644                                     struct kobject **hstate_kobjs,
1645                                     struct attribute_group *hstate_attr_group)
1646 {
1647         int retval;
1648         int hi = hstate_index(h);
1649
1650         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1651         if (!hstate_kobjs[hi])
1652                 return -ENOMEM;
1653
1654         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1655         if (retval)
1656                 kobject_put(hstate_kobjs[hi]);
1657
1658         return retval;
1659 }
1660
1661 static void __init hugetlb_sysfs_init(void)
1662 {
1663         struct hstate *h;
1664         int err;
1665
1666         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1667         if (!hugepages_kobj)
1668                 return;
1669
1670         for_each_hstate(h) {
1671                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1672                                          hstate_kobjs, &hstate_attr_group);
1673                 if (err)
1674                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1675                                                                 h->name);
1676         }
1677 }
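
/*
 * The result of hugetlb_sysfs_init() is one directory per hstate under
 * /sys/kernel/mm/hugepages/, e.g. (assuming 2 MB and 1 GB hstates):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/{nr_hugepages,
 *		nr_overcommit_hugepages, free_hugepages, resv_hugepages,
 *		surplus_hugepages}
 *	/sys/kernel/mm/hugepages/hugepages-1048576kB/...
 */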
1678
1679 #ifdef CONFIG_NUMA
1680
1681 /*
1682  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1683  * with node devices in node_devices[] using a parallel array.  The array
1684  * index of a node device or _hstate == node id.
1685  * This is here to avoid any static dependency of the node device driver, in
1686  * the base kernel, on the hugetlb module.
1687  */
1688 struct node_hstate {
1689         struct kobject          *hugepages_kobj;
1690         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1691 };
1692 struct node_hstate node_hstates[MAX_NUMNODES];
1693
1694 /*
1695  * A subset of global hstate attributes for node devices
1696  */
1697 static struct attribute *per_node_hstate_attrs[] = {
1698         &nr_hugepages_attr.attr,
1699         &free_hugepages_attr.attr,
1700         &surplus_hugepages_attr.attr,
1701         NULL,
1702 };
1703
1704 static struct attribute_group per_node_hstate_attr_group = {
1705         .attrs = per_node_hstate_attrs,
1706 };
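
/*
 * With node devices registered, the per node subset above appears under
 * each node device, e.g. (assuming node0 and a 2 MB hstate):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/
 *		{nr_hugepages, free_hugepages, surplus_hugepages}
 */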
1707
1708 /*
1709  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1710  * Returns node id via non-NULL nidp.
1711  */
1712 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1713 {
1714         int nid;
1715
1716         for (nid = 0; nid < nr_node_ids; nid++) {
1717                 struct node_hstate *nhs = &node_hstates[nid];
1718                 int i;
1719                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1720                         if (nhs->hstate_kobjs[i] == kobj) {
1721                                 if (nidp)
1722                                         *nidp = nid;
1723                                 return &hstates[i];
1724                         }
1725         }
1726
1727         BUG();
1728         return NULL;
1729 }
1730
1731 /*
1732  * Unregister hstate attributes from a single node device.
1733  * No-op if no hstate attributes attached.
1734  */
1735 void hugetlb_unregister_node(struct node *node)
1736 {
1737         struct hstate *h;
1738         struct node_hstate *nhs = &node_hstates[node->dev.id];
1739
1740         if (!nhs->hugepages_kobj)
1741                 return;         /* no hstate attributes */
1742
1743         for_each_hstate(h) {
1744                 int idx = hstate_index(h);
1745                 if (nhs->hstate_kobjs[idx]) {
1746                         kobject_put(nhs->hstate_kobjs[idx]);
1747                         nhs->hstate_kobjs[idx] = NULL;
1748                 }
1749         }
1750
1751         kobject_put(nhs->hugepages_kobj);
1752         nhs->hugepages_kobj = NULL;
1753 }
1754
1755 /*
1756  * hugetlb module exit:  unregister hstate attributes from node devices
1757  * that have them.
1758  */
1759 static void hugetlb_unregister_all_nodes(void)
1760 {
1761         int nid;
1762
1763         /*
1764          * disable node device registrations.
1765          */
1766         register_hugetlbfs_with_node(NULL, NULL);
1767
1768         /*
1769          * remove hstate attributes from any nodes that have them.
1770          */
1771         for (nid = 0; nid < nr_node_ids; nid++)
1772                 hugetlb_unregister_node(&node_devices[nid]);
1773 }
1774
1775 /*
1776  * Register hstate attributes for a single node device.
1777  * No-op if attributes already registered.
1778  */
1779 void hugetlb_register_node(struct node *node)
1780 {
1781         struct hstate *h;
1782         struct node_hstate *nhs = &node_hstates[node->dev.id];
1783         int err;
1784
1785         if (nhs->hugepages_kobj)
1786                 return;         /* already allocated */
1787
1788         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1789                                                         &node->dev.kobj);
1790         if (!nhs->hugepages_kobj)
1791                 return;
1792
1793         for_each_hstate(h) {
1794                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1795                                                 nhs->hstate_kobjs,
1796                                                 &per_node_hstate_attr_group);
1797                 if (err) {
1798                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1799                                         " for node %d\n",
1800                                                 h->name, node->dev.id);
1801                         hugetlb_unregister_node(node);
1802                         break;
1803                 }
1804         }
1805 }
1806
1807 /*
1808  * hugetlb init time:  register hstate attributes for all registered node
1809  * devices of nodes that have memory.  All on-line nodes should have
1810  * registered their associated device by this time.
1811  */
1812 static void hugetlb_register_all_nodes(void)
1813 {
1814         int nid;
1815
1816         for_each_node_state(nid, N_HIGH_MEMORY) {
1817                 struct node *node = &node_devices[nid];
1818                 if (node->dev.id == nid)
1819                         hugetlb_register_node(node);
1820         }
1821
1822         /*
1823          * Let the node device driver know we're here so it can
1824          * [un]register hstate attributes on node hotplug.
1825          */
1826         register_hugetlbfs_with_node(hugetlb_register_node,
1827                                      hugetlb_unregister_node);
1828 }
1829 #else   /* !CONFIG_NUMA */
1830
1831 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1832 {
1833         BUG();
1834         if (nidp)
1835                 *nidp = -1;
1836         return NULL;
1837 }
1838
1839 static void hugetlb_unregister_all_nodes(void) { }
1840
1841 static void hugetlb_register_all_nodes(void) { }
1842
1843 #endif
1844
1845 static void __exit hugetlb_exit(void)
1846 {
1847         struct hstate *h;
1848
1849         hugetlb_unregister_all_nodes();
1850
1851         for_each_hstate(h) {
1852                 kobject_put(hstate_kobjs[hstate_index(h)]);
1853         }
1854
1855         kobject_put(hugepages_kobj);
1856 }
1857 module_exit(hugetlb_exit);
1858
1859 static int __init hugetlb_init(void)
1860 {
1861         /* Some platforms decide whether they support huge pages at boot
1862          * time. On those (such as powerpc), HPAGE_SHIFT is set to 0 when
1863          * there is no such support.
1864          */
1865         if (HPAGE_SHIFT == 0)
1866                 return 0;
1867
1868         if (!size_to_hstate(default_hstate_size)) {
1869                 default_hstate_size = HPAGE_SIZE;
1870                 if (!size_to_hstate(default_hstate_size))
1871                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1872         }
1873         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1874         if (default_hstate_max_huge_pages)
1875                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1876
1877         hugetlb_init_hstates();
1878
1879         gather_bootmem_prealloc();
1880
1881         report_hugepages();
1882
1883         hugetlb_sysfs_init();
1884
1885         hugetlb_register_all_nodes();
1886
1887         return 0;
1888 }
1889 module_init(hugetlb_init);
1890
1891 /* Should be called on processing a hugepagesz=... option */
1892 void __init hugetlb_add_hstate(unsigned order)
1893 {
1894         struct hstate *h;
1895         unsigned long i;
1896
1897         if (size_to_hstate(PAGE_SIZE << order)) {
1898                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1899                 return;
1900         }
1901         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1902         BUG_ON(order == 0);
1903         h = &hstates[hugetlb_max_hstate++];
1904         h->order = order;
1905         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1906         h->nr_huge_pages = 0;
1907         h->free_huge_pages = 0;
1908         for (i = 0; i < MAX_NUMNODES; ++i)
1909                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1910         INIT_LIST_HEAD(&h->hugepage_activelist);
1911         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1912         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1913         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1914                                         huge_page_size(h)/1024);
1915
1916         parsed_hstate = h;
1917 }
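
/*
 * Worked example: on x86_64 with 4 kB base pages (PAGE_SHIFT == 12), a
 * 2 MB hstate is added with order == 9, so
 *	h->mask == ~((1ULL << 21) - 1) == 0xffffffffffe00000
 * huge_page_size(h) == 2 MB and h->name == "hugepages-2048kB".
 */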
1918
1919 static int __init hugetlb_nrpages_setup(char *s)
1920 {
1921         unsigned long *mhp;
1922         static unsigned long *last_mhp;
1923
1924         /*
1925          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1926          * so this hugepages= parameter goes to the "default hstate".
1927          */
1928         if (!hugetlb_max_hstate)
1929                 mhp = &default_hstate_max_huge_pages;
1930         else
1931                 mhp = &parsed_hstate->max_huge_pages;
1932
1933         if (mhp == last_mhp) {
1934                 printk(KERN_WARNING "hugepages= specified twice without "
1935                         "interleaving hugepagesz=, ignoring\n");
1936                 return 1;
1937         }
1938
1939         if (sscanf(s, "%lu", mhp) <= 0)
1940                 *mhp = 0;
1941
1942         /*
1943          * Global state is always initialized later in hugetlb_init.
1944          * But pages for gigantic (>= MAX_ORDER) hstates must be allocated
1945          * here, early, while the bootmem allocator is still available.
1946          */
1947         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1948                 hugetlb_hstate_alloc_pages(parsed_hstate);
1949
1950         last_mhp = mhp;
1951
1952         return 1;
1953 }
1954 __setup("hugepages=", hugetlb_nrpages_setup);
1955
1956 static int __init hugetlb_default_setup(char *s)
1957 {
1958         default_hstate_size = memparse(s, &s);
1959         return 1;
1960 }
1961 __setup("default_hugepagesz=", hugetlb_default_setup);
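
/*
 * Example command line, showing the ordering the parsers above rely on
 * (each hugepages= applies to the most recent hugepagesz=, or to the
 * default hstate if none has been seen yet):
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * Gigantic (>= MAX_ORDER) hstates, such as the 1 GB one on x86_64, have
 * their pages allocated immediately from bootmem by
 * hugetlb_hstate_alloc_pages().
 */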
1962
1963 static unsigned int cpuset_mems_nr(unsigned int *array)
1964 {
1965         int node;
1966         unsigned int nr = 0;
1967
1968         for_each_node_mask(node, cpuset_current_mems_allowed)
1969                 nr += array[node];
1970
1971         return nr;
1972 }
1973
1974 #ifdef CONFIG_SYSCTL
1975 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1976                          struct ctl_table *table, int write,
1977                          void __user *buffer, size_t *length, loff_t *ppos)
1978 {
1979         struct hstate *h = &default_hstate;
1980         unsigned long tmp;
1981         int ret;
1982
1983         tmp = h->max_huge_pages;
1984
1985         if (write && h->order >= MAX_ORDER)
1986                 return -EINVAL;
1987
1988         table->data = &tmp;
1989         table->maxlen = sizeof(unsigned long);
1990         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1991         if (ret)
1992                 goto out;
1993
1994         if (write) {
1995                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1996                                                 GFP_KERNEL | __GFP_NORETRY);
1997                 if (!(obey_mempolicy &&
1998                                init_nodemask_of_mempolicy(nodes_allowed))) {
1999                         NODEMASK_FREE(nodes_allowed);
2000                         nodes_allowed = &node_states[N_HIGH_MEMORY];
2001                 }
2002                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2003
2004                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
2005                         NODEMASK_FREE(nodes_allowed);
2006         }
2007 out:
2008         return ret;
2009 }
2010
2011 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2012                           void __user *buffer, size_t *length, loff_t *ppos)
2013 {
2014
2015         return hugetlb_sysctl_handler_common(false, table, write,
2016                                                         buffer, length, ppos);
2017 }
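
/*
 * The handler above typically backs /proc/sys/vm/nr_hugepages (wired up
 * in kernel/sysctl.c). A sketch of resizing the default pool from
 * userspace:
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *
 * which reaches set_max_huge_pages() for the default hstate with all
 * N_HIGH_MEMORY nodes allowed (no mempolicy constraint).
 */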
2018
2019 #ifdef CONFIG_NUMA
2020 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2021                           void __user *buffer, size_t *length, loff_t *ppos)
2022 {
2023         return hugetlb_sysctl_handler_common(true, table, write,
2024                                                         buffer, length, ppos);
2025 }
2026 #endif /* CONFIG_NUMA */
2027
2028 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2029                         void __user *buffer,
2030                         size_t *length, loff_t *ppos)
2031 {
2032         proc_dointvec(table, write, buffer, length, ppos);
2033         if (hugepages_treat_as_movable)
2034                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2035         else
2036                 htlb_alloc_mask = GFP_HIGHUSER;
2037         return 0;
2038 }
2039
2040 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2041                         void __user *buffer,
2042                         size_t *length, loff_t *ppos)
2043 {
2044         struct hstate *h = &default_hstate;
2045         unsigned long tmp;
2046         int ret;
2047
2048         tmp = h->nr_overcommit_huge_pages;
2049
2050         if (write && h->order >= MAX_ORDER)
2051                 return -EINVAL;
2052
2053         table->data = &tmp;
2054         table->maxlen = sizeof(unsigned long);
2055         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2056         if (ret)
2057                 goto out;
2058
2059         if (write) {
2060                 spin_lock(&hugetlb_lock);
2061                 h->nr_overcommit_huge_pages = tmp;
2062                 spin_unlock(&hugetlb_lock);
2063         }
2064 out:
2065         return ret;
2066 }
2067
2068 #endif /* CONFIG_SYSCTL */
2069
2070 void hugetlb_report_meminfo(struct seq_file *m)
2071 {
2072         struct hstate *h = &default_hstate;
2073         seq_printf(m,
2074                         "HugePages_Total:   %5lu\n"
2075                         "HugePages_Free:    %5lu\n"
2076                         "HugePages_Rsvd:    %5lu\n"
2077                         "HugePages_Surp:    %5lu\n"
2078                         "Hugepagesize:   %8lu kB\n",
2079                         h->nr_huge_pages,
2080                         h->free_huge_pages,
2081                         h->resv_huge_pages,
2082                         h->surplus_huge_pages,
2083                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2084 }
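
/*
 * Sample of the /proc/meminfo block emitted above (hypothetical values,
 * 2 MB default hstate):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       32
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */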
2085
2086 int hugetlb_report_node_meminfo(int nid, char *buf)
2087 {
2088         struct hstate *h = &default_hstate;
2089         return sprintf(buf,
2090                 "Node %d HugePages_Total: %5u\n"
2091                 "Node %d HugePages_Free:  %5u\n"
2092                 "Node %d HugePages_Surp:  %5u\n",
2093                 nid, h->nr_huge_pages_node[nid],
2094                 nid, h->free_huge_pages_node[nid],
2095                 nid, h->surplus_huge_pages_node[nid]);
2096 }
2097
2098 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2099 unsigned long hugetlb_total_pages(void)
2100 {
2101         struct hstate *h = &default_hstate;
2102         return h->nr_huge_pages * pages_per_huge_page(h);
2103 }
2104
2105 static int hugetlb_acct_memory(struct hstate *h, long delta)
2106 {
2107         int ret = -ENOMEM;
2108
2109         spin_lock(&hugetlb_lock);
2110         /*
2111          * When cpusets are configured, they break strict hugetlb page
2112          * reservation because the accounting is done on a global variable.
2113          * Such a reservation is meaningless with cpusets since it is never
2114          * checked against page availability for the current cpuset. An
2115          * application can still be OOM-killed by the kernel if the cpuset
2116          * its task runs in has no free hugetlb pages. Enforcing strict
2117          * accounting with cpusets is nearly impossible (or too ugly)
2118          * because cpusets are too fluid: tasks and memory nodes can be
2119          * moved between cpusets dynamically.
2120          *
2121          * Changing the semantics of shared hugetlb mappings under cpusets
2122          * is undesirable. However, to preserve some of the semantics, we
2123          * fall back to checking against the current free page availability
2124          * as a best effort, hopefully minimizing the impact of the semantic
2125          * change that cpusets introduce.
2126          */
2127         if (delta > 0) {
2128                 if (gather_surplus_pages(h, delta) < 0)
2129                         goto out;
2130
2131                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2132                         return_unused_surplus_pages(h, delta);
2133                         goto out;
2134                 }
2135         }
2136
2137         ret = 0;
2138         if (delta < 0)
2139                 return_unused_surplus_pages(h, (unsigned long) -delta);
2140
2141 out:
2142         spin_unlock(&hugetlb_lock);
2143         return ret;
2144 }
2145
2146 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2147 {
2148         struct resv_map *reservations = vma_resv_map(vma);
2149
2150         /*
2151          * This new VMA should share its sibling's reservation map if present.
2152          * The VMA will only ever have a valid reservation map pointer where
2153          * it is being copied for another still existing VMA.  As that VMA
2154          * has a reference to the reservation map it cannot disappear until
2155          * after this open call completes.  It is therefore safe to take a
2156          * new reference here without additional locking.
2157          */
2158         if (reservations)
2159                 kref_get(&reservations->refs);
2160 }
2161
2162 static void resv_map_put(struct vm_area_struct *vma)
2163 {
2164         struct resv_map *reservations = vma_resv_map(vma);
2165
2166         if (!reservations)
2167                 return;
2168         kref_put(&reservations->refs, resv_map_release);
2169 }
2170
2171 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2172 {
2173         struct hstate *h = hstate_vma(vma);
2174         struct resv_map *reservations = vma_resv_map(vma);
2175         struct hugepage_subpool *spool = subpool_vma(vma);
2176         unsigned long reserve;
2177         unsigned long start;
2178         unsigned long end;
2179
2180         if (reservations) {
2181                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2182                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2183
2184                 reserve = (end - start) -
2185                         region_count(&reservations->regions, start, end);
2186
2187                 resv_map_put(vma);
2188
2189                 if (reserve) {
2190                         hugetlb_acct_memory(h, -reserve);
2191                         hugepage_subpool_put_pages(spool, reserve);
2192                 }
2193         }
2194 }
2195
2196 /*
2197  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2198  * handle_mm_fault() to try to instantiate regular-sized pages in the
2199  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2200  * this far.
2201  */
2202 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2203 {
2204         BUG();
2205         return 0;
2206 }
2207
2208 const struct vm_operations_struct hugetlb_vm_ops = {
2209         .fault = hugetlb_vm_op_fault,
2210         .open = hugetlb_vm_op_open,
2211         .close = hugetlb_vm_op_close,
2212 };
2213
2214 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2215                                 int writable)
2216 {
2217         pte_t entry;
2218
2219         if (writable) {
2220                 entry =
2221                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2222         } else {
2223                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2224         }
2225         entry = pte_mkyoung(entry);
2226         entry = pte_mkhuge(entry);
2227         entry = arch_make_huge_pte(entry, vma, page, writable);
2228
2229         return entry;
2230 }
2231
2232 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2233                                    unsigned long address, pte_t *ptep)
2234 {
2235         pte_t entry;
2236
2237         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2238         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2239                 update_mmu_cache(vma, address, ptep);
2240 }
2241
2242
2243 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2244                             struct vm_area_struct *vma)
2245 {
2246         pte_t *src_pte, *dst_pte, entry;
2247         struct page *ptepage;
2248         unsigned long addr;
2249         int cow;
2250         struct hstate *h = hstate_vma(vma);
2251         unsigned long sz = huge_page_size(h);
2252
2253         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2254
2255         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2256                 src_pte = huge_pte_offset(src, addr);
2257                 if (!src_pte)
2258                         continue;
2259                 dst_pte = huge_pte_alloc(dst, addr, sz);
2260                 if (!dst_pte)
2261                         goto nomem;
2262
2263                 /* If the pagetables are shared don't copy or take references */
2264                 if (dst_pte == src_pte)
2265                         continue;
2266
2267                 spin_lock(&dst->page_table_lock);
2268                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2269                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2270                         if (cow)
2271                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2272                         entry = huge_ptep_get(src_pte);
2273                         ptepage = pte_page(entry);
2274                         get_page(ptepage);
2275                         page_dup_rmap(ptepage);
2276                         set_huge_pte_at(dst, addr, dst_pte, entry);
2277                 }
2278                 spin_unlock(&src->page_table_lock);
2279                 spin_unlock(&dst->page_table_lock);
2280         }
2281         return 0;
2282
2283 nomem:
2284         return -ENOMEM;
2285 }
2286
2287 static int is_hugetlb_entry_migration(pte_t pte)
2288 {
2289         swp_entry_t swp;
2290
2291         if (huge_pte_none(pte) || pte_present(pte))
2292                 return 0;
2293         swp = pte_to_swp_entry(pte);
2294         if (non_swap_entry(swp) && is_migration_entry(swp))
2295                 return 1;
2296         else
2297                 return 0;
2298 }
2299
2300 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2301 {
2302         swp_entry_t swp;
2303
2304         if (huge_pte_none(pte) || pte_present(pte))
2305                 return 0;
2306         swp = pte_to_swp_entry(pte);
2307         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2308                 return 1;
2309         else
2310                 return 0;
2311 }
2312
2313 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2314                             unsigned long start, unsigned long end,
2315                             struct page *ref_page)
2316 {
2317         int force_flush = 0;
2318         struct mm_struct *mm = vma->vm_mm;
2319         unsigned long address;
2320         pte_t *ptep;
2321         pte_t pte;
2322         struct page *page;
2323         struct hstate *h = hstate_vma(vma);
2324         unsigned long sz = huge_page_size(h);
2325
2326         WARN_ON(!is_vm_hugetlb_page(vma));
2327         BUG_ON(start & ~huge_page_mask(h));
2328         BUG_ON(end & ~huge_page_mask(h));
2329
2330         tlb_start_vma(tlb, vma);
2331         mmu_notifier_invalidate_range_start(mm, start, end);
2332 again:
2333         spin_lock(&mm->page_table_lock);
2334         for (address = start; address < end; address += sz) {
2335                 ptep = huge_pte_offset(mm, address);
2336                 if (!ptep)
2337                         continue;
2338
2339                 if (huge_pmd_unshare(mm, &address, ptep))
2340                         continue;
2341
2342                 pte = huge_ptep_get(ptep);
2343                 if (huge_pte_none(pte))
2344                         continue;
2345
2346                 /*
2347                  * A HWPoisoned hugepage is already unmapped and its reference dropped
2348                  */
2349                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2350                         continue;
2351
2352                 page = pte_page(pte);
2353                 /*
2354                  * If a reference page is supplied, it is because a specific
2355                  * page is being unmapped, not a range. Ensure the page we
2356                  * are about to unmap is the actual page of interest.
2357                  */
2358                 if (ref_page) {
2359                         if (page != ref_page)
2360                                 continue;
2361
2362                         /*
2363                          * Mark the VMA as having unmapped its page so that
2364                          * future faults in this VMA will fail rather than
2365                          * looking like data was lost
2366                          */
2367                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2368                 }
2369
2370                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2371                 tlb_remove_tlb_entry(tlb, ptep, address);
2372                 if (pte_dirty(pte))
2373                         set_page_dirty(page);
2374
2375                 page_remove_rmap(page);
2376                 force_flush = !__tlb_remove_page(tlb, page);
2377                 if (force_flush)
2378                         break;
2379                 /* Bail out after unmapping reference page if supplied */
2380                 if (ref_page)
2381                         break;
2382         }
2383         spin_unlock(&mm->page_table_lock);
2384         /*
2385          * mmu_gather ran out of room to batch pages, so we break out of
2386          * the PTE lock to avoid doing the potentially expensive TLB
2387          * invalidate and page-free while holding it.
2388          */
2389         if (force_flush) {
2390                 force_flush = 0;
2391                 tlb_flush_mmu(tlb);
2392                 if (address < end && !ref_page)
2393                         goto again;
2394         }
2395         mmu_notifier_invalidate_range_end(mm, start, end);
2396         tlb_end_vma(tlb, vma);
2397 }
2398
2399 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2400                           unsigned long end, struct page *ref_page)
2401 {
2402         struct mm_struct *mm;
2403         struct mmu_gather tlb;
2404
2405         mm = vma->vm_mm;
2406
2407         tlb_gather_mmu(&tlb, mm, 0);
2408         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2409         tlb_finish_mmu(&tlb, start, end);
2410 }
2411
2412 /*
2413  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2414  * mapping it owns the reserve page for. The intention is to unmap the page
2415  * from other VMAs and let the children be SIGKILLed if they are faulting the
2416  * same region.
2417  */
2418 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2419                                 struct page *page, unsigned long address)
2420 {
2421         struct hstate *h = hstate_vma(vma);
2422         struct vm_area_struct *iter_vma;
2423         struct address_space *mapping;
2424         struct prio_tree_iter iter;
2425         pgoff_t pgoff;
2426
2427         /*
2428          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2429          * from page cache lookup which is in HPAGE_SIZE units.
2430          */
2431         address = address & huge_page_mask(h);
2432         pgoff = vma_hugecache_offset(h, vma, address);
2433         mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2434
2435         /*
2436          * Take the mapping lock for the duration of the table walk. As
2437          * this mapping should be shared between all the VMAs,
2438          * __unmap_hugepage_range() is called with the lock already held.
2439          */
2440         mutex_lock(&mapping->i_mmap_mutex);
2441         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2442                 /* Do not unmap the current VMA */
2443                 if (iter_vma == vma)
2444                         continue;
2445
2446                 /*
2447                  * Unmap the page from other VMAs without their own reserves.
2448                  * They get marked to be SIGKILLed if they fault in these
2449                  * areas. This is because a future no-page fault on this VMA
2450                  * could insert a zeroed page instead of the data existing
2451                  * from the time of fork. This would look like data corruption.
2452                  */
2453                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2454                         unmap_hugepage_range(iter_vma, address,
2455                                              address + huge_page_size(h), page);
2456         }
2457         mutex_unlock(&mapping->i_mmap_mutex);
2458
2459         return 1;
2460 }
2461
2462 /*
2463  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2464  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2465  * cannot race with other handlers or page migration.
2466  * Keep the pte_same checks anyway to make transition from the mutex easier.
2467  */
2468 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2469                         unsigned long address, pte_t *ptep, pte_t pte,
2470                         struct page *pagecache_page)
2471 {
2472         struct hstate *h = hstate_vma(vma);
2473         struct page *old_page, *new_page;
2474         int avoidcopy;
2475         int outside_reserve = 0;
2476
2477         old_page = pte_page(pte);
2478
2479 retry_avoidcopy:
2480         /* If no-one else is actually using this page, avoid the copy
2481          * and just make the page writable */
2482         avoidcopy = (page_mapcount(old_page) == 1);
2483         if (avoidcopy) {
2484                 if (PageAnon(old_page))
2485                         page_move_anon_rmap(old_page, vma, address);
2486                 set_huge_ptep_writable(vma, address, ptep);
2487                 return 0;
2488         }
2489
2490         /*
2491          * If the process that created a MAP_PRIVATE mapping is about to
2492          * perform a COW due to a shared page count, attempt to satisfy
2493          * the allocation without using the existing reserves. The pagecache
2494          * page is used to determine if the reserve at this address was
2495          * consumed or not. If reserves were used, a partial faulted mapping
2496          * at the time of fork() could consume its reserves on COW instead
2497          * of the full address range.
2498          */
2499         if (!(vma->vm_flags & VM_MAYSHARE) &&
2500                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2501                         old_page != pagecache_page)
2502                 outside_reserve = 1;
2503
2504         page_cache_get(old_page);
2505
2506         /* Drop page_table_lock as buddy allocator may be called */
2507         spin_unlock(&mm->page_table_lock);
2508         new_page = alloc_huge_page(vma, address, outside_reserve);
2509
2510         if (IS_ERR(new_page)) {
2511                 long err = PTR_ERR(new_page);
2512                 page_cache_release(old_page);
2513
2514                 /*
2515                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2516                  * it is due to references held by a child and an insufficient
2517                  * huge page pool. To guarantee the original mapper's
2518                  * reliability, unmap the page from child processes. The child
2519                  * may get SIGKILLed if it later faults.
2520                  */
2521                 if (outside_reserve) {
2522                         BUG_ON(huge_pte_none(pte));
2523                         if (unmap_ref_private(mm, vma, old_page, address)) {
2524                                 BUG_ON(huge_pte_none(pte));
2525                                 spin_lock(&mm->page_table_lock);
2526                                 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2527                                 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2528                                         goto retry_avoidcopy;
2529                                 /*
2530                                  * A race occurred while re-acquiring page_table_lock, and
2531                                  * our job is done.
2532                                  */
2533                                 return 0;
2534                         }
2535                         WARN_ON_ONCE(1);
2536                 }
2537
2538                 /* Caller expects lock to be held */
2539                 spin_lock(&mm->page_table_lock);
2540                 if (err == -ENOMEM)
2541                         return VM_FAULT_OOM;
2542                 else
2543                         return VM_FAULT_SIGBUS;
2544         }
2545
2546         /*
2547          * When the original hugepage is a shared one, it does not have
2548          * an anon_vma prepared.
2549          */
2550         if (unlikely(anon_vma_prepare(vma))) {
2551                 page_cache_release(new_page);
2552                 page_cache_release(old_page);
2553                 /* Caller expects lock to be held */
2554                 spin_lock(&mm->page_table_lock);
2555                 return VM_FAULT_OOM;
2556         }
2557
2558         copy_user_huge_page(new_page, old_page, address, vma,
2559                             pages_per_huge_page(h));
2560         __SetPageUptodate(new_page);
2561
2562         /*
2563          * Retake the page_table_lock to check for racing updates
2564          * before the page tables are altered
2565          */
2566         spin_lock(&mm->page_table_lock);
2567         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2568         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2569                 /* Break COW */
2570                 mmu_notifier_invalidate_range_start(mm,
2571                         address & huge_page_mask(h),
2572                         (address & huge_page_mask(h)) + huge_page_size(h));
2573                 huge_ptep_clear_flush(vma, address, ptep);
2574                 set_huge_pte_at(mm, address, ptep,
2575                                 make_huge_pte(vma, new_page, 1));
2576                 page_remove_rmap(old_page);
2577                 hugepage_add_new_anon_rmap(new_page, vma, address);
2578                 /* Make the old page be freed below */
2579                 new_page = old_page;
2580                 mmu_notifier_invalidate_range_end(mm,
2581                         address & huge_page_mask(h),
2582                         (address & huge_page_mask(h)) + huge_page_size(h));
2583         }
2584         page_cache_release(new_page);
2585         page_cache_release(old_page);
2586         return 0;
2587 }
2588
2589 /* Return the pagecache page at a given address within a VMA */
2590 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2591                         struct vm_area_struct *vma, unsigned long address)
2592 {
2593         struct address_space *mapping;
2594         pgoff_t idx;
2595
2596         mapping = vma->vm_file->f_mapping;
2597         idx = vma_hugecache_offset(h, vma, address);
2598
2599         return find_lock_page(mapping, idx);
2600 }
2601
2602 /*
2603  * Return whether there is a pagecache page to back the given address within the VMA.
2604  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2605  */
2606 static bool hugetlbfs_pagecache_present(struct hstate *h,
2607                         struct vm_area_struct *vma, unsigned long address)
2608 {
2609         struct address_space *mapping;
2610         pgoff_t idx;
2611         struct page *page;
2612
2613         mapping = vma->vm_file->f_mapping;
2614         idx = vma_hugecache_offset(h, vma, address);
2615
2616         page = find_get_page(mapping, idx);
2617         if (page)
2618                 put_page(page);
2619         return page != NULL;
2620 }
2621
2622 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2623                         unsigned long address, pte_t *ptep, unsigned int flags)
2624 {
2625         struct hstate *h = hstate_vma(vma);
2626         int ret = VM_FAULT_SIGBUS;
2627         int anon_rmap = 0;
2628         pgoff_t idx;
2629         unsigned long size;
2630         struct page *page;
2631         struct address_space *mapping;
2632         pte_t new_pte;
2633
2634         /*
2635          * Currently, we are forced to kill the process in the event the
2636          * original mapper has unmapped pages from the child due to a failed
2637                  * COW. Warn that such a situation has occurred as it may not be obvious.
2638          */
2639         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2640                 printk(KERN_WARNING
2641                         "PID %d killed due to inadequate hugepage pool\n",
2642                         current->pid);
2643                 return ret;
2644         }
2645
2646         mapping = vma->vm_file->f_mapping;
2647         idx = vma_hugecache_offset(h, vma, address);
2648
2649         /*
2650          * Use page lock to guard against racing truncation
2651          * before we get page_table_lock.
2652          */
2653 retry:
2654         page = find_lock_page(mapping, idx);
2655         if (!page) {
2656                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2657                 if (idx >= size)
2658                         goto out;
2659                 page = alloc_huge_page(vma, address, 0);
2660                 if (IS_ERR(page)) {
2661                         ret = PTR_ERR(page);
2662                         if (ret == -ENOMEM)
2663                                 ret = VM_FAULT_OOM;
2664                         else
2665                                 ret = VM_FAULT_SIGBUS;
2666                         goto out;
2667                 }
2668                 clear_huge_page(page, address, pages_per_huge_page(h));
2669                 __SetPageUptodate(page);
2670
2671                 if (vma->vm_flags & VM_MAYSHARE) {
2672                         int err;
2673                         struct inode *inode = mapping->host;
2674
2675                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2676                         if (err) {
2677                                 put_page(page);
2678                                 if (err == -EEXIST)
2679                                         goto retry;
2680                                 goto out;
2681                         }
2682
2683                         spin_lock(&inode->i_lock);
2684                         inode->i_blocks += blocks_per_huge_page(h);
2685                         spin_unlock(&inode->i_lock);
2686                 } else {
2687                         lock_page(page);
2688                         if (unlikely(anon_vma_prepare(vma))) {
2689                                 ret = VM_FAULT_OOM;
2690                                 goto backout_unlocked;
2691                         }
2692                         anon_rmap = 1;
2693                 }
2694         } else {
2695                 /*
2696                  * If a memory error occurs between mmap() and fault, some processes
2697                  * don't have a hwpoisoned swap entry for the errored virtual address.
2698                  * So we need to block hugepage faults with a PG_hwpoison bit check.
2699                  */
2700                 if (unlikely(PageHWPoison(page))) {
2701                         ret = VM_FAULT_HWPOISON |
2702                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2703                         goto backout_unlocked;
2704                 }
2705         }
2706
2707         /*
2708          * If we are going to COW a private mapping later, we examine the
2709          * pending reservations for this page now. This will ensure that
2710          * any allocations necessary to record that reservation occur outside
2711          * the spinlock.
2712          */
2713         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2714                 if (vma_needs_reservation(h, vma, address) < 0) {
2715                         ret = VM_FAULT_OOM;
2716                         goto backout_unlocked;
2717                 }
2718
2719         spin_lock(&mm->page_table_lock);
2720         size = i_size_read(mapping->host) >> huge_page_shift(h);
2721         if (idx >= size)
2722                 goto backout;
2723
2724         ret = 0;
2725         if (!huge_pte_none(huge_ptep_get(ptep)))
2726                 goto backout;
2727
2728         if (anon_rmap)
2729                 hugepage_add_new_anon_rmap(page, vma, address);
2730         else
2731                 page_dup_rmap(page);
2732         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2733                                 && (vma->vm_flags & VM_SHARED)));
2734         set_huge_pte_at(mm, address, ptep, new_pte);
2735
2736         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2737                 /* Optimization, do the COW without a second fault */
2738                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2739         }
2740
2741         spin_unlock(&mm->page_table_lock);
2742         unlock_page(page);
2743 out:
2744         return ret;
2745
2746 backout:
2747         spin_unlock(&mm->page_table_lock);
2748 backout_unlocked:
2749         unlock_page(page);
2750         put_page(page);
2751         goto out;
2752 }
2753
2754 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2755                         unsigned long address, unsigned int flags)
2756 {
2757         pte_t *ptep;
2758         pte_t entry;
2759         int ret;
2760         struct page *page = NULL;
2761         struct page *pagecache_page = NULL;
2762         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2763         struct hstate *h = hstate_vma(vma);
2764
2765         address &= huge_page_mask(h);
2766
2767         ptep = huge_pte_offset(mm, address);
2768         if (ptep) {
2769                 entry = huge_ptep_get(ptep);
2770                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2771                         migration_entry_wait(mm, (pmd_t *)ptep, address);
2772                         return 0;
2773                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2774                         return VM_FAULT_HWPOISON_LARGE |
2775                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2776         }
2777
2778         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2779         if (!ptep)
2780                 return VM_FAULT_OOM;
2781
2782         /*
2783          * Serialize hugepage allocation and instantiation, so that we don't
2784          * get spurious allocation failures if two CPUs race to instantiate
2785          * the same page in the page cache.
2786          */
2787         mutex_lock(&hugetlb_instantiation_mutex);
2788         entry = huge_ptep_get(ptep);
2789         if (huge_pte_none(entry)) {
2790                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2791                 goto out_mutex;
2792         }
2793
2794         ret = 0;
2795
2796         /*
2797          * If we are going to COW the mapping later, we examine the pending
2798          * reservations for this page now. This will ensure that any
2799          * allocations necessary to record that reservation occur outside the
2800          * spinlock. For private mappings, we also lookup the pagecache
2801          * page now as it is used to determine if a reservation has been
2802          * consumed.
2803          */
2804         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2805                 if (vma_needs_reservation(h, vma, address) < 0) {
2806                         ret = VM_FAULT_OOM;
2807                         goto out_mutex;
2808                 }
2809
2810                 if (!(vma->vm_flags & VM_MAYSHARE))
2811                         pagecache_page = hugetlbfs_pagecache_page(h,
2812                                                                 vma, address);
2813         }
2814
2815         /*
2816          * hugetlb_cow() requires page locks of pte_page(entry) and
2817          * pagecache_page, so here we need to take the former one
2818          * when page != pagecache_page or !pagecache_page.
2819          * Note that locking order is always pagecache_page -> page,
2820          * so no worry about deadlock.
2821          */
2822         page = pte_page(entry);
2823         get_page(page);
2824         if (page != pagecache_page)
2825                 lock_page(page);
2826
2827         spin_lock(&mm->page_table_lock);
2828         /* Check for a racing update before calling hugetlb_cow */
2829         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2830                 goto out_page_table_lock;
2831
2832
2833         if (flags & FAULT_FLAG_WRITE) {
2834                 if (!pte_write(entry)) {
2835                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2836                                                         pagecache_page);
2837                         goto out_page_table_lock;
2838                 }
2839                 entry = pte_mkdirty(entry);
2840         }
2841         entry = pte_mkyoung(entry);
2842         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2843                                                 flags & FAULT_FLAG_WRITE))
2844                 update_mmu_cache(vma, address, ptep);
2845
2846 out_page_table_lock:
2847         spin_unlock(&mm->page_table_lock);
2848
2849         if (pagecache_page) {
2850                 unlock_page(pagecache_page);
2851                 put_page(pagecache_page);
2852         }
2853         if (page != pagecache_page)
2854                 unlock_page(page);
2855         put_page(page);
2856
2857 out_mutex:
2858         mutex_unlock(&hugetlb_instantiation_mutex);
2859
2860         return ret;
2861 }
2862
2863 /* Can be overridden by architectures */
2864 __attribute__((weak)) struct page *
2865 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2866                pud_t *pud, int write)
2867 {
2868         BUG();
2869         return NULL;
2870 }
2871
2872 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2873                         struct page **pages, struct vm_area_struct **vmas,
2874                         unsigned long *position, int *length, int i,
2875                         unsigned int flags)
2876 {
2877         unsigned long pfn_offset;
2878         unsigned long vaddr = *position;
2879         int remainder = *length;
2880         struct hstate *h = hstate_vma(vma);
2881
2882         spin_lock(&mm->page_table_lock);
2883         while (vaddr < vma->vm_end && remainder) {
2884                 pte_t *pte;
2885                 int absent;
2886                 struct page *page;
2887
2888                 /*
2889                  * Some archs (sparc64, sh*) have multiple pte_ts per
2890                  * hugepage.  We have to make sure we get the
2891                  * first, for the page indexing below to work.
2892                  */
2893                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2894                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2895
2896                 /*
2897                  * When coredumping, it suits get_dump_page if we just return
2898                  * an error where there's an empty slot with no huge pagecache
2899                  * to back it.  This way, we avoid allocating a hugepage, and
2900                  * the sparse dumpfile avoids allocating disk blocks, but its
2901                  * huge holes still show up with zeroes where they need to be.
2902                  */
2903                 if (absent && (flags & FOLL_DUMP) &&
2904                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2905                         remainder = 0;
2906                         break;
2907                 }
2908
2909                 if (absent ||
2910                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2911                         int ret;
2912
2913                         spin_unlock(&mm->page_table_lock);
2914                         ret = hugetlb_fault(mm, vma, vaddr,
2915                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2916                         spin_lock(&mm->page_table_lock);
2917                         if (!(ret & VM_FAULT_ERROR))
2918                                 continue;
2919
2920                         remainder = 0;
2921                         break;
2922                 }
2923
2924                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2925                 page = pte_page(huge_ptep_get(pte));
2926 same_page:
2927                 if (pages) {
2928                         pages[i] = mem_map_offset(page, pfn_offset);
2929                         get_page(pages[i]);
2930                 }
2931
2932                 if (vmas)
2933                         vmas[i] = vma;
2934
2935                 vaddr += PAGE_SIZE;
2936                 ++pfn_offset;
2937                 --remainder;
2938                 ++i;
2939                 if (vaddr < vma->vm_end && remainder &&
2940                                 pfn_offset < pages_per_huge_page(h)) {
2941                         /*
2942                          * We use pfn_offset to avoid touching the pageframes
2943                          * of this compound page.
2944                          */
2945                         goto same_page;
2946                 }
2947         }
2948         spin_unlock(&mm->page_table_lock);
2949         *length = remainder;
2950         *position = vaddr;
2951
2952         return i ? i : -EFAULT;
2953 }
2954
2955 void hugetlb_change_protection(struct vm_area_struct *vma,
2956                 unsigned long address, unsigned long end, pgprot_t newprot)
2957 {
2958         struct mm_struct *mm = vma->vm_mm;
2959         unsigned long start = address;
2960         pte_t *ptep;
2961         pte_t pte;
2962         struct hstate *h = hstate_vma(vma);
2963
2964         BUG_ON(address >= end);
2965         flush_cache_range(vma, address, end);
2966
2967         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2968         spin_lock(&mm->page_table_lock);
2969         for (; address < end; address += huge_page_size(h)) {
2970                 ptep = huge_pte_offset(mm, address);
2971                 if (!ptep)
2972                         continue;
2973                 if (huge_pmd_unshare(mm, &address, ptep))
2974                         continue;
2975                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2976                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2977                         pte = pte_mkhuge(pte_modify(pte, newprot));
2978                         set_huge_pte_at(mm, address, ptep, pte);
2979                 }
2980         }
2981         spin_unlock(&mm->page_table_lock);
2982         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2983
2984         flush_tlb_range(vma, start, end);
2985 }
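
/*
 * Illustrative sketch, not part of this file: roughly how a caller on the
 * mprotect path could strip write permission from a whole hugetlb VMA via
 * hugetlb_change_protection().  The helper name is made up;
 * vm_get_page_prot() converts VM_* flags into a pgprot_t.
 */
static void example_hugetlb_make_readonly(struct vm_area_struct *vma)
{
	pgprot_t newprot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);

	hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, newprot);
}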
2986
2987 int hugetlb_reserve_pages(struct inode *inode,
2988                                         long from, long to,
2989                                         struct vm_area_struct *vma,
2990                                         vm_flags_t vm_flags)
2991 {
2992         long ret, chg;
2993         struct hstate *h = hstate_inode(inode);
2994         struct hugepage_subpool *spool = subpool_inode(inode);
2995
2996         /*
2997          * Only apply the hugepage reservation if asked. At fault time, a
2998          * VM_NORESERVE mapping will attempt to allocate a page
2999          * without using reserves.
3000          */
3001         if (vm_flags & VM_NORESERVE)
3002                 return 0;
3003
3004         /*
3005          * Shared mappings base their reservation on the number of pages that
3006          * are already allocated on behalf of the file. Private mappings need
3007          * to reserve the full area even if read-only as mprotect() may be
3008          * called to make the mapping read-write. Assume !vma means a shm mapping.
3009          */
3010         if (!vma || vma->vm_flags & VM_MAYSHARE)
3011                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3012         else {
3013                 struct resv_map *resv_map = resv_map_alloc();
3014                 if (!resv_map)
3015                         return -ENOMEM;
3016
3017                 chg = to - from;
3018
3019                 set_vma_resv_map(vma, resv_map);
3020                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3021         }
3022
3023         if (chg < 0) {
3024                 ret = chg;
3025                 goto out_err;
3026         }
3027
3028         /* There must be enough pages in the subpool for the mapping */
3029         if (hugepage_subpool_get_pages(spool, chg)) {
3030                 ret = -ENOSPC;
3031                 goto out_err;
3032         }
3033
3034         /*
3035          * Check that enough hugepages are available for the reservation.
3036          * Hand the pages back to the subpool if there are not.
3037          */
3038         ret = hugetlb_acct_memory(h, chg);
3039         if (ret < 0) {
3040                 hugepage_subpool_put_pages(spool, chg);
3041                 goto out_err;
3042         }
3043
3044         /*
3045          * Account for the reservations made. Shared mappings record regions
3046          * that have reservations as they are shared by multiple VMAs.
3047          * When the last VMA disappears, the region map records how large
3048          * the reservation was and the page cache tells how much of
3049          * the reservation was consumed. Private mappings are per-VMA and
3050          * only the consumed reservations are tracked. When the VMA
3051          * disappears, the original reservation is the VMA size and the
3052          * consumed reservations are stored in the map. Hence, nothing
3053          * else has to be done for private mappings here.
3054          */
3055         if (!vma || vma->vm_flags & VM_MAYSHARE)
3056                 region_add(&inode->i_mapping->private_list, from, to);
3057         return 0;
3058 out_err:
3059         if (vma)
3060                 resv_map_put(vma);
3061         return ret;
3062 }
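
/*
 * Illustrative sketch, not part of this file: roughly how an mmap()-time
 * caller such as hugetlbfs could reserve hugepages for a new mapping.
 * "from" and "to" are hugepage indices into the file; the helper name is
 * made up and the alignment/error handling of the real caller is omitted.
 */
static int example_reserve_for_mmap(struct inode *inode,
				    struct vm_area_struct *vma)
{
	struct hstate *h = hstate_inode(inode);
	long from = vma->vm_pgoff >> huge_page_order(h);
	long to = from + ((vma->vm_end - vma->vm_start) >> huge_page_shift(h));

	return hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags);
}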
3063
3064 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3065 {
3066         struct hstate *h = hstate_inode(inode);
3067         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3068         struct hugepage_subpool *spool = subpool_inode(inode);
3069
3070         spin_lock(&inode->i_lock);
3071         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3072         spin_unlock(&inode->i_lock);
3073
3074         hugepage_subpool_put_pages(spool, (chg - freed));
3075         hugetlb_acct_memory(h, -(chg - freed));
3076 }
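
/*
 * Illustrative sketch, not part of this file: a hypothetical truncate-style
 * caller.  If, say, 10 hugepages were reserved beyond "start" but only 7
 * were ever faulted in (and thus freed from the page cache), the remaining
 * 10 - 7 = 3 unconsumed reservations are what hugetlb_unreserve_pages()
 * hands back to the subpool and the global pool.
 */
static void example_truncate_hugepages(struct inode *inode, pgoff_t start,
				       long freed)
{
	/* "start" is a hugepage index, "freed" the number of pages removed. */
	hugetlb_unreserve_pages(inode, start, freed);
}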
3077
3078 #ifdef CONFIG_MEMORY_FAILURE
3079
3080 /* Should be called with hugetlb_lock held */
3081 static int is_hugepage_on_freelist(struct page *hpage)
3082 {
3083         struct page *page;
3084         struct page *tmp;
3085         struct hstate *h = page_hstate(hpage);
3086         int nid = page_to_nid(hpage);
3087
3088         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3089                 if (page == hpage)
3090                         return 1;
3091         return 0;
3092 }
3093
3094 /*
3095  * This function is called from the memory-failure code.
3096  * The caller is assumed to hold the page lock of the head page.
3097  */
3098 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3099 {
3100         struct hstate *h = page_hstate(hpage);
3101         int nid = page_to_nid(hpage);
3102         int ret = -EBUSY;
3103
3104         spin_lock(&hugetlb_lock);
3105         if (is_hugepage_on_freelist(hpage)) {
3106                 list_del(&hpage->lru);
3107                 set_page_refcounted(hpage);
3108                 h->free_huge_pages--;
3109                 h->free_huge_pages_node[nid]--;
3110                 ret = 0;
3111         }
3112         spin_unlock(&hugetlb_lock);
3113         return ret;
3114 }
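
/*
 * Illustrative sketch, not part of this file: roughly how a memory-failure
 * handler could pull a poisoned hugepage off the free list.  The head page
 * lock is taken to satisfy the locking assumption documented above; the
 * helper name and surrounding policy are made up and heavily simplified.
 */
static int example_isolate_poisoned_hugepage(struct page *hpage)
{
	int ret;

	lock_page(hpage);
	ret = dequeue_hwpoisoned_huge_page(hpage);
	unlock_page(hpage);

	/* 0 means the page was removed from the free list; -EBUSY if in use. */
	return ret;
}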
3115 #endif