2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
29 #include <asm/pgtable.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
38 int hugepages_treat_as_movable;
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
44 * Minimum page order among possible hugepage sizes, set to a proper value
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
49 __initdata LIST_HEAD(huge_boot_pages);
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
60 DEFINE_SPINLOCK(hugetlb_lock);
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
66 static int num_fault_mutexes;
67 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
76 spin_unlock(&spool->lock);
78 /* If no pages are used, and no other handles to the subpool
79 * remain, give up any reservations based on minimum size and
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
92 struct hugepage_subpool *spool;
94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
98 spin_lock_init(&spool->lock);
100 spool->max_hpages = max_hpages;
102 spool->min_hpages = min_hpages;
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
108 spool->rsv_hpages = min_hpages;
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
118 unlock_or_release_subpool(spool);
122 * Subpool accounting for allocating and reserving pages.
123 * Return -ENOMEM if there are not enough resources to satisfy the
124 * request. Otherwise, return the number of pages by which the
125 * global pools must be adjusted (upward). The returned value may
126 * only be different than the passed value (delta) in the case where
127 * a subpool minimum size must be maintained.
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
137 spin_lock(&spool->lock);
139 if (spool->max_hpages != -1) { /* maximum size accounting */
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
148 if (spool->min_hpages != -1) { /* minimum size accounting */
149 if (delta > spool->rsv_hpages) {
151 * Asking for more reserves than those already taken on
152 * behalf of subpool. Return difference.
154 ret = delta - spool->rsv_hpages;
155 spool->rsv_hpages = 0;
157 ret = 0; /* reserves already accounted for */
158 spool->rsv_hpages -= delta;
163 spin_unlock(&spool->lock);
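/*
 * Worked example (illustrative, not part of the original source): with
 * min_hpages == 4 and rsv_hpages == 1, a request for delta == 3 pages
 * returns 3 - 1 == 2, i.e. only two pages must be charged against the
 * global pool because one page is already held in reserve for this
 * subpool.  A request fully covered by the reserve (delta <= rsv_hpages)
 * returns 0.
 */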
168 * Subpool accounting for freeing and unreserving pages.
169 * Return the number of global page reservations that must be dropped.
170 * The return value may only be different than the passed value (delta)
171 * in the case where a subpool minimum size must be maintained.
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
181 spin_lock(&spool->lock);
183 if (spool->max_hpages != -1) /* maximum size accounting */
184 spool->used_hpages -= delta;
186 if (spool->min_hpages != -1) { /* minimum size accounting */
187 if (spool->rsv_hpages + delta <= spool->min_hpages)
190 ret = spool->rsv_hpages + delta - spool->min_hpages;
192 spool->rsv_hpages += delta;
193 if (spool->rsv_hpages > spool->min_hpages)
194 spool->rsv_hpages = spool->min_hpages;
198 * If hugetlbfs_put_super couldn't free spool due to an outstanding
199 * quota reference, free it now.
201 unlock_or_release_subpool(spool);
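/*
 * Worked example (illustrative, not part of the original source): with
 * min_hpages == 4 and rsv_hpages == 3, putting back delta == 2 pages
 * would raise the reserve to 5, one above the minimum, so ret == 3 + 2 - 4
 * == 1 global reservation can be dropped and rsv_hpages is clamped back
 * to min_hpages.
 */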
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
208 return HUGETLBFS_SB(inode->i_sb)->spool;
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
213 return subpool_inode(file_inode(vma->vm_file));
217 * Region tracking -- allows tracking of reservations and instantiated pages
218 * across the pages in a mapping.
220 * The region data structures are embedded into a resv_map and protected
221 * by a resv_map's lock. The set of regions within the resv_map represent
222 * reservations for huge pages, or huge pages that have already been
223 * instantiated within the map. The from and to elements are huge page
224 * indices into the associated mapping. from indicates the starting index
225 * of the region. to represents the first index past the end of the region.
227 * For example, a file region structure with from == 0 and to == 4 represents
228 * four huge pages in a mapping. It is important to note that the to element
229 * represents the first element past the end of the region. This is used in
230 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
232 * Interval notation of the form [from, to) will be used to indicate that
233 * the endpoint from is inclusive and to is exclusive.
236 struct list_head link;
242 * Add the huge page range represented by [f, t) to the reserve
243 * map. Existing regions will be expanded to accommodate the
244 * specified range. We know only existing regions need to be
245 * expanded, because region_add is only called after region_chg
246 * with the same range. If a new file_region structure must
247 * be allocated, it is done in region_chg.
249 static long region_add(struct resv_map *resv, long f, long t)
251 struct list_head *head = &resv->regions;
252 struct file_region *rg, *nrg, *trg;
254 spin_lock(&resv->lock);
255 /* Locate the region we are either in or before. */
256 list_for_each_entry(rg, head, link)
260 /* Round our left edge to the current segment if it encloses us. */
264 /* Check for and consume any regions we now overlap with. */
266 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
267 if (&rg->link == head)
272 /* If this area reaches higher, then extend our area to
273 * include it completely. If this is not the first area
274 * which we intend to reuse, free it. */
284 spin_unlock(&resv->lock);
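/*
 * Example (illustrative, not part of the original source): a reserve
 * map holding [0, 2) that is handed the range [1, 4) ends up with the
 * single merged region [0, 4).  The preceding region_chg(resv, 1, 4)
 * would have reported 2 (huge page indices 2 and 3 were not yet
 * represented), and region_add() performs the merge without allocating,
 * since any file_region it might need was set up by region_chg().
 */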
289 * Examine the existing reserve map and determine how many
290 * huge pages in the specified range [f, t) are NOT currently
291 * represented. This routine is called before a subsequent
292 * call to region_add that will actually modify the reserve
293 * map to add the specified range [f, t). region_chg does
294 * not change the number of huge pages represented by the
295 * map. However, if the existing regions in the map can not
296 * be expanded to represent the new range, a new file_region
297 * structure is added to the map as a placeholder. This is
298 * so that the subsequent region_add call will have all the
299 * regions it needs and will not fail.
301 * Returns the number of huge pages that need to be added
302 * to the existing reservation map for the range [f, t).
303 * This number is greater than or equal to zero. -ENOMEM is
304 * returned if a new file_region structure is needed and can
307 static long region_chg(struct resv_map *resv, long f, long t)
309 struct list_head *head = &resv->regions;
310 struct file_region *rg, *nrg = NULL;
314 spin_lock(&resv->lock);
315 /* Locate the region we are before or in. */
316 list_for_each_entry(rg, head, link)
320 /* If we are below the current region then a new region is required.
321 * Subtle: allocate a new region at the position but make it zero
322 * size such that we can guarantee to record the reservation. */
323 if (&rg->link == head || t < rg->from) {
325 spin_unlock(&resv->lock);
326 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
332 INIT_LIST_HEAD(&nrg->link);
336 list_add(&nrg->link, rg->link.prev);
341 /* Round our left edge to the current segment if it encloses us. */
346 /* Check for and consume any regions we now overlap with. */
347 list_for_each_entry(rg, rg->link.prev, link) {
348 if (&rg->link == head)
353 /* We overlap with this area; if it extends further than
354 * us then we must extend ourselves. Account for its
355 * existing reservation. */
360 chg -= rg->to - rg->from;
364 spin_unlock(&resv->lock);
365 /* We already know we raced and no longer need the new region */
369 spin_unlock(&resv->lock);
374 * Truncate the reserve map at index 'end'. Modify/truncate any
375 * region which contains end. Delete any regions past end.
376 * Return the number of huge pages removed from the map.
378 static long region_truncate(struct resv_map *resv, long end)
380 struct list_head *head = &resv->regions;
381 struct file_region *rg, *trg;
384 spin_lock(&resv->lock);
385 /* Locate the region we are either in or before. */
386 list_for_each_entry(rg, head, link)
389 if (&rg->link == head)
392 /* If we are in the middle of a region then adjust it. */
393 if (end > rg->from) {
396 rg = list_entry(rg->link.next, typeof(*rg), link);
399 /* Drop any remaining regions. */
400 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
401 if (&rg->link == head)
403 chg += rg->to - rg->from;
409 spin_unlock(&resv->lock);
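/*
 * Example (illustrative, not part of the original source): truncating a
 * reserve map holding [0, 3) and [5, 8) at end == 1 trims the first
 * region to [0, 1), deletes [5, 8) entirely, and returns 2 + 3 == 5
 * huge pages removed from the map.
 */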
414 * Count and return the number of huge pages in the reserve map
415 * that intersect with the range [f, t).
417 static long region_count(struct resv_map *resv, long f, long t)
419 struct list_head *head = &resv->regions;
420 struct file_region *rg;
423 spin_lock(&resv->lock);
424 /* Locate each segment we overlap with, and count that overlap. */
425 list_for_each_entry(rg, head, link) {
434 seg_from = max(rg->from, f);
435 seg_to = min(rg->to, t);
437 chg += seg_to - seg_from;
439 spin_unlock(&resv->lock);
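/*
 * Example (illustrative, not part of the original source): with regions
 * [0, 4) and [6, 10) in the map, region_count(resv, 2, 8) counts the
 * overlaps [2, 4) and [6, 8) and returns 2 + 2 == 4.
 */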
445 * Convert the address within this vma to the page offset within
446 * the mapping, in pagecache page units; huge pages here.
448 static pgoff_t vma_hugecache_offset(struct hstate *h,
449 struct vm_area_struct *vma, unsigned long address)
451 return ((address - vma->vm_start) >> huge_page_shift(h)) +
452 (vma->vm_pgoff >> huge_page_order(h));
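/*
 * Worked example (illustrative, assuming a 2 MB hstate on a 4 KB base
 * page architecture, i.e. huge_page_shift == 21 and huge_page_order == 9):
 * a fault at vma->vm_start + 4 MB in a mapping with vm_pgoff == 0 yields
 * (4 MB >> 21) + 0 == huge page index 2 within the file.
 */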
455 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
456 unsigned long address)
458 return vma_hugecache_offset(hstate_vma(vma), vma, address);
462 * Return the size of the pages allocated when backing a VMA. In the majority
463 * of cases this will be the same size as used by the page table entries.
465 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
467 struct hstate *hstate;
469 if (!is_vm_hugetlb_page(vma))
472 hstate = hstate_vma(vma);
474 return 1UL << huge_page_shift(hstate);
476 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
479 * Return the page size being used by the MMU to back a VMA. In the majority
480 * of cases, the page size used by the kernel matches the MMU size. On
481 * architectures where it differs, an architecture-specific version of this
482 * function is required.
484 #ifndef vma_mmu_pagesize
485 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
487 return vma_kernel_pagesize(vma);
492 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
493 * bits of the reservation map pointer, which are always clear due to
496 #define HPAGE_RESV_OWNER (1UL << 0)
497 #define HPAGE_RESV_UNMAPPED (1UL << 1)
498 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
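/*
 * Illustrative sketch (not part of the original source): a kmalloc()ed
 * resv_map is at least word aligned, so the low bits of the pointer
 * kept in vm_private_data are free to carry the flags above:
 *
 *	unsigned long v = get_vma_private_data(vma);
 *	struct resv_map *map = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *	bool owner = v & HPAGE_RESV_OWNER;
 */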
501 * These helpers are used to track how many pages are reserved for
502 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
503 * is guaranteed to have its future faults succeed.
505 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
506 * the reserve counters are updated with the hugetlb_lock held. It is safe
507 * to reset the VMA at fork() time as it is not in use yet and there is no
508 * chance of the global counters getting corrupted as a result of the values.
510 * The private mapping reservation is represented in a subtly different
511 * manner to a shared mapping. A shared mapping has a region map associated
512 * with the underlying file; this region map represents the backing file
513 * pages which have ever had a reservation assigned, and it persists even
514 * after the page is instantiated. A private mapping has a region map
515 * associated with the original mmap which is attached to all VMAs which
516 * reference it; this region map represents those offsets which have consumed
517 * a reservation, i.e. where pages have been instantiated.
519 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
521 return (unsigned long)vma->vm_private_data;
524 static void set_vma_private_data(struct vm_area_struct *vma,
527 vma->vm_private_data = (void *)value;
530 struct resv_map *resv_map_alloc(void)
532 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
536 kref_init(&resv_map->refs);
537 spin_lock_init(&resv_map->lock);
538 INIT_LIST_HEAD(&resv_map->regions);
543 void resv_map_release(struct kref *ref)
545 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
547 /* Clear out any active regions before we release the map. */
548 region_truncate(resv_map, 0);
552 static inline struct resv_map *inode_resv_map(struct inode *inode)
554 return inode->i_mapping->private_data;
557 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
559 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
560 if (vma->vm_flags & VM_MAYSHARE) {
561 struct address_space *mapping = vma->vm_file->f_mapping;
562 struct inode *inode = mapping->host;
564 return inode_resv_map(inode);
567 return (struct resv_map *)(get_vma_private_data(vma) &
572 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
574 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
575 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
577 set_vma_private_data(vma, (get_vma_private_data(vma) &
578 HPAGE_RESV_MASK) | (unsigned long)map);
581 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
583 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
584 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
586 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
589 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
591 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
593 return (get_vma_private_data(vma) & flag) != 0;
596 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
597 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
599 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
600 if (!(vma->vm_flags & VM_MAYSHARE))
601 vma->vm_private_data = (void *)0;
604 /* Returns true if the VMA has associated reserve pages */
605 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
607 if (vma->vm_flags & VM_NORESERVE) {
609 * This address is already reserved by another process (chg == 0),
610 * so we should decrement the reserved count. Without decrementing,
611 * the reserve count remains after releasing the inode, because this
612 * allocated page will go into the page cache and be regarded as
613 * coming from the reserved pool when it is released. Currently, we
614 * don't have any other solution to deal with this situation
615 * properly, so add a workaround here.
617 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
623 /* Shared mappings always use reserves */
624 if (vma->vm_flags & VM_MAYSHARE)
628 * Only the process that called mmap() has reserves for
631 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
637 static void enqueue_huge_page(struct hstate *h, struct page *page)
639 int nid = page_to_nid(page);
640 list_move(&page->lru, &h->hugepage_freelists[nid]);
641 h->free_huge_pages++;
642 h->free_huge_pages_node[nid]++;
645 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
649 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
650 if (!is_migrate_isolate_page(page))
653 * If a non-isolated free hugepage is not found on the list,
654 * the allocation fails.
656 if (&h->hugepage_freelists[nid] == &page->lru)
658 list_move(&page->lru, &h->hugepage_activelist);
659 set_page_refcounted(page);
660 h->free_huge_pages--;
661 h->free_huge_pages_node[nid]--;
665 /* Movability of hugepages depends on migration support. */
666 static inline gfp_t htlb_alloc_mask(struct hstate *h)
668 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
669 return GFP_HIGHUSER_MOVABLE;
674 static struct page *dequeue_huge_page_vma(struct hstate *h,
675 struct vm_area_struct *vma,
676 unsigned long address, int avoid_reserve,
679 struct page *page = NULL;
680 struct mempolicy *mpol;
681 nodemask_t *nodemask;
682 struct zonelist *zonelist;
685 unsigned int cpuset_mems_cookie;
688 * A child process with MAP_PRIVATE mappings created by its parent
689 * has no page reserves. This check ensures that reservations are
690 * not "stolen". The child may still get SIGKILLed
692 if (!vma_has_reserves(vma, chg) &&
693 h->free_huge_pages - h->resv_huge_pages == 0)
696 /* If reserves cannot be used, ensure enough pages are in the pool */
697 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
701 cpuset_mems_cookie = read_mems_allowed_begin();
702 zonelist = huge_zonelist(vma, address,
703 htlb_alloc_mask(h), &mpol, &nodemask);
705 for_each_zone_zonelist_nodemask(zone, z, zonelist,
706 MAX_NR_ZONES - 1, nodemask) {
707 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
708 page = dequeue_huge_page_node(h, zone_to_nid(zone));
712 if (!vma_has_reserves(vma, chg))
715 SetPagePrivate(page);
716 h->resv_huge_pages--;
723 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
732 * common helper functions for hstate_next_node_to_{alloc|free}.
733 * We may have allocated or freed a huge page based on a different
734 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
735 * be outside of *nodes_allowed. Ensure that we use an allowed
736 * node for alloc or free.
738 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
740 nid = next_node(nid, *nodes_allowed);
741 if (nid == MAX_NUMNODES)
742 nid = first_node(*nodes_allowed);
743 VM_BUG_ON(nid >= MAX_NUMNODES);
748 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
750 if (!node_isset(nid, *nodes_allowed))
751 nid = next_node_allowed(nid, nodes_allowed);
756 * returns the previously saved node ["this node"] from which to
757 * allocate a persistent huge page for the pool and advance the
758 * next node from which to allocate, handling wrap at end of node
761 static int hstate_next_node_to_alloc(struct hstate *h,
762 nodemask_t *nodes_allowed)
766 VM_BUG_ON(!nodes_allowed);
768 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
769 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
775 * helper for free_pool_huge_page() - return the previously saved
776 * node ["this node"] from which to free a huge page. Advance the
777 * next node id whether or not we find a free huge page to free so
778 * that the next attempt to free addresses the next node.
780 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
784 VM_BUG_ON(!nodes_allowed);
786 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
787 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
792 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
793 for (nr_nodes = nodes_weight(*mask); \
795 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
798 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
799 for (nr_nodes = nodes_weight(*mask); \
801 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
804 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
805 static void destroy_compound_gigantic_page(struct page *page,
809 int nr_pages = 1 << order;
810 struct page *p = page + 1;
812 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
814 set_page_refcounted(p);
815 p->first_page = NULL;
818 set_compound_order(page, 0);
819 __ClearPageHead(page);
822 static void free_gigantic_page(struct page *page, unsigned order)
824 free_contig_range(page_to_pfn(page), 1 << order);
827 static int __alloc_gigantic_page(unsigned long start_pfn,
828 unsigned long nr_pages)
830 unsigned long end_pfn = start_pfn + nr_pages;
831 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
834 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
835 unsigned long nr_pages)
837 unsigned long i, end_pfn = start_pfn + nr_pages;
840 for (i = start_pfn; i < end_pfn; i++) {
844 page = pfn_to_page(i);
846 if (PageReserved(page))
849 if (page_count(page) > 0)
859 static bool zone_spans_last_pfn(const struct zone *zone,
860 unsigned long start_pfn, unsigned long nr_pages)
862 unsigned long last_pfn = start_pfn + nr_pages - 1;
863 return zone_spans_pfn(zone, last_pfn);
866 static struct page *alloc_gigantic_page(int nid, unsigned order)
868 unsigned long nr_pages = 1 << order;
869 unsigned long ret, pfn, flags;
872 z = NODE_DATA(nid)->node_zones;
873 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
874 spin_lock_irqsave(&z->lock, flags);
876 pfn = ALIGN(z->zone_start_pfn, nr_pages);
877 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
878 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
880 * We release the zone lock here because
881 * alloc_contig_range() will also lock the zone
882 * at some point. If there's an allocation
883 * spinning on this lock, it may win the race
884 * and cause alloc_contig_range() to fail...
886 spin_unlock_irqrestore(&z->lock, flags);
887 ret = __alloc_gigantic_page(pfn, nr_pages);
889 return pfn_to_page(pfn);
890 spin_lock_irqsave(&z->lock, flags);
895 spin_unlock_irqrestore(&z->lock, flags);
901 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
902 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
904 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
908 page = alloc_gigantic_page(nid, huge_page_order(h));
910 prep_compound_gigantic_page(page, huge_page_order(h));
911 prep_new_huge_page(h, page, nid);
917 static int alloc_fresh_gigantic_page(struct hstate *h,
918 nodemask_t *nodes_allowed)
920 struct page *page = NULL;
923 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
924 page = alloc_fresh_gigantic_page_node(h, node);
932 static inline bool gigantic_page_supported(void) { return true; }
934 static inline bool gigantic_page_supported(void) { return false; }
935 static inline void free_gigantic_page(struct page *page, unsigned order) { }
936 static inline void destroy_compound_gigantic_page(struct page *page,
937 unsigned long order) { }
938 static inline int alloc_fresh_gigantic_page(struct hstate *h,
939 nodemask_t *nodes_allowed) { return 0; }
942 static void update_and_free_page(struct hstate *h, struct page *page)
946 if (hstate_is_gigantic(h) && !gigantic_page_supported())
950 h->nr_huge_pages_node[page_to_nid(page)]--;
951 for (i = 0; i < pages_per_huge_page(h); i++) {
952 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
953 1 << PG_referenced | 1 << PG_dirty |
954 1 << PG_active | 1 << PG_private |
957 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
958 set_compound_page_dtor(page, NULL);
959 set_page_refcounted(page);
960 if (hstate_is_gigantic(h)) {
961 destroy_compound_gigantic_page(page, huge_page_order(h));
962 free_gigantic_page(page, huge_page_order(h));
964 arch_release_hugepage(page);
965 __free_pages(page, huge_page_order(h));
969 struct hstate *size_to_hstate(unsigned long size)
974 if (huge_page_size(h) == size)
981 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
982 * to hstate->hugepage_activelist).
984 * This function can be called for tail pages, but never returns true for them.
986 bool page_huge_active(struct page *page)
988 VM_BUG_ON_PAGE(!PageHuge(page), page);
989 return PageHead(page) && PagePrivate(&page[1]);
992 /* never called for tail page */
993 static void set_page_huge_active(struct page *page)
995 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
996 SetPagePrivate(&page[1]);
999 static void clear_page_huge_active(struct page *page)
1001 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1002 ClearPagePrivate(&page[1]);
1005 void free_huge_page(struct page *page)
1008 * Can't pass hstate in here because it is called from the
1009 * compound page destructor.
1011 struct hstate *h = page_hstate(page);
1012 int nid = page_to_nid(page);
1013 struct hugepage_subpool *spool =
1014 (struct hugepage_subpool *)page_private(page);
1015 bool restore_reserve;
1017 set_page_private(page, 0);
1018 page->mapping = NULL;
1019 BUG_ON(page_count(page));
1020 BUG_ON(page_mapcount(page));
1021 restore_reserve = PagePrivate(page);
1022 ClearPagePrivate(page);
1025 * A return code of zero implies that the subpool will be under its
1026 * minimum size if the reservation is not restored after the page is freed.
1027 * Therefore, force restore_reserve operation.
1029 if (hugepage_subpool_put_pages(spool, 1) == 0)
1030 restore_reserve = true;
1032 spin_lock(&hugetlb_lock);
1033 clear_page_huge_active(page);
1034 hugetlb_cgroup_uncharge_page(hstate_index(h),
1035 pages_per_huge_page(h), page);
1036 if (restore_reserve)
1037 h->resv_huge_pages++;
1039 if (h->surplus_huge_pages_node[nid]) {
1040 /* remove the page from active list */
1041 list_del(&page->lru);
1042 update_and_free_page(h, page);
1043 h->surplus_huge_pages--;
1044 h->surplus_huge_pages_node[nid]--;
1046 arch_clear_hugepage_flags(page);
1047 enqueue_huge_page(h, page);
1049 spin_unlock(&hugetlb_lock);
1052 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1054 INIT_LIST_HEAD(&page->lru);
1055 set_compound_page_dtor(page, free_huge_page);
1056 spin_lock(&hugetlb_lock);
1057 set_hugetlb_cgroup(page, NULL);
1059 h->nr_huge_pages_node[nid]++;
1060 spin_unlock(&hugetlb_lock);
1061 put_page(page); /* free it into the hugepage allocator */
1064 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
1067 int nr_pages = 1 << order;
1068 struct page *p = page + 1;
1070 /* we rely on prep_new_huge_page to set the destructor */
1071 set_compound_order(page, order);
1072 __SetPageHead(page);
1073 __ClearPageReserved(page);
1074 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1076 * For gigantic hugepages allocated through bootmem at
1077 * boot, it's safer to be consistent with the not-gigantic
1078 * hugepages and clear the PG_reserved bit from all tail pages
1079 * too. Otherwise drivers using get_user_pages() to access tail
1080 * pages may get the reference counting wrong if they see
1081 * PG_reserved set on a tail page (despite the head page not
1082 * having PG_reserved set). Enforcing this consistency between
1083 * head and tail pages allows drivers to optimize away a check
1084 * on the head page when they need to know if put_page() is needed
1085 * after get_user_pages().
1087 __ClearPageReserved(p);
1088 set_page_count(p, 0);
1089 p->first_page = page;
1090 /* Make sure p->first_page is always valid for PageTail() */
1097 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1098 * transparent huge pages. See the PageTransHuge() documentation for more
1101 int PageHuge(struct page *page)
1103 if (!PageCompound(page))
1106 page = compound_head(page);
1107 return get_compound_page_dtor(page) == free_huge_page;
1109 EXPORT_SYMBOL_GPL(PageHuge);
1112 * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
1113 * normal or transparent huge pages.
1115 int PageHeadHuge(struct page *page_head)
1117 if (!PageHead(page_head))
1120 return get_compound_page_dtor(page_head) == free_huge_page;
1123 pgoff_t __basepage_index(struct page *page)
1125 struct page *page_head = compound_head(page);
1126 pgoff_t index = page_index(page_head);
1127 unsigned long compound_idx;
1129 if (!PageHuge(page_head))
1130 return page_index(page);
1132 if (compound_order(page_head) >= MAX_ORDER)
1133 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1135 compound_idx = page - page_head;
1137 return (index << compound_order(page_head)) + compound_idx;
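/*
 * Worked example (illustrative, not part of the original source): for a
 * 2 MB compound page (compound_order == 9, i.e. 512 base pages) whose
 * head sits at huge page index 3 in the file, the tail page 5 base
 * pages into the compound page has base page index 3 * 512 + 5 == 1541.
 */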
1140 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1144 page = alloc_pages_exact_node(nid,
1145 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1146 __GFP_REPEAT|__GFP_NOWARN,
1147 huge_page_order(h));
1149 if (arch_prepare_hugepage(page)) {
1150 __free_pages(page, huge_page_order(h));
1153 prep_new_huge_page(h, page, nid);
1159 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1165 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1166 page = alloc_fresh_huge_page_node(h, node);
1174 count_vm_event(HTLB_BUDDY_PGALLOC);
1176 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1182 * Free a huge page from the pool, taking it from the next node to free.
1183 * Attempt to keep persistent huge pages more or less
1184 * balanced over allowed nodes.
1185 * Called with hugetlb_lock locked.
1187 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1193 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1195 * If we're returning unused surplus pages, only examine
1196 * nodes with surplus pages.
1198 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1199 !list_empty(&h->hugepage_freelists[node])) {
1201 list_entry(h->hugepage_freelists[node].next,
1203 list_del(&page->lru);
1204 h->free_huge_pages--;
1205 h->free_huge_pages_node[node]--;
1207 h->surplus_huge_pages--;
1208 h->surplus_huge_pages_node[node]--;
1210 update_and_free_page(h, page);
1220 * Dissolve a given free hugepage into free buddy pages. This function does
1221 * nothing for in-use (including surplus) hugepages.
1223 static void dissolve_free_huge_page(struct page *page)
1225 spin_lock(&hugetlb_lock);
1226 if (PageHuge(page) && !page_count(page)) {
1227 struct hstate *h = page_hstate(page);
1228 int nid = page_to_nid(page);
1229 list_del(&page->lru);
1230 h->free_huge_pages--;
1231 h->free_huge_pages_node[nid]--;
1232 update_and_free_page(h, page);
1234 spin_unlock(&hugetlb_lock);
1238 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1239 * make specified memory blocks removable from the system.
1240 * Note that start_pfn should be aligned with the (minimum) hugepage size.
1242 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1246 if (!hugepages_supported())
1249 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1250 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1251 dissolve_free_huge_page(pfn_to_page(pfn));
1254 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1259 if (hstate_is_gigantic(h))
1263 * Assume we will successfully allocate the surplus page to
1264 * prevent racing processes from causing the surplus to exceed
1267 * This however introduces a different race, where a process B
1268 * tries to grow the static hugepage pool while alloc_pages() is
1269 * called by process A. B will only examine the per-node
1270 * counters in determining if surplus huge pages can be
1271 * converted to normal huge pages in adjust_pool_surplus(). A
1272 * won't be able to increment the per-node counter, until the
1273 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1274 * no more huge pages can be converted from surplus to normal
1275 * state (and doesn't try to convert again). Thus, we have a
1276 * case where a surplus huge page exists, the pool is grown, and
1277 * the surplus huge page still exists after, even though it
1278 * should just have been converted to a normal huge page. This
1279 * does not leak memory, though, as the hugepage will be freed
1280 * once it is out of use. It also does not allow the counters to
1281 * go out of whack in adjust_pool_surplus() as we don't modify
1282 * the node values until we've gotten the hugepage and only the
1283 * per-node value is checked there.
1285 spin_lock(&hugetlb_lock);
1286 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1287 spin_unlock(&hugetlb_lock);
1291 h->surplus_huge_pages++;
1293 spin_unlock(&hugetlb_lock);
1295 if (nid == NUMA_NO_NODE)
1296 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1297 __GFP_REPEAT|__GFP_NOWARN,
1298 huge_page_order(h));
1300 page = alloc_pages_exact_node(nid,
1301 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1302 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1304 if (page && arch_prepare_hugepage(page)) {
1305 __free_pages(page, huge_page_order(h));
1309 spin_lock(&hugetlb_lock);
1311 INIT_LIST_HEAD(&page->lru);
1312 r_nid = page_to_nid(page);
1313 set_compound_page_dtor(page, free_huge_page);
1314 set_hugetlb_cgroup(page, NULL);
1316 * We incremented the global counters already
1318 h->nr_huge_pages_node[r_nid]++;
1319 h->surplus_huge_pages_node[r_nid]++;
1320 __count_vm_event(HTLB_BUDDY_PGALLOC);
1323 h->surplus_huge_pages--;
1324 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1326 spin_unlock(&hugetlb_lock);
1332 * This allocation function is useful in the context where vma is irrelevant.
1333 * E.g. soft-offlining uses this function because it only cares about
1334 * the physical address of the error page.
1336 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1338 struct page *page = NULL;
1340 spin_lock(&hugetlb_lock);
1341 if (h->free_huge_pages - h->resv_huge_pages > 0)
1342 page = dequeue_huge_page_node(h, nid);
1343 spin_unlock(&hugetlb_lock);
1346 page = alloc_buddy_huge_page(h, nid);
1352 * Increase the hugetlb pool such that it can accommodate a reservation
1355 static int gather_surplus_pages(struct hstate *h, int delta)
1357 struct list_head surplus_list;
1358 struct page *page, *tmp;
1360 int needed, allocated;
1361 bool alloc_ok = true;
1363 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1365 h->resv_huge_pages += delta;
1370 INIT_LIST_HEAD(&surplus_list);
1374 spin_unlock(&hugetlb_lock);
1375 for (i = 0; i < needed; i++) {
1376 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1381 list_add(&page->lru, &surplus_list);
1386 * After retaking hugetlb_lock, we need to recalculate 'needed'
1387 * because either resv_huge_pages or free_huge_pages may have changed.
1389 spin_lock(&hugetlb_lock);
1390 needed = (h->resv_huge_pages + delta) -
1391 (h->free_huge_pages + allocated);
1396 * We were not able to allocate enough pages to
1397 * satisfy the entire reservation so we free what
1398 * we've allocated so far.
1403 * The surplus_list now contains _at_least_ the number of extra pages
1404 * needed to accommodate the reservation. Add the appropriate number
1405 * of pages to the hugetlb pool and free the extras back to the buddy
1406 * allocator. Commit the entire reservation here to prevent another
1407 * process from stealing the pages as they are added to the pool but
1408 * before they are reserved.
1410 needed += allocated;
1411 h->resv_huge_pages += delta;
1414 /* Free the needed pages to the hugetlb pool */
1415 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1419 * This page is now managed by the hugetlb allocator and has
1420 * no users -- drop the buddy allocator's reference.
1422 put_page_testzero(page);
1423 VM_BUG_ON_PAGE(page_count(page), page);
1424 enqueue_huge_page(h, page);
1427 spin_unlock(&hugetlb_lock);
1429 /* Free unnecessary surplus pages to the buddy allocator */
1430 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1432 spin_lock(&hugetlb_lock);
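/*
 * Worked example (illustrative, not part of the original source): with
 * resv_huge_pages == 10, free_huge_pages == 12 and delta == 5, the
 * first pass computes needed = (10 + 5) - 12 == 3, so three surplus
 * pages are requested from the buddy allocator; had free_huge_pages
 * been 15 or more, needed would be <= 0 and the reservation could be
 * met from the existing pool.
 */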
1438 * When releasing a hugetlb pool reservation, any surplus pages that were
1439 * allocated to satisfy the reservation must be explicitly freed if they were
1441 * Called with hugetlb_lock held.
1443 static void return_unused_surplus_pages(struct hstate *h,
1444 unsigned long unused_resv_pages)
1446 unsigned long nr_pages;
1448 /* Uncommit the reservation */
1449 h->resv_huge_pages -= unused_resv_pages;
1451 /* Cannot return gigantic pages currently */
1452 if (hstate_is_gigantic(h))
1455 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1458 * We want to release as many surplus pages as possible, spread
1459 * evenly across all nodes with memory. Iterate across these nodes
1460 * until we can no longer free unreserved surplus pages. This occurs
1461 * when the nodes with surplus pages have no free pages.
1462 * free_pool_huge_page() will balance the freed pages across the
1463 * on-line nodes with memory and will handle the hstate accounting.
1465 while (nr_pages--) {
1466 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1468 cond_resched_lock(&hugetlb_lock);
1473 * Determine if the huge page at addr within the vma has an associated
1474 * reservation. Where it does not we will need to logically increase
1475 * reservation and actually increase subpool usage before an allocation
1476 * can occur. Where any new reservation would be required the
1477 * reservation change is prepared, but not committed. Once the page
1478 * has been allocated from the subpool and instantiated the change should
1479 * be committed via vma_commit_reservation. No action is required on
1482 static long vma_needs_reservation(struct hstate *h,
1483 struct vm_area_struct *vma, unsigned long addr)
1485 struct resv_map *resv;
1489 resv = vma_resv_map(vma);
1493 idx = vma_hugecache_offset(h, vma, addr);
1494 chg = region_chg(resv, idx, idx + 1);
1496 if (vma->vm_flags & VM_MAYSHARE)
1499 return chg < 0 ? chg : 0;
1501 static void vma_commit_reservation(struct hstate *h,
1502 struct vm_area_struct *vma, unsigned long addr)
1504 struct resv_map *resv;
1507 resv = vma_resv_map(vma);
1511 idx = vma_hugecache_offset(h, vma, addr);
1512 region_add(resv, idx, idx + 1);
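/*
 * Illustrative usage sketch (this is the pairing that alloc_huge_page()
 * below relies on, shown in isolation):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	... allocate or dequeue the huge page ...
 *	vma_commit_reservation(h, vma, addr);
 */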
1515 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1516 unsigned long addr, int avoid_reserve)
1518 struct hugepage_subpool *spool = subpool_vma(vma);
1519 struct hstate *h = hstate_vma(vma);
1523 struct hugetlb_cgroup *h_cg;
1525 idx = hstate_index(h);
1527 * Processes that did not create the mapping will have no
1528 * reserves and will not have accounted against subpool
1529 * limit. Check that the subpool limit can be made before
1530 * satisfying the allocation. MAP_NORESERVE mappings may also
1531 * need pages and subpool limit allocated if no reserve
1534 chg = vma_needs_reservation(h, vma, addr);
1536 return ERR_PTR(-ENOMEM);
1537 if (chg || avoid_reserve)
1538 if (hugepage_subpool_get_pages(spool, 1) < 0)
1539 return ERR_PTR(-ENOSPC);
1541 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1543 goto out_subpool_put;
1545 spin_lock(&hugetlb_lock);
1546 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1548 spin_unlock(&hugetlb_lock);
1549 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1551 goto out_uncharge_cgroup;
1553 spin_lock(&hugetlb_lock);
1554 list_move(&page->lru, &h->hugepage_activelist);
1557 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1558 spin_unlock(&hugetlb_lock);
1560 set_page_private(page, (unsigned long)spool);
1562 vma_commit_reservation(h, vma, addr);
1565 out_uncharge_cgroup:
1566 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1568 if (chg || avoid_reserve)
1569 hugepage_subpool_put_pages(spool, 1);
1570 return ERR_PTR(-ENOSPC);
1574 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1575 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1576 * where no ERR_VALUE is expected to be returned.
1578 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1579 unsigned long addr, int avoid_reserve)
1581 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1587 int __weak alloc_bootmem_huge_page(struct hstate *h)
1589 struct huge_bootmem_page *m;
1592 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1595 addr = memblock_virt_alloc_try_nid_nopanic(
1596 huge_page_size(h), huge_page_size(h),
1597 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1600 * Use the beginning of the huge page to store the
1601 * huge_bootmem_page struct (until gather_bootmem
1602 * puts them into the mem_map).
1611 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1612 /* Put them into a private list first because mem_map is not up yet */
1613 list_add(&m->list, &huge_boot_pages);
1618 static void __init prep_compound_huge_page(struct page *page, int order)
1620 if (unlikely(order > (MAX_ORDER - 1)))
1621 prep_compound_gigantic_page(page, order);
1623 prep_compound_page(page, order);
1626 /* Put bootmem huge pages into the standard lists after mem_map is up */
1627 static void __init gather_bootmem_prealloc(void)
1629 struct huge_bootmem_page *m;
1631 list_for_each_entry(m, &huge_boot_pages, list) {
1632 struct hstate *h = m->hstate;
1635 #ifdef CONFIG_HIGHMEM
1636 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1637 memblock_free_late(__pa(m),
1638 sizeof(struct huge_bootmem_page));
1640 page = virt_to_page(m);
1642 WARN_ON(page_count(page) != 1);
1643 prep_compound_huge_page(page, h->order);
1644 WARN_ON(PageReserved(page));
1645 prep_new_huge_page(h, page, page_to_nid(page));
1647 * If we had gigantic hugepages allocated at boot time, we need
1648 * to restore the 'stolen' pages to totalram_pages in order to
1649 * fix confusing memory reports from free(1) and other
1650 * side-effects, like CommitLimit going negative.
1652 if (hstate_is_gigantic(h))
1653 adjust_managed_page_count(page, 1 << h->order);
1657 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1661 for (i = 0; i < h->max_huge_pages; ++i) {
1662 if (hstate_is_gigantic(h)) {
1663 if (!alloc_bootmem_huge_page(h))
1665 } else if (!alloc_fresh_huge_page(h,
1666 &node_states[N_MEMORY]))
1669 h->max_huge_pages = i;
1672 static void __init hugetlb_init_hstates(void)
1676 for_each_hstate(h) {
1677 if (minimum_order > huge_page_order(h))
1678 minimum_order = huge_page_order(h);
1680 /* oversize hugepages were init'ed in early boot */
1681 if (!hstate_is_gigantic(h))
1682 hugetlb_hstate_alloc_pages(h);
1684 VM_BUG_ON(minimum_order == UINT_MAX);
1687 static char * __init memfmt(char *buf, unsigned long n)
1689 if (n >= (1UL << 30))
1690 sprintf(buf, "%lu GB", n >> 30);
1691 else if (n >= (1UL << 20))
1692 sprintf(buf, "%lu MB", n >> 20);
1694 sprintf(buf, "%lu KB", n >> 10);
1698 static void __init report_hugepages(void)
1702 for_each_hstate(h) {
1704 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1705 memfmt(buf, huge_page_size(h)),
1706 h->free_huge_pages);
1710 #ifdef CONFIG_HIGHMEM
1711 static void try_to_free_low(struct hstate *h, unsigned long count,
1712 nodemask_t *nodes_allowed)
1716 if (hstate_is_gigantic(h))
1719 for_each_node_mask(i, *nodes_allowed) {
1720 struct page *page, *next;
1721 struct list_head *freel = &h->hugepage_freelists[i];
1722 list_for_each_entry_safe(page, next, freel, lru) {
1723 if (count >= h->nr_huge_pages)
1725 if (PageHighMem(page))
1727 list_del(&page->lru);
1728 update_and_free_page(h, page);
1729 h->free_huge_pages--;
1730 h->free_huge_pages_node[page_to_nid(page)]--;
1735 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1736 nodemask_t *nodes_allowed)
1742 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1743 * balanced by operating on them in a round-robin fashion.
1744 * Returns 1 if an adjustment was made.
1746 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1751 VM_BUG_ON(delta != -1 && delta != 1);
1754 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1755 if (h->surplus_huge_pages_node[node])
1759 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1760 if (h->surplus_huge_pages_node[node] <
1761 h->nr_huge_pages_node[node])
1768 h->surplus_huge_pages += delta;
1769 h->surplus_huge_pages_node[node] += delta;
1773 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1774 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1775 nodemask_t *nodes_allowed)
1777 unsigned long min_count, ret;
1779 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1780 return h->max_huge_pages;
1783 * Increase the pool size
1784 * First take pages out of surplus state. Then make up the
1785 * remaining difference by allocating fresh huge pages.
1787 * We might race with alloc_buddy_huge_page() here and be unable
1788 * to convert a surplus huge page to a normal huge page. That is
1789 * not critical, though, it just means the overall size of the
1790 * pool might be one hugepage larger than it needs to be, but
1791 * within all the constraints specified by the sysctls.
1793 spin_lock(&hugetlb_lock);
1794 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1795 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1799 while (count > persistent_huge_pages(h)) {
1801 * If this allocation races such that we no longer need the
1802 * page, free_huge_page will handle it by freeing the page
1803 * and reducing the surplus.
1805 spin_unlock(&hugetlb_lock);
1806 if (hstate_is_gigantic(h))
1807 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1809 ret = alloc_fresh_huge_page(h, nodes_allowed);
1810 spin_lock(&hugetlb_lock);
1814 /* Bail for signals. Probably ctrl-c from user */
1815 if (signal_pending(current))
1820 * Decrease the pool size
1821 * First return free pages to the buddy allocator (being careful
1822 * to keep enough around to satisfy reservations). Then place
1823 * pages into surplus state as needed so the pool will shrink
1824 * to the desired size as pages become free.
1826 * By placing pages into the surplus state independent of the
1827 * overcommit value, we are allowing the surplus pool size to
1828 * exceed overcommit. There are few sane options here. Since
1829 * alloc_buddy_huge_page() is checking the global counter,
1830 * though, we'll note that we're not allowed to exceed surplus
1831 * and won't grow the pool anywhere else. Not until one of the
1832 * sysctls are changed, or the surplus pages go out of use.
1834 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1835 min_count = max(count, min_count);
1836 try_to_free_low(h, min_count, nodes_allowed);
1837 while (min_count < persistent_huge_pages(h)) {
1838 if (!free_pool_huge_page(h, nodes_allowed, 0))
1840 cond_resched_lock(&hugetlb_lock);
1842 while (count < persistent_huge_pages(h)) {
1843 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1847 ret = persistent_huge_pages(h);
1848 spin_unlock(&hugetlb_lock);
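/*
 * Worked example (illustrative, not part of the original source): with
 * resv_huge_pages == 2, nr_huge_pages == 10 and free_huge_pages == 4,
 * min_count == 2 + 10 - 4 == 8, so even a request to shrink the pool to
 * count == 3 can only take it down to 8 pages: 6 are in use and 2 must
 * stay free to back the outstanding reservations.
 */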
1852 #define HSTATE_ATTR_RO(_name) \
1853 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1855 #define HSTATE_ATTR(_name) \
1856 static struct kobj_attribute _name##_attr = \
1857 __ATTR(_name, 0644, _name##_show, _name##_store)
1859 static struct kobject *hugepages_kobj;
1860 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1862 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1864 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1868 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1869 if (hstate_kobjs[i] == kobj) {
1871 *nidp = NUMA_NO_NODE;
1875 return kobj_to_node_hstate(kobj, nidp);
1878 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1879 struct kobj_attribute *attr, char *buf)
1882 unsigned long nr_huge_pages;
1885 h = kobj_to_hstate(kobj, &nid);
1886 if (nid == NUMA_NO_NODE)
1887 nr_huge_pages = h->nr_huge_pages;
1889 nr_huge_pages = h->nr_huge_pages_node[nid];
1891 return sprintf(buf, "%lu\n", nr_huge_pages);
1894 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1895 struct hstate *h, int nid,
1896 unsigned long count, size_t len)
1899 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1901 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1906 if (nid == NUMA_NO_NODE) {
1908 * global hstate attribute
1910 if (!(obey_mempolicy &&
1911 init_nodemask_of_mempolicy(nodes_allowed))) {
1912 NODEMASK_FREE(nodes_allowed);
1913 nodes_allowed = &node_states[N_MEMORY];
1915 } else if (nodes_allowed) {
1917 * per node hstate attribute: adjust count to global,
1918 * but restrict alloc/free to the specified node.
1920 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1921 init_nodemask_of_node(nodes_allowed, nid);
1923 nodes_allowed = &node_states[N_MEMORY];
1925 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1927 if (nodes_allowed != &node_states[N_MEMORY])
1928 NODEMASK_FREE(nodes_allowed);
1932 NODEMASK_FREE(nodes_allowed);
1936 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1937 struct kobject *kobj, const char *buf,
1941 unsigned long count;
1945 err = kstrtoul(buf, 10, &count);
1949 h = kobj_to_hstate(kobj, &nid);
1950 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1953 static ssize_t nr_hugepages_show(struct kobject *kobj,
1954 struct kobj_attribute *attr, char *buf)
1956 return nr_hugepages_show_common(kobj, attr, buf);
1959 static ssize_t nr_hugepages_store(struct kobject *kobj,
1960 struct kobj_attribute *attr, const char *buf, size_t len)
1962 return nr_hugepages_store_common(false, kobj, buf, len);
1964 HSTATE_ATTR(nr_hugepages);
1969 * hstate attribute for optionally mempolicy-based constraint on persistent
1970 * huge page alloc/free.
1972 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1973 struct kobj_attribute *attr, char *buf)
1975 return nr_hugepages_show_common(kobj, attr, buf);
1978 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1979 struct kobj_attribute *attr, const char *buf, size_t len)
1981 return nr_hugepages_store_common(true, kobj, buf, len);
1983 HSTATE_ATTR(nr_hugepages_mempolicy);
1987 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1988 struct kobj_attribute *attr, char *buf)
1990 struct hstate *h = kobj_to_hstate(kobj, NULL);
1991 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1994 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1995 struct kobj_attribute *attr, const char *buf, size_t count)
1998 unsigned long input;
1999 struct hstate *h = kobj_to_hstate(kobj, NULL);
2001 if (hstate_is_gigantic(h))
2004 err = kstrtoul(buf, 10, &input);
2008 spin_lock(&hugetlb_lock);
2009 h->nr_overcommit_huge_pages = input;
2010 spin_unlock(&hugetlb_lock);
2014 HSTATE_ATTR(nr_overcommit_hugepages);
2016 static ssize_t free_hugepages_show(struct kobject *kobj,
2017 struct kobj_attribute *attr, char *buf)
2020 unsigned long free_huge_pages;
2023 h = kobj_to_hstate(kobj, &nid);
2024 if (nid == NUMA_NO_NODE)
2025 free_huge_pages = h->free_huge_pages;
2027 free_huge_pages = h->free_huge_pages_node[nid];
2029 return sprintf(buf, "%lu\n", free_huge_pages);
2031 HSTATE_ATTR_RO(free_hugepages);
2033 static ssize_t resv_hugepages_show(struct kobject *kobj,
2034 struct kobj_attribute *attr, char *buf)
2036 struct hstate *h = kobj_to_hstate(kobj, NULL);
2037 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2039 HSTATE_ATTR_RO(resv_hugepages);
2041 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2042 struct kobj_attribute *attr, char *buf)
2045 unsigned long surplus_huge_pages;
2048 h = kobj_to_hstate(kobj, &nid);
2049 if (nid == NUMA_NO_NODE)
2050 surplus_huge_pages = h->surplus_huge_pages;
2052 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2054 return sprintf(buf, "%lu\n", surplus_huge_pages);
2056 HSTATE_ATTR_RO(surplus_hugepages);
2058 static struct attribute *hstate_attrs[] = {
2059 &nr_hugepages_attr.attr,
2060 &nr_overcommit_hugepages_attr.attr,
2061 &free_hugepages_attr.attr,
2062 &resv_hugepages_attr.attr,
2063 &surplus_hugepages_attr.attr,
2065 &nr_hugepages_mempolicy_attr.attr,
2070 static struct attribute_group hstate_attr_group = {
2071 .attrs = hstate_attrs,
2074 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2075 struct kobject **hstate_kobjs,
2076 struct attribute_group *hstate_attr_group)
2079 int hi = hstate_index(h);
2081 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2082 if (!hstate_kobjs[hi])
2085 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2087 kobject_put(hstate_kobjs[hi]);
2092 static void __init hugetlb_sysfs_init(void)
2097 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2098 if (!hugepages_kobj)
2101 for_each_hstate(h) {
2102 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2103 hstate_kobjs, &hstate_attr_group);
2105 pr_err("Hugetlb: Unable to add hstate %s", h->name);
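/*
 * Resulting layout (illustrative, assuming a 2 MB default hstate):
 * hugetlb_sysfs_init() creates /sys/kernel/mm/hugepages/hugepages-2048kB/
 * holding nr_hugepages, nr_overcommit_hugepages, free_hugepages,
 * resv_hugepages and surplus_hugepages (plus nr_hugepages_mempolicy on
 * NUMA builds), so the pool can be resized at run time with e.g.:
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */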
2112 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2113 * with node devices in node_devices[] using a parallel array. The array
2114 * index of a node device or _hstate == node id.
2115 * This is here to avoid any static dependency of the node device driver, in
2116 * the base kernel, on the hugetlb module.
2118 struct node_hstate {
2119 struct kobject *hugepages_kobj;
2120 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2122 struct node_hstate node_hstates[MAX_NUMNODES];
2125 * A subset of global hstate attributes for node devices
2127 static struct attribute *per_node_hstate_attrs[] = {
2128 &nr_hugepages_attr.attr,
2129 &free_hugepages_attr.attr,
2130 &surplus_hugepages_attr.attr,
2134 static struct attribute_group per_node_hstate_attr_group = {
2135 .attrs = per_node_hstate_attrs,
2139 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2140 * Returns node id via non-NULL nidp.
2142 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2146 for (nid = 0; nid < nr_node_ids; nid++) {
2147 struct node_hstate *nhs = &node_hstates[nid];
2149 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2150 if (nhs->hstate_kobjs[i] == kobj) {
2162 * Unregister hstate attributes from a single node device.
2163 * No-op if no hstate attributes attached.
2165 static void hugetlb_unregister_node(struct node *node)
2168 struct node_hstate *nhs = &node_hstates[node->dev.id];
2170 if (!nhs->hugepages_kobj)
2171 return; /* no hstate attributes */
2173 for_each_hstate(h) {
2174 int idx = hstate_index(h);
2175 if (nhs->hstate_kobjs[idx]) {
2176 kobject_put(nhs->hstate_kobjs[idx]);
2177 nhs->hstate_kobjs[idx] = NULL;
2181 kobject_put(nhs->hugepages_kobj);
2182 nhs->hugepages_kobj = NULL;
2186 * hugetlb module exit: unregister hstate attributes from node devices
2189 static void hugetlb_unregister_all_nodes(void)
2194 * disable node device registrations.
2196 register_hugetlbfs_with_node(NULL, NULL);
2199 * remove hstate attributes from any nodes that have them.
2201 for (nid = 0; nid < nr_node_ids; nid++)
2202 hugetlb_unregister_node(node_devices[nid]);
2206 * Register hstate attributes for a single node device.
2207 * No-op if attributes already registered.
2209 static void hugetlb_register_node(struct node *node)
2212 struct node_hstate *nhs = &node_hstates[node->dev.id];
2215 if (nhs->hugepages_kobj)
2216 return; /* already allocated */
2218 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2220 if (!nhs->hugepages_kobj)
2223 for_each_hstate(h) {
2224 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2226 &per_node_hstate_attr_group);
2228 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2229 h->name, node->dev.id);
2230 hugetlb_unregister_node(node);
2237 * hugetlb init time: register hstate attributes for all registered node
2238 * devices of nodes that have memory. All on-line nodes should have
2239 * registered their associated device by this time.
2241 static void __init hugetlb_register_all_nodes(void)
2245 for_each_node_state(nid, N_MEMORY) {
2246 struct node *node = node_devices[nid];
2247 if (node->dev.id == nid)
2248 hugetlb_register_node(node);
2252 * Let the node device driver know we're here so it can
2253 * [un]register hstate attributes on node hotplug.
2255 register_hugetlbfs_with_node(hugetlb_register_node,
2256 hugetlb_unregister_node);
2258 #else /* !CONFIG_NUMA */
2260 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2268 static void hugetlb_unregister_all_nodes(void) { }
2270 static void hugetlb_register_all_nodes(void) { }
2274 static void __exit hugetlb_exit(void)
2278 hugetlb_unregister_all_nodes();
2280 for_each_hstate(h) {
2281 kobject_put(hstate_kobjs[hstate_index(h)]);
2284 kobject_put(hugepages_kobj);
2285 kfree(htlb_fault_mutex_table);
2287 module_exit(hugetlb_exit);
2289 static int __init hugetlb_init(void)
2293 if (!hugepages_supported())
2296 if (!size_to_hstate(default_hstate_size)) {
2297 default_hstate_size = HPAGE_SIZE;
2298 if (!size_to_hstate(default_hstate_size))
2299 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2301 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2302 if (default_hstate_max_huge_pages)
2303 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2305 hugetlb_init_hstates();
2306 gather_bootmem_prealloc();
2309 hugetlb_sysfs_init();
2310 hugetlb_register_all_nodes();
2311 hugetlb_cgroup_file_init();
2314 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2316 num_fault_mutexes = 1;
2318 htlb_fault_mutex_table =
2319 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2320 BUG_ON(!htlb_fault_mutex_table);
2322 for (i = 0; i < num_fault_mutexes; i++)
2323 mutex_init(&htlb_fault_mutex_table[i]);
2326 module_init(hugetlb_init);
2328 /* Should be called on processing a hugepagesz=... option */
2329 void __init hugetlb_add_hstate(unsigned order)
2334 if (size_to_hstate(PAGE_SIZE << order)) {
2335 pr_warning("hugepagesz= specified twice, ignoring\n");
2338 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2340 h = &hstates[hugetlb_max_hstate++];
2342 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2343 h->nr_huge_pages = 0;
2344 h->free_huge_pages = 0;
2345 for (i = 0; i < MAX_NUMNODES; ++i)
2346 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2347 INIT_LIST_HEAD(&h->hugepage_activelist);
2348 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2349 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2350 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2351 huge_page_size(h)/1024);
2356 static int __init hugetlb_nrpages_setup(char *s)
2359 static unsigned long *last_mhp;
2362 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2363 * so this hugepages= parameter goes to the "default hstate".
2365 if (!hugetlb_max_hstate)
2366 mhp = &default_hstate_max_huge_pages;
2368 mhp = &parsed_hstate->max_huge_pages;
2370 if (mhp == last_mhp) {
2371 pr_warning("hugepages= specified twice without "
2372 "interleaving hugepagesz=, ignoring\n");
2376 if (sscanf(s, "%lu", mhp) <= 0)
2380 * Global state is always initialized later in hugetlb_init.
2381 * But we need to allocate >= MAX_ORDER hstates here early to still
2382 * use the bootmem allocator.
2384 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2385 hugetlb_hstate_alloc_pages(parsed_hstate);
2391 __setup("hugepages=", hugetlb_nrpages_setup);
2393 static int __init hugetlb_default_setup(char *s)
2395 default_hstate_size = memparse(s, &s);
2398 __setup("default_hugepagesz=", hugetlb_default_setup);
2400 static unsigned int cpuset_mems_nr(unsigned int *array)
2403 unsigned int nr = 0;
2405 for_each_node_mask(node, cpuset_current_mems_allowed)
2411 #ifdef CONFIG_SYSCTL
2412 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2413 struct ctl_table *table, int write,
2414 void __user *buffer, size_t *length, loff_t *ppos)
2416 struct hstate *h = &default_hstate;
2417 unsigned long tmp = h->max_huge_pages;
2420 if (!hugepages_supported())
2424 table->maxlen = sizeof(unsigned long);
2425 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2430 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2431 NUMA_NO_NODE, tmp, *length);
2436 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2437 void __user *buffer, size_t *length, loff_t *ppos)
2440 return hugetlb_sysctl_handler_common(false, table, write,
2441 buffer, length, ppos);
2445 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2446 void __user *buffer, size_t *length, loff_t *ppos)
2448 return hugetlb_sysctl_handler_common(true, table, write,
2449 buffer, length, ppos);
2451 #endif /* CONFIG_NUMA */
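/*
 * Illustrative note (not part of the original source): the two handlers
 * above back the /proc/sys/vm/nr_hugepages and
 * /proc/sys/vm/nr_hugepages_mempolicy sysctls. For example:
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *
 * resizes the default hstate's persistent pool to 128 huge pages, spreading
 * the allocation across nodes (or, for the _mempolicy variant, only across
 * the nodes allowed by the writing task's memory policy).
 */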
2453 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2454 void __user *buffer,
2455 size_t *length, loff_t *ppos)
2457 struct hstate *h = &default_hstate;
2461 if (!hugepages_supported())
2464 tmp = h->nr_overcommit_huge_pages;
2466 if (write && hstate_is_gigantic(h))
2470 table->maxlen = sizeof(unsigned long);
2471 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2476 spin_lock(&hugetlb_lock);
2477 h->nr_overcommit_huge_pages = tmp;
2478 spin_unlock(&hugetlb_lock);
2484 #endif /* CONFIG_SYSCTL */
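/*
 * Illustrative note (not part of the original source): hugetlb_overcommit_handler
 * above backs /proc/sys/vm/nr_overcommit_hugepages, e.g.
 *
 *	echo 64 > /proc/sys/vm/nr_overcommit_hugepages
 *
 * allows up to 64 additional surplus huge pages of the default size to be
 * allocated from the buddy allocator when the persistent pool is exhausted
 * (writes are rejected for gigantic hstates, see the check above).
 */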
2486 void hugetlb_report_meminfo(struct seq_file *m)
2488 struct hstate *h = &default_hstate;
2489 if (!hugepages_supported())
2492 "HugePages_Total: %5lu\n"
2493 "HugePages_Free: %5lu\n"
2494 "HugePages_Rsvd: %5lu\n"
2495 "HugePages_Surp: %5lu\n"
2496 "Hugepagesize: %8lu kB\n",
2500 h->surplus_huge_pages,
2501 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
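/*
 * Illustrative example output (not part of the original source), as it would
 * appear in /proc/meminfo for a 2MB default hstate with a 64-page pool:
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       64
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */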
2504 int hugetlb_report_node_meminfo(int nid, char *buf)
2506 struct hstate *h = &default_hstate;
2507 if (!hugepages_supported())
2510 "Node %d HugePages_Total: %5u\n"
2511 "Node %d HugePages_Free: %5u\n"
2512 "Node %d HugePages_Surp: %5u\n",
2513 nid, h->nr_huge_pages_node[nid],
2514 nid, h->free_huge_pages_node[nid],
2515 nid, h->surplus_huge_pages_node[nid]);
2518 void hugetlb_show_meminfo(void)
2523 if (!hugepages_supported())
2526 for_each_node_state(nid, N_MEMORY)
2528 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2530 h->nr_huge_pages_node[nid],
2531 h->free_huge_pages_node[nid],
2532 h->surplus_huge_pages_node[nid],
2533 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2536 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2537 unsigned long hugetlb_total_pages(void)
2540 unsigned long nr_total_pages = 0;
2543 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2544 return nr_total_pages;
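/*
 * Illustrative arithmetic (not part of the original source): with 4kB base
 * pages and a single 2MB hstate holding 64 huge pages,
 * pages_per_huge_page() == 512, so hugetlb_total_pages() returns
 * 64 * 512 = 32768 base pages (128MB).
 */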
2547 static int hugetlb_acct_memory(struct hstate *h, long delta)
2551 spin_lock(&hugetlb_lock);
2553 * When cpuset is configured, it breaks the strict hugetlb page
2554 * reservation as the accounting is done on a global variable. Such
2555 * reservation is completely rubbish in the presence of cpuset because
2556 * the reservation is not checked against page availability for the
2557 * current cpuset. Applications can still potentially be OOM'ed by the
2558 * kernel for lack of free huge pages in the cpuset that the task is in.
2559 * Attempting to enforce strict accounting with cpuset is almost
2560 * impossible (or too ugly) because cpusets are too fluid:
2561 * tasks or memory nodes can be dynamically moved between cpusets.
2563 * The change of semantics for shared hugetlb mapping with cpuset is
2564 * undesirable. However, in order to preserve some of the semantics,
2565 * we fall back to checking against the current free page availability as
2566 * a best-effort attempt, hopefully minimizing the impact of the change
2567 * of semantics that cpuset brings.
2570 if (gather_surplus_pages(h, delta) < 0)
2573 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2574 return_unused_surplus_pages(h, delta);
2581 return_unused_surplus_pages(h, (unsigned long) -delta);
2584 spin_unlock(&hugetlb_lock);
2588 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2590 struct resv_map *resv = vma_resv_map(vma);
2593 * This new VMA should share its sibling's reservation map if present.
2594 * The VMA will only ever have a valid reservation map pointer where
2595 * it is being copied for another still existing VMA. As that VMA
2596 * has a reference to the reservation map it cannot disappear until
2597 * after this open call completes. It is therefore safe to take a
2598 * new reference here without additional locking.
2600 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2601 kref_get(&resv->refs);
2604 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2606 struct hstate *h = hstate_vma(vma);
2607 struct resv_map *resv = vma_resv_map(vma);
2608 struct hugepage_subpool *spool = subpool_vma(vma);
2609 unsigned long reserve, start, end;
2612 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2615 start = vma_hugecache_offset(h, vma, vma->vm_start);
2616 end = vma_hugecache_offset(h, vma, vma->vm_end);
2618 reserve = (end - start) - region_count(resv, start, end);
2620 kref_put(&resv->refs, resv_map_release);
2624 * Decrement reserve counts. The global reserve count may be
2625 * adjusted if the subpool has a minimum size.
2627 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2628 hugetlb_acct_memory(h, -gbl_reserve);
2633 * We cannot handle pagefaults against hugetlb pages at all. They cause
2634 * handle_mm_fault() to try to instantiate regular-sized pages in the
2635 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get here.
2638 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2644 const struct vm_operations_struct hugetlb_vm_ops = {
2645 .fault = hugetlb_vm_op_fault,
2646 .open = hugetlb_vm_op_open,
2647 .close = hugetlb_vm_op_close,
2650 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2656 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2657 vma->vm_page_prot)));
2659 entry = huge_pte_wrprotect(mk_huge_pte(page,
2660 vma->vm_page_prot));
2662 entry = pte_mkyoung(entry);
2663 entry = pte_mkhuge(entry);
2664 entry = arch_make_huge_pte(entry, vma, page, writable);
2669 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2670 unsigned long address, pte_t *ptep)
2674 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2675 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2676 update_mmu_cache(vma, address, ptep);
2679 static int is_hugetlb_entry_migration(pte_t pte)
2683 if (huge_pte_none(pte) || pte_present(pte))
2685 swp = pte_to_swp_entry(pte);
2686 if (non_swap_entry(swp) && is_migration_entry(swp))
2692 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2696 if (huge_pte_none(pte) || pte_present(pte))
2698 swp = pte_to_swp_entry(pte);
2699 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2705 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2706 struct vm_area_struct *vma)
2708 pte_t *src_pte, *dst_pte, entry;
2709 struct page *ptepage;
2712 struct hstate *h = hstate_vma(vma);
2713 unsigned long sz = huge_page_size(h);
2714 unsigned long mmun_start; /* For mmu_notifiers */
2715 unsigned long mmun_end; /* For mmu_notifiers */
2718 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2720 mmun_start = vma->vm_start;
2721 mmun_end = vma->vm_end;
2723 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2725 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2726 spinlock_t *src_ptl, *dst_ptl;
2727 src_pte = huge_pte_offset(src, addr);
2730 dst_pte = huge_pte_alloc(dst, addr, sz);
2736 /* If the pagetables are shared don't copy or take references */
2737 if (dst_pte == src_pte)
2740 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2741 src_ptl = huge_pte_lockptr(h, src, src_pte);
2742 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2743 entry = huge_ptep_get(src_pte);
2744 if (huge_pte_none(entry)) { /* skip none entry */
2746 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2747 is_hugetlb_entry_hwpoisoned(entry))) {
2748 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2750 if (is_write_migration_entry(swp_entry) && cow) {
2752 * COW mappings require pages in both
2753 * parent and child to be set to read.
2755 make_migration_entry_read(&swp_entry);
2756 entry = swp_entry_to_pte(swp_entry);
2757 set_huge_pte_at(src, addr, src_pte, entry);
2759 set_huge_pte_at(dst, addr, dst_pte, entry);
2762 huge_ptep_set_wrprotect(src, addr, src_pte);
2763 mmu_notifier_invalidate_range(src, mmun_start,
2766 entry = huge_ptep_get(src_pte);
2767 ptepage = pte_page(entry);
2769 page_dup_rmap(ptepage);
2770 set_huge_pte_at(dst, addr, dst_pte, entry);
2772 spin_unlock(src_ptl);
2773 spin_unlock(dst_ptl);
2777 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2782 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2783 unsigned long start, unsigned long end,
2784 struct page *ref_page)
2786 int force_flush = 0;
2787 struct mm_struct *mm = vma->vm_mm;
2788 unsigned long address;
2793 struct hstate *h = hstate_vma(vma);
2794 unsigned long sz = huge_page_size(h);
2795 const unsigned long mmun_start = start; /* For mmu_notifiers */
2796 const unsigned long mmun_end = end; /* For mmu_notifiers */
2798 WARN_ON(!is_vm_hugetlb_page(vma));
2799 BUG_ON(start & ~huge_page_mask(h));
2800 BUG_ON(end & ~huge_page_mask(h));
2802 tlb_start_vma(tlb, vma);
2803 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2806 for (; address < end; address += sz) {
2807 ptep = huge_pte_offset(mm, address);
2811 ptl = huge_pte_lock(h, mm, ptep);
2812 if (huge_pmd_unshare(mm, &address, ptep))
2815 pte = huge_ptep_get(ptep);
2816 if (huge_pte_none(pte))
2820 * Migrating hugepage or HWPoisoned hugepage is already
2821 * unmapped and its refcount is dropped, so just clear pte here.
2823 if (unlikely(!pte_present(pte))) {
2824 huge_pte_clear(mm, address, ptep);
2828 page = pte_page(pte);
2830 * If a reference page is supplied, it is because a specific
2831 * page is being unmapped, not a range. Ensure the page we
2832 * are about to unmap is the actual page of interest.
2835 if (page != ref_page)
2839 * Mark the VMA as having unmapped its page so that
2840 * future faults in this VMA will fail rather than
2841 * looking like data was lost
2843 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2846 pte = huge_ptep_get_and_clear(mm, address, ptep);
2847 tlb_remove_tlb_entry(tlb, ptep, address);
2848 if (huge_pte_dirty(pte))
2849 set_page_dirty(page);
2851 page_remove_rmap(page);
2852 force_flush = !__tlb_remove_page(tlb, page);
2858 /* Bail out after unmapping reference page if supplied */
2867 * mmu_gather ran out of room to batch pages, so we break out of
2868 * the PTE lock to avoid doing the potentially expensive TLB invalidate
2869 * and page-free while holding it.
2874 if (address < end && !ref_page)
2877 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2878 tlb_end_vma(tlb, vma);
2881 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2882 struct vm_area_struct *vma, unsigned long start,
2883 unsigned long end, struct page *ref_page)
2885 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2888 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2889 * test will fail on a vma being torn down, and not grab a page table
2890 * on its way out. We're lucky that the flag has such an appropriate
2891 * name, and can in fact be safely cleared here. We could clear it
2892 * before the __unmap_hugepage_range above, but all that's necessary
2893 * is to clear it before releasing the i_mmap_rwsem. This works
2894 * because in the context this is called, the VMA is about to be
2895 * destroyed and the i_mmap_rwsem is held.
2897 vma->vm_flags &= ~VM_MAYSHARE;
2900 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2901 unsigned long end, struct page *ref_page)
2903 struct mm_struct *mm;
2904 struct mmu_gather tlb;
2908 tlb_gather_mmu(&tlb, mm, start, end);
2909 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2910 tlb_finish_mmu(&tlb, start, end);
2914 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2915 * mapping it owns the reserve page for. The intention is to unmap the page
2916 * from other VMAs and let the children be SIGKILLed if they fault the same region.
2919 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2920 struct page *page, unsigned long address)
2922 struct hstate *h = hstate_vma(vma);
2923 struct vm_area_struct *iter_vma;
2924 struct address_space *mapping;
2928 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2929 * from page cache lookup which is in HPAGE_SIZE units.
2931 address = address & huge_page_mask(h);
2932 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2934 mapping = file_inode(vma->vm_file)->i_mapping;
2937 * Take the mapping lock for the duration of the table walk. As
2938 * this mapping should be shared between all the VMAs,
2939 * __unmap_hugepage_range() is called as the lock is already held
2941 i_mmap_lock_write(mapping);
2942 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2943 /* Do not unmap the current VMA */
2944 if (iter_vma == vma)
2948 * Unmap the page from other VMAs without their own reserves.
2949 * They get marked to be SIGKILLed if they fault in these
2950 * areas. This is because a future no-page fault on this VMA
2951 * could insert a zeroed page instead of the data existing
2952 * from the time of fork. This would look like data corruption
2954 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2955 unmap_hugepage_range(iter_vma, address,
2956 address + huge_page_size(h), page);
2958 i_mmap_unlock_write(mapping);
2962 * Hugetlb_cow() should be called with page lock of the original hugepage held.
2963 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2964 * cannot race with other handlers or page migration.
2965 * Keep the pte_same checks anyway to make transition from the mutex easier.
2967 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2968 unsigned long address, pte_t *ptep, pte_t pte,
2969 struct page *pagecache_page, spinlock_t *ptl)
2971 struct hstate *h = hstate_vma(vma);
2972 struct page *old_page, *new_page;
2973 int ret = 0, outside_reserve = 0;
2974 unsigned long mmun_start; /* For mmu_notifiers */
2975 unsigned long mmun_end; /* For mmu_notifiers */
2977 old_page = pte_page(pte);
2980 /* If no-one else is actually using this page, avoid the copy
2981 * and just make the page writable */
2982 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2983 page_move_anon_rmap(old_page, vma, address);
2984 set_huge_ptep_writable(vma, address, ptep);
2989 * If the process that created a MAP_PRIVATE mapping is about to
2990 * perform a COW due to a shared page count, attempt to satisfy
2991 * the allocation without using the existing reserves. The pagecache
2992 * page is used to determine if the reserve at this address was
2993 * consumed or not. If reserves were used, a partial faulted mapping
2994 * at the time of fork() could consume its reserves on COW instead
2995 * of the full address range.
2997 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2998 old_page != pagecache_page)
2999 outside_reserve = 1;
3001 page_cache_get(old_page);
3004 * Drop page table lock as buddy allocator may be called. It will
3005 * be acquired again before returning to the caller, as expected.
3008 new_page = alloc_huge_page(vma, address, outside_reserve);
3010 if (IS_ERR(new_page)) {
3012 * If a process owning a MAP_PRIVATE mapping fails to COW,
3013 * it is due to references held by a child and an insufficient
3014 * huge page pool. To guarantee the original mapper's
3015 * reliability, unmap the page from child processes. The child
3016 * may get SIGKILLed if it later faults.
3018 if (outside_reserve) {
3019 page_cache_release(old_page);
3020 BUG_ON(huge_pte_none(pte));
3021 unmap_ref_private(mm, vma, old_page, address);
3022 BUG_ON(huge_pte_none(pte));
3024 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3026 pte_same(huge_ptep_get(ptep), pte)))
3027 goto retry_avoidcopy;
3029 * race occurs while re-acquiring page table
3030 * lock, and our job is done.
3035 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3036 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3037 goto out_release_old;
3041 * When the original hugepage is a shared one, it does not have an
3042 * anon_vma prepared.
3044 if (unlikely(anon_vma_prepare(vma))) {
3046 goto out_release_all;
3049 copy_user_huge_page(new_page, old_page, address, vma,
3050 pages_per_huge_page(h));
3051 __SetPageUptodate(new_page);
3052 set_page_huge_active(new_page);
3054 mmun_start = address & huge_page_mask(h);
3055 mmun_end = mmun_start + huge_page_size(h);
3056 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3059 * Retake the page table lock to check for racing updates
3060 * before the page tables are altered
3063 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3064 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3065 ClearPagePrivate(new_page);
3068 huge_ptep_clear_flush(vma, address, ptep);
3069 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3070 set_huge_pte_at(mm, address, ptep,
3071 make_huge_pte(vma, new_page, 1));
3072 page_remove_rmap(old_page);
3073 hugepage_add_new_anon_rmap(new_page, vma, address);
3074 /* Make the old page be freed below */
3075 new_page = old_page;
3078 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3080 page_cache_release(new_page);
3082 page_cache_release(old_page);
3084 spin_lock(ptl); /* Caller expects lock to be held */
3088 /* Return the pagecache page at a given address within a VMA */
3089 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3090 struct vm_area_struct *vma, unsigned long address)
3092 struct address_space *mapping;
3095 mapping = vma->vm_file->f_mapping;
3096 idx = vma_hugecache_offset(h, vma, address);
3098 return find_lock_page(mapping, idx);
3102 * Return whether there is a pagecache page to back given address within VMA.
3103 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3105 static bool hugetlbfs_pagecache_present(struct hstate *h,
3106 struct vm_area_struct *vma, unsigned long address)
3108 struct address_space *mapping;
3112 mapping = vma->vm_file->f_mapping;
3113 idx = vma_hugecache_offset(h, vma, address);
3115 page = find_get_page(mapping, idx);
3118 return page != NULL;
3121 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3122 struct address_space *mapping, pgoff_t idx,
3123 unsigned long address, pte_t *ptep, unsigned int flags)
3125 struct hstate *h = hstate_vma(vma);
3126 int ret = VM_FAULT_SIGBUS;
3134 * Currently, we are forced to kill the process in the event the
3135 * original mapper has unmapped pages from the child due to a failed
3136 * COW. Warn that such a situation has occurred as it may not be obvious
3138 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3139 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3145 * Use page lock to guard against racing truncation
3146 * before we get page_table_lock.
3149 page = find_lock_page(mapping, idx);
3151 size = i_size_read(mapping->host) >> huge_page_shift(h);
3154 page = alloc_huge_page(vma, address, 0);
3156 ret = PTR_ERR(page);
3160 ret = VM_FAULT_SIGBUS;
3163 clear_huge_page(page, address, pages_per_huge_page(h));
3164 __SetPageUptodate(page);
3165 set_page_huge_active(page);
3167 if (vma->vm_flags & VM_MAYSHARE) {
3169 struct inode *inode = mapping->host;
3171 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3178 ClearPagePrivate(page);
3180 spin_lock(&inode->i_lock);
3181 inode->i_blocks += blocks_per_huge_page(h);
3182 spin_unlock(&inode->i_lock);
3185 if (unlikely(anon_vma_prepare(vma))) {
3187 goto backout_unlocked;
3193 * If a memory error occurs between mmap() and fault, some processes
3194 * won't have a hwpoisoned swap entry for the errored virtual address.
3195 * So we need to block the hugepage fault with a PG_hwpoison bit check.
3197 if (unlikely(PageHWPoison(page))) {
3198 ret = VM_FAULT_HWPOISON |
3199 VM_FAULT_SET_HINDEX(hstate_index(h));
3200 goto backout_unlocked;
3205 * If we are going to COW a private mapping later, we examine the
3206 * pending reservations for this page now. This will ensure that
3207 * any allocations necessary to record that reservation occur outside the spinlock.
3210 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3211 if (vma_needs_reservation(h, vma, address) < 0) {
3213 goto backout_unlocked;
3216 ptl = huge_pte_lockptr(h, mm, ptep);
3218 size = i_size_read(mapping->host) >> huge_page_shift(h);
3223 if (!huge_pte_none(huge_ptep_get(ptep)))
3227 ClearPagePrivate(page);
3228 hugepage_add_new_anon_rmap(page, vma, address);
3230 page_dup_rmap(page);
3231 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3232 && (vma->vm_flags & VM_SHARED)));
3233 set_huge_pte_at(mm, address, ptep, new_pte);
3235 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3236 /* Optimization, do the COW without a second fault */
3237 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3254 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3255 struct vm_area_struct *vma,
3256 struct address_space *mapping,
3257 pgoff_t idx, unsigned long address)
3259 unsigned long key[2];
3262 if (vma->vm_flags & VM_SHARED) {
3263 key[0] = (unsigned long) mapping;
3266 key[0] = (unsigned long) mm;
3267 key[1] = address >> huge_page_shift(h);
3270 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3272 return hash & (num_fault_mutexes - 1);
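/*
 * Illustrative usage (not part of the original source): a fault path hashes
 * either (mapping, idx) for shared mappings or (mm, address) for private
 * ones, then serializes on the selected mutex, e.g.:
 *
 *	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&htlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&htlb_fault_mutex_table[hash]);
 *
 * The "& (num_fault_mutexes - 1)" mask relies on num_fault_mutexes being a
 * power of two (see roundup_pow_of_two() in hugetlb_init()).
 */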
3276 * For uniprocessor systems we always use a single mutex, so just
3277 * return 0 and avoid the hashing overhead.
3279 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3280 struct vm_area_struct *vma,
3281 struct address_space *mapping,
3282 pgoff_t idx, unsigned long address)
3288 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3289 unsigned long address, unsigned int flags)
3296 struct page *page = NULL;
3297 struct page *pagecache_page = NULL;
3298 struct hstate *h = hstate_vma(vma);
3299 struct address_space *mapping;
3300 int need_wait_lock = 0;
3302 address &= huge_page_mask(h);
3304 ptep = huge_pte_offset(mm, address);
3306 entry = huge_ptep_get(ptep);
3307 if (unlikely(is_hugetlb_entry_migration(entry))) {
3308 migration_entry_wait_huge(vma, mm, ptep);
3310 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3311 return VM_FAULT_HWPOISON_LARGE |
3312 VM_FAULT_SET_HINDEX(hstate_index(h));
3315 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3317 return VM_FAULT_OOM;
3319 mapping = vma->vm_file->f_mapping;
3320 idx = vma_hugecache_offset(h, vma, address);
3323 * Serialize hugepage allocation and instantiation, so that we don't
3324 * get spurious allocation failures if two CPUs race to instantiate
3325 * the same page in the page cache.
3327 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3328 mutex_lock(&htlb_fault_mutex_table[hash]);
3330 entry = huge_ptep_get(ptep);
3331 if (huge_pte_none(entry)) {
3332 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3339 * entry could be a migration/hwpoison entry at this point, so this
3340 * check prevents the kernel from going below assuming that we have
3341 * an active hugepage in pagecache. This goto expects the 2nd page fault,
3342 * and the is_hugetlb_entry_(migration|hwpoisoned) check will properly handle it.
3345 if (!pte_present(entry))
3349 * If we are going to COW the mapping later, we examine the pending
3350 * reservations for this page now. This will ensure that any
3351 * allocations necessary to record that reservation occur outside the
3352 * spinlock. For private mappings, we also lookup the pagecache
3353 * page now as it is used to determine if a reservation has been consumed.
3356 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3357 if (vma_needs_reservation(h, vma, address) < 0) {
3362 if (!(vma->vm_flags & VM_MAYSHARE))
3363 pagecache_page = hugetlbfs_pagecache_page(h,
3367 ptl = huge_pte_lock(h, mm, ptep);
3369 /* Check for a racing update before calling hugetlb_cow */
3370 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3374 * hugetlb_cow() requires page locks of pte_page(entry) and
3375 * pagecache_page, so here we need to take the former one
3376 * when page != pagecache_page or !pagecache_page.
3378 page = pte_page(entry);
3379 if (page != pagecache_page)
3380 if (!trylock_page(page)) {
3387 if (flags & FAULT_FLAG_WRITE) {
3388 if (!huge_pte_write(entry)) {
3389 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3390 pagecache_page, ptl);
3393 entry = huge_pte_mkdirty(entry);
3395 entry = pte_mkyoung(entry);
3396 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3397 flags & FAULT_FLAG_WRITE))
3398 update_mmu_cache(vma, address, ptep);
3400 if (page != pagecache_page)
3406 if (pagecache_page) {
3407 unlock_page(pagecache_page);
3408 put_page(pagecache_page);
3411 mutex_unlock(&htlb_fault_mutex_table[hash]);
3413 * Generally it's safe to hold a refcount while waiting for the page lock. But
3414 * here we just wait to defer the next page fault to avoid a busy loop, and
3415 * the page is not used after being unlocked before returning from the current
3416 * page fault. So we are safe from accessing a freed page, even if we wait
3417 * here without taking a refcount.
3420 wait_on_page_locked(page);
3424 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3425 struct page **pages, struct vm_area_struct **vmas,
3426 unsigned long *position, unsigned long *nr_pages,
3427 long i, unsigned int flags)
3429 unsigned long pfn_offset;
3430 unsigned long vaddr = *position;
3431 unsigned long remainder = *nr_pages;
3432 struct hstate *h = hstate_vma(vma);
3434 while (vaddr < vma->vm_end && remainder) {
3436 spinlock_t *ptl = NULL;
3441 * If we have a pending SIGKILL, don't keep faulting pages and
3442 * potentially allocating memory.
3444 if (unlikely(fatal_signal_pending(current))) {
3450 * Some archs (sparc64, sh*) have multiple pte_ts to
3451 * each hugepage. We have to make sure we get the
3452 * first, for the page indexing below to work.
3454 * Note that page table lock is not held when pte is null.
3456 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3458 ptl = huge_pte_lock(h, mm, pte);
3459 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3462 * When coredumping, it suits get_dump_page if we just return
3463 * an error where there's an empty slot with no huge pagecache
3464 * to back it. This way, we avoid allocating a hugepage, and
3465 * the sparse dumpfile avoids allocating disk blocks, but its
3466 * huge holes still show up with zeroes where they need to be.
3468 if (absent && (flags & FOLL_DUMP) &&
3469 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3477 * We need to call hugetlb_fault for both hugepages under migration
3478 * (in which case hugetlb_fault waits for the migration) and
3479 * hwpoisoned hugepages (in which case we need to prevent the
3480 * caller from accessing them). In order to do this, we use
3481 * is_swap_pte here instead of is_hugetlb_entry_migration and
3482 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3483 * both cases, and because we can't follow correct pages
3484 * directly from any kind of swap entry.
3486 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3487 ((flags & FOLL_WRITE) &&
3488 !huge_pte_write(huge_ptep_get(pte)))) {
3493 ret = hugetlb_fault(mm, vma, vaddr,
3494 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3495 if (!(ret & VM_FAULT_ERROR))
3502 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3503 page = pte_page(huge_ptep_get(pte));
3506 pages[i] = mem_map_offset(page, pfn_offset);
3507 get_page_foll(pages[i]);
3517 if (vaddr < vma->vm_end && remainder &&
3518 pfn_offset < pages_per_huge_page(h)) {
3520 * We use pfn_offset to avoid touching the pageframes
3521 * of this compound page.
3527 *nr_pages = remainder;
3530 return i ? i : -EFAULT;
3533 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3534 unsigned long address, unsigned long end, pgprot_t newprot)
3536 struct mm_struct *mm = vma->vm_mm;
3537 unsigned long start = address;
3540 struct hstate *h = hstate_vma(vma);
3541 unsigned long pages = 0;
3543 BUG_ON(address >= end);
3544 flush_cache_range(vma, address, end);
3546 mmu_notifier_invalidate_range_start(mm, start, end);
3547 i_mmap_lock_write(vma->vm_file->f_mapping);
3548 for (; address < end; address += huge_page_size(h)) {
3550 ptep = huge_pte_offset(mm, address);
3553 ptl = huge_pte_lock(h, mm, ptep);
3554 if (huge_pmd_unshare(mm, &address, ptep)) {
3559 pte = huge_ptep_get(ptep);
3560 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3564 if (unlikely(is_hugetlb_entry_migration(pte))) {
3565 swp_entry_t entry = pte_to_swp_entry(pte);
3567 if (is_write_migration_entry(entry)) {
3570 make_migration_entry_read(&entry);
3571 newpte = swp_entry_to_pte(entry);
3572 set_huge_pte_at(mm, address, ptep, newpte);
3578 if (!huge_pte_none(pte)) {
3579 pte = huge_ptep_get_and_clear(mm, address, ptep);
3580 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3581 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3582 set_huge_pte_at(mm, address, ptep, pte);
3588 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3589 * may have cleared our pud entry and done put_page on the page table:
3590 * once we release i_mmap_rwsem, another task can do the final put_page
3591 * and that page table be reused and filled with junk.
3593 flush_tlb_range(vma, start, end);
3594 mmu_notifier_invalidate_range(mm, start, end);
3595 i_mmap_unlock_write(vma->vm_file->f_mapping);
3596 mmu_notifier_invalidate_range_end(mm, start, end);
3598 return pages << h->order;
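/*
 * Illustrative note (not part of the original source): the return value is
 * expressed in base pages, e.g. changing protection on 3 x 2MB huge pages
 * with 4kB base pages (h->order == 9) returns 3 << 9 = 1536.
 */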
3601 int hugetlb_reserve_pages(struct inode *inode,
3603 struct vm_area_struct *vma,
3604 vm_flags_t vm_flags)
3607 struct hstate *h = hstate_inode(inode);
3608 struct hugepage_subpool *spool = subpool_inode(inode);
3609 struct resv_map *resv_map;
3613 * Only apply hugepage reservation if asked. At fault time, an
3614 * attempt will be made for VM_NORESERVE to allocate a page
3615 * without using reserves
3617 if (vm_flags & VM_NORESERVE)
3621 * Shared mappings base their reservation on the number of pages that
3622 * are already allocated on behalf of the file. Private mappings need
3623 * to reserve the full area even if read-only as mprotect() may be
3624 * called to make the mapping read-write. Assume !vma is a shm mapping
3626 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3627 resv_map = inode_resv_map(inode);
3629 chg = region_chg(resv_map, from, to);
3632 resv_map = resv_map_alloc();
3638 set_vma_resv_map(vma, resv_map);
3639 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3648 * There must be enough pages in the subpool for the mapping. If
3649 * the subpool has a minimum size, there may be some global
3650 * reservations already in place (gbl_reserve).
3652 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3653 if (gbl_reserve < 0) {
3659 * Check enough hugepages are available for the reservation.
3660 * Hand the pages back to the subpool if there are not
3662 ret = hugetlb_acct_memory(h, gbl_reserve);
3664 /* put back original number of pages, chg */
3665 (void)hugepage_subpool_put_pages(spool, chg);
3670 * Account for the reservations made. Shared mappings record regions
3671 * that have reservations as they are shared by multiple VMAs.
3672 * When the last VMA disappears, the region map says how much
3673 * the reservation was and the page cache tells how much of
3674 * the reservation was consumed. Private mappings are per-VMA and
3675 * only the consumed reservations are tracked. When the VMA
3676 * disappears, the original reservation is the VMA size and the
3677 * consumed reservations are stored in the map. Hence, nothing
3678 * else has to be done for private mappings here
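/*
 * Illustrative worked example (not part of the original source), assuming
 * 2MB huge pages: a shared mapping of pages [0, 16) of a file whose
 * resv_map already covers [0, 6) gets chg = 10 from region_chg(), i.e. only
 * the pages not yet reserved are charged; a private mapping of the same
 * range is always charged the full 16, since every page may need its own
 * copy after an mprotect() to read-write.
 */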
3680 if (!vma || vma->vm_flags & VM_MAYSHARE)
3681 region_add(resv_map, from, to);
3684 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3685 kref_put(&resv_map->refs, resv_map_release);
3689 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3691 struct hstate *h = hstate_inode(inode);
3692 struct resv_map *resv_map = inode_resv_map(inode);
3694 struct hugepage_subpool *spool = subpool_inode(inode);
3698 chg = region_truncate(resv_map, offset);
3699 spin_lock(&inode->i_lock);
3700 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3701 spin_unlock(&inode->i_lock);
3704 * If the subpool has a minimum size, the number of global
3705 * reservations to be released may be adjusted.
3707 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3708 hugetlb_acct_memory(h, -gbl_reserve);
3711 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3712 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3713 struct vm_area_struct *vma,
3714 unsigned long addr, pgoff_t idx)
3716 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3718 unsigned long sbase = saddr & PUD_MASK;
3719 unsigned long s_end = sbase + PUD_SIZE;
3721 /* Allow segments to share if only one is marked locked */
3722 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3723 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3726 * match the virtual addresses, permission and the alignment of the page table page.
3729 if (pmd_index(addr) != pmd_index(saddr) ||
3730 vm_flags != svm_flags ||
3731 sbase < svma->vm_start || svma->vm_end < s_end)
3737 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3739 unsigned long base = addr & PUD_MASK;
3740 unsigned long end = base + PUD_SIZE;
3743 * check on proper vm_flags and page table alignment
3745 if (vma->vm_flags & VM_MAYSHARE &&
3746 vma->vm_start <= base && end <= vma->vm_end)
3752 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3753 * and returns the corresponding pte. While this is not necessary for the
3754 * !shared pmd case because we can allocate the pmd later as well, it makes the
3755 * code much cleaner. pmd allocation is essential for the shared case because
3756 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3757 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3758 * bad pmd for sharing.
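/*
 * Illustrative example (not part of the original source) for x86-64 with
 * 2MB huge pages: one shared PMD page maps a full PUD_SIZE (1GB) aligned
 * region, so two mappings of the same file may share it only when both
 * VMAs span the entire 1GB-aligned range with matching flags and suitably
 * aligned vm_pgoff (see page_table_shareable() and vma_shareable() above).
 */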
3760 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3762 struct vm_area_struct *vma = find_vma(mm, addr);
3763 struct address_space *mapping = vma->vm_file->f_mapping;
3764 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3766 struct vm_area_struct *svma;
3767 unsigned long saddr;
3772 if (!vma_shareable(vma, addr))
3773 return (pte_t *)pmd_alloc(mm, pud, addr);
3775 i_mmap_lock_write(mapping);
3776 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3780 saddr = page_table_shareable(svma, vma, addr, idx);
3782 spte = huge_pte_offset(svma->vm_mm, saddr);
3785 get_page(virt_to_page(spte));
3794 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3796 if (pud_none(*pud)) {
3797 pud_populate(mm, pud,
3798 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3800 put_page(virt_to_page(spte));
3805 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3806 i_mmap_unlock_write(mapping);
3811 * unmap huge page backed by shared pte.
3813 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
3814 * indicated by page_count > 1, unmap is achieved by clearing pud and
3815 * decrementing the ref count. If count == 1, the pte page is not shared.
3817 * called with page table lock held.
3819 * returns: 1 successfully unmapped a shared pte page
3820 * 0 the underlying pte page is not shared, or it is the last user
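/*
 * Illustrative note (not part of the original source): on unshare, the pud
 * covering the whole shared range has been cleared, so *addr is advanced to
 * the last huge page of that range; after the caller's usual "addr += sz"
 * step the scan resumes just past it. With 2MB pages and 512-entry page
 * tables that range is HPAGE_SIZE * PTRS_PER_PTE = 1GB.
 */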
3822 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3824 pgd_t *pgd = pgd_offset(mm, *addr);
3825 pud_t *pud = pud_offset(pgd, *addr);
3827 BUG_ON(page_count(virt_to_page(ptep)) == 0);
3828 if (page_count(virt_to_page(ptep)) == 1)
3832 put_page(virt_to_page(ptep));
3834 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3837 #define want_pmd_share() (1)
3838 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3839 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3844 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3848 #define want_pmd_share() (0)
3849 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3851 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3852 pte_t *huge_pte_alloc(struct mm_struct *mm,
3853 unsigned long addr, unsigned long sz)
3859 pgd = pgd_offset(mm, addr);
3860 pud = pud_alloc(mm, pgd, addr);
3862 if (sz == PUD_SIZE) {
3865 BUG_ON(sz != PMD_SIZE);
3866 if (want_pmd_share() && pud_none(*pud))
3867 pte = huge_pmd_share(mm, addr, pud);
3869 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3872 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3877 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3883 pgd = pgd_offset(mm, addr);
3884 if (pgd_present(*pgd)) {
3885 pud = pud_offset(pgd, addr);
3886 if (pud_present(*pud)) {
3888 return (pte_t *)pud;
3889 pmd = pmd_offset(pud, addr);
3892 return (pte_t *) pmd;
3895 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3898 * These functions are overridable if your architecture needs its own special handling.
3901 struct page * __weak
3902 follow_huge_addr(struct mm_struct *mm, unsigned long address,
3905 return ERR_PTR(-EINVAL);
3908 struct page * __weak
3909 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3910 pmd_t *pmd, int flags)
3912 struct page *page = NULL;
3915 ptl = pmd_lockptr(mm, pmd);
3918 * make sure that the address range covered by this pmd is not
3919 * unmapped from other threads.
3921 if (!pmd_huge(*pmd))
3923 if (pmd_present(*pmd)) {
3924 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
3925 if (flags & FOLL_GET)
3928 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3930 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3934 * hwpoisoned entry is treated as no_page_table in
3935 * follow_page_mask().
3943 struct page * __weak
3944 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3945 pud_t *pud, int flags)
3947 if (flags & FOLL_GET)
3950 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
3953 #ifdef CONFIG_MEMORY_FAILURE
3956 * This function is called from memory failure code.
3957 * Assume the caller holds page lock of the head page.
3959 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3961 struct hstate *h = page_hstate(hpage);
3962 int nid = page_to_nid(hpage);
3965 spin_lock(&hugetlb_lock);
3967 * Just checking !page_huge_active is not enough, because that could be
3968 * an isolated/hwpoisoned hugepage (which has a >0 refcount).
3970 if (!page_huge_active(hpage) && !page_count(hpage)) {
3972 * Hwpoisoned hugepage isn't linked to activelist or freelist,
3973 * but dangling hpage->lru can trigger list-debug warnings
3974 * (this happens when we call unpoison_memory() on it),
3975 * so let it point to itself with list_del_init().
3977 list_del_init(&hpage->lru);
3978 set_page_refcounted(hpage);
3979 h->free_huge_pages--;
3980 h->free_huge_pages_node[nid]--;
3983 spin_unlock(&hugetlb_lock);
3988 bool isolate_huge_page(struct page *page, struct list_head *list)
3992 VM_BUG_ON_PAGE(!PageHead(page), page);
3993 spin_lock(&hugetlb_lock);
3994 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3998 clear_page_huge_active(page);
3999 list_move_tail(&page->lru, list);
4001 spin_unlock(&hugetlb_lock);
4005 void putback_active_hugepage(struct page *page)
4007 VM_BUG_ON_PAGE(!PageHead(page), page);
4008 spin_lock(&hugetlb_lock);
4009 set_page_huge_active(page);
4010 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4011 spin_unlock(&hugetlb_lock);