X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=mm%2Fpage_isolation.c;h=d1473b2e9481731988695755a618baa0991556a7;hb=06b8ab55289345ab191bf4bf0e4acc6d4bdf293d;hp=0cee10ffb98d4cf8e6ad930faa1f4de925bdf3a5;hpb=1025c04cecd19882e28f16c4004034b475c372c5;p=firefly-linux-kernel-4.4.55.git

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10ffb98d..d1473b2e9481 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 {
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * accordance with memory policy of the user process if possible. For
+	 * now as a simple work-around, we use the next node for destination.
+	 */
+	if (PageHuge(page)) {
+		nodemask_t src = nodemask_of_node(page_to_nid(page));
+		nodemask_t dst;
+		nodes_complement(dst, src);
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node(page_to_nid(page), dst));
+	}
+
	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;
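For readers unfamiliar with the nodemask helpers used in the hunk above, the following is a small, self-contained sketch of the "use the next node" work-around that the TODO comment describes. It is an illustration in plain userspace C, not kernel code: nodemask_t is modeled as a simple bitmask, MAX_NUMNODES is set to an arbitrary 8, and the three helpers are simplified stand-ins for the kernel's nodemask_of_node(), nodes_complement(), and next_node() from <linux/nodemask.h>.

/*
 * Illustration only: a userspace model of the node-selection logic in
 * alloc_migrate_target() above. Names mirror the kernel helpers but are
 * simplified; MAX_NUMNODES is arbitrary here and config-dependent there.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

typedef unsigned int nodemask_t;	/* one bit per node */

static nodemask_t nodemask_of_node(int node)
{
	return 1u << node;		/* mask containing only 'node' */
}

static nodemask_t nodes_complement(nodemask_t mask)
{
	/*
	 * Every node except those in 'mask'. (The kernel variant writes
	 * its result through an output parameter instead of returning it.)
	 */
	return ~mask & ((1u << MAX_NUMNODES) - 1);
}

static int next_node(int n, nodemask_t mask)
{
	/*
	 * Next node strictly after 'n' that is set in 'mask', or
	 * MAX_NUMNODES if there is none -- note: no wrap-around.
	 */
	for (int i = n + 1; i < MAX_NUMNODES; i++)
		if (mask & (1u << i))
			return i;
	return MAX_NUMNODES;
}

int main(void)
{
	for (int src = 0; src < MAX_NUMNODES; src++) {
		nodemask_t dst = nodes_complement(nodemask_of_node(src));
		printf("hugepage on node %d -> allocate on node %d\n",
		       src, next_node(src, dst));
	}
	return 0;
}

Running the sketch shows each source node mapping to src + 1, with the highest-numbered node mapping to MAX_NUMNODES, since next_node() does not wrap around; that limitation is part of why the in-tree comment calls this a simple work-around (later kernels gained a wrapping next_node_in() helper for cases like this).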