mm: readahead: skip exceptional page-cache entries, use page_cache_next_hole()/page_cache_prev_hole(), and cap max_sane_readahead() at MAX_READAHEAD
[firefly-linux-kernel-4.4.55.git] / mm / readahead.c
index 0de2360d65f3f4f4cce4e0ae94f02d78a51a2429..29c5e1af5a0c79a5af00f097c2a03b7492316c8c 100644 (file)
@@ -179,7 +179,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                rcu_read_unlock();
-               if (page)
+               if (page && !radix_tree_exceptional_entry(page))
                        continue;
 
                page = page_cache_alloc_readahead(mapping);
@@ -233,14 +233,14 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        return 0;
 }
 
+#define MAX_READAHEAD   ((512*4096)/PAGE_CACHE_SIZE)
 /*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
  */
 unsigned long max_sane_readahead(unsigned long nr)
 {
-       return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
-               + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
+       return min(nr, MAX_READAHEAD);
 }
 
 /*
@@ -347,7 +347,7 @@ static pgoff_t count_history_pages(struct address_space *mapping,
        pgoff_t head;
 
        rcu_read_lock();
-       head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+       head = page_cache_prev_hole(mapping, offset - 1, max);
        rcu_read_unlock();
 
        return offset - 1 - head;
@@ -427,7 +427,7 @@ ondemand_readahead(struct address_space *mapping,
                pgoff_t start;
 
                rcu_read_lock();
-               start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
+               start = page_cache_next_hole(mapping, offset + 1, max);
                rcu_read_unlock();
 
                if (!start || start - offset > max)