2.6.33-rc1 commit 73848b4684e84a84cfd1555af78d41158f31e16b, adjusted
to include 31e855ea7173bdb0520f9684580423a9560f66e0's movement of the
unlock_page(oldpage), but omitting other intervening cleanups.
When KSM merges an mlocked page, it has been forgetting to munlock it:
that's been left to free_page_mlock(), which reports it in /proc/vmstat
as unevictable_pgs_mlockfreed instead of unevictable_pgs_munlocked,
indicating that such pages _might_ be left unevictable long after they
should have become evictable.  Call munlock_vma_page() to fix that.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
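
To observe the symptom described above, one can compare the two
/proc/vmstat counters while KSM is merging mlocked pages: without this
fix, unevictable_pgs_mlockfreed grows where unevictable_pgs_munlocked
should.  A minimal userspace sketch that dumps both counters (only the
counter names come from the changelog; the rest is illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* print both unevictable counters named in the changelog */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "unevictable_pgs_munlocked", 25) ||
		    !strncmp(line, "unevictable_pgs_mlockfreed", 26))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}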
--- a/mm/internal.h
+++ b/mm/internal.h
 }
 
 /*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked().  This can be useful in a situation where
--- a/mm/ksm.c
+++ b/mm/ksm.c
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
+#include "internal.h"
 
 /*
  * A few notes about the KSM scanning process,
 	 * ptes are necessarily already write-protected.  But in either
 	 * case, we need to lock and check page_count is not raised.
 	 */
-	if (write_protect_page(vma, oldpage, &orig_pte)) {
-		unlock_page(oldpage);
-		goto out_putpage;
-	}
-	unlock_page(oldpage);
-
-	if (pages_identical(oldpage, newpage))
+	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
+	    pages_identical(oldpage, newpage))
 		err = replace_page(vma, oldpage, newpage, orig_pte);
 
+	if ((vma->vm_flags & VM_LOCKED) && !err)
+		munlock_vma_page(oldpage);
+
+	unlock_page(oldpage);
 out_putpage:
 	put_page(oldpage);
 	put_page(newpage);
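
The path changed above runs when ksmd merges a page that some VM_LOCKED
vma maps.  As a rough way to exercise it, a sketch along these lines can
be used, assuming KSM has been enabled by writing 1 to
/sys/kernel/mm/ksm/run (illustrative only; MADV_MERGEABLE requires a
KSM-enabled kernel):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0x5a, len);		/* identical contents: mergeable */
	if (mlock(buf, len))		/* lock before the merge happens */
		perror("mlock");
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");
	pause();	/* leave time for ksmd to scan and merge */
	return 0;
}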
--- a/mm/mlock.c
+++ b/mm/mlock.c
  * not get another chance to clear PageMlocked.  If we successfully
  * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
  * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
+ * is mapped in a non-linear vma.  So, we go ahead and ClearPageMlocked(),
  * perhaps redundantly.
  * If we lose the isolation race, and the page is mapped by other VM_LOCKED
  * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
  * either of which will restore the PageMlocked state by calling
  * mlock_vma_page() above, if it can grab the vma's mmap sem.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
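
The hunk ends inside munlock_vma_page(); for context, the rest of the
function at this point proceeds roughly as the comment above describes.
A simplified sketch (exact event accounting and the lost-isolation-race
branch vary between versions, so treat this as an outline, not the
verbatim source):

	if (TestClearPageMlocked(page)) {
		/* ClearPageMlocked, "perhaps redundantly", per the comment */
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);

			/*
			 * Count the page as munlocked unless another
			 * VM_LOCKED vma re-mlocked it.
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
			putback_lru_page(page);
		}
	}
}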