From 6e8bb0193af3f308ef22817a5560422d33e58b90 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 5 Mar 2012 13:41:15 -0500
Subject: [PATCH] VM: make unmap_vmas() return void

same story - nobody uses it and it's been pointless since
"mm: Remove i_mmap_lock lockbreak" went in.

Signed-off-by: Al Viro
---
 include/linux/mm.h | 2 +-
 mm/memory.c        | 6 +-----
 mm/mmap.c          | 3 +--
 3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6c65d24852e5..b5bb54d6d667 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -895,7 +895,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
diff --git a/mm/memory.c b/mm/memory.c
index cfb57b007a6c..016c67587ef4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1316,8 +1316,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the end address of the unmapping (restart addr if interrupted).
- *
  * Unmap all pages in the vma list.
  *
  * Only addresses between `start' and `end' will be unmapped.
@@ -1329,7 +1327,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -1372,11 +1370,9 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
 			} else
 				unmap_page_range(tlb, vma, start, end, details);
 		}
-		start = end;
 	}
 
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
-	return start;	/* which is now the end (or restart) address */
 }
 
 /**
diff --git a/mm/mmap.c b/mm/mmap.c
index 2b2b45eb816c..9365a8fe3701 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
-	unsigned long end;
 
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
-- 
2.34.1
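
The refactor pattern here can be studied outside the kernel tree: once the
lockbreak that could interrupt the walk was removed, unmap_vmas() always
runs to end_addr, so the returned "restart address" is write-only at every
call site and the function can become void. Below is a minimal standalone
C sketch of that pattern; walk_ranges(), struct range, and nr_visited are
hypothetical names chosen for illustration, not kernel code.

/* Minimal userspace sketch of the refactor, with hypothetical names. */
#include <stdio.h>

struct range {
	unsigned long start, end;
	struct range *next;
};

/*
 * Before the refactor this would have returned the address where the walk
 * stopped, so an interrupted caller could resume.  With no interruption
 * path the walk always reaches end_addr, the return value carries no
 * information, and the signature becomes void (as unmap_vmas() does).
 */
static void walk_ranges(struct range *head, unsigned long start_addr,
			unsigned long end_addr, unsigned long *nr_visited)
{
	struct range *r;

	for (r = head; r && r->start < end_addr; r = r->next) {
		/* Clamp each range to the [start_addr, end_addr) window. */
		unsigned long start = r->start > start_addr ? r->start : start_addr;
		unsigned long end = r->end < end_addr ? r->end : end_addr;

		if (start >= end)
			continue;
		printf("visiting [%#lx, %#lx)\n", start, end);
		(*nr_visited)++;
	}
}

int main(void)
{
	struct range b = { 0x3000, 0x4000, NULL };
	struct range a = { 0x1000, 0x2000, &b };
	unsigned long nr_visited = 0;

	/* Like the exit_mmap() hunk: no return value to capture any more. */
	walk_ranges(&a, 0, -1UL, &nr_visited);
	printf("visited %lu ranges\n", nr_visited);
	return 0;
}

The exit_mmap() hunk above has the same shape: the caller stops capturing
a value that could never differ from end_addr.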