[PATCH] mm: arches skip ptlock
author    Hugh Dickins <hugh@veritas.com>
Sun, 30 Oct 2005 01:16:24 +0000 (18:16 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sun, 30 Oct 2005 04:40:40 +0000 (21:40 -0700)
Convert the few architectures that call pud_alloc, pmd_alloc or pte_alloc_map
on a user mm so that they no longer take the page_table_lock first, nor drop it
after.  Each of these can continue to use pte_alloc_map: there is no need to
change over to pte_alloc_map_lock, since the page tables being set up here are
neither racy nor swappable.
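
For reference, a minimal sketch of the two call patterns as they stand after
this series (a hypothetical caller; mm, pmd, addr and entry stand in for real
context):

        pte_t *pte;
        spinlock_t *ptl;

        /* Racy, swappable user page tables: allocate and take the lock
         * atomically, then unmap and unlock together. */
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        set_pte(pte, entry);
        pte_unmap_unlock(pte, ptl);

        /* The paths converted below populate page tables that no other
         * thread can reach yet, so the unlocked variant remains correct. */
        pte = pte_alloc_map(mm, pmd, addr);
        if (!pte)
                return -ENOMEM;
        set_pte(pte, entry);
        pte_unmap(pte);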

In the sparc64 io_remap_pfn_range, flush_tlb_range now falls outside the
page_table_lock: that's okay, since on sparc64 it behaves like flush_tlb_mm,
which dup_mmap has always called from outside the page_table_lock.
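
Schematically, the tail of that function now orders as follows (a sketch only;
the real hunk is further down):

        while (from < end) {
                /* pud_alloc/pmd_alloc synchronize internally when they
                 * actually need to allocate, so no caller-held lock */
                ...
        }
        /* page_table_lock guards page table contents, not the TLB, so
         * the flush can run unlocked, as flush_tlb_mm always has in
         * dup_mmap */
        flush_tlb_range(vma, beg, end);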

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/arm/mm/mm-armv.c
arch/arm26/mm/memc.c
arch/sparc/mm/generic.c
arch/sparc64/mm/generic.c
arch/um/kernel/skas/mmu.c

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa0511ec95380c9f8ccaa114d040290b622..60f3e039bac26b48324e81c15bdcd0158898d441 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -179,11 +179,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
        if (!vectors_high()) {
-               /*
-                * This lock is here just to satisfy pmd_alloc and pte_lock
-                */
-               spin_lock(&mm->page_table_lock);
-
                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);
-
-               spin_unlock(&mm->page_table_lock);
        }
 
        return new_pgd;
 
 no_pte:
-       spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
-       free_pages((unsigned long)new_pgd, 2);
-       return NULL;
-
 no_pmd:
-       spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
-       return NULL;
-
 no_pgd:
        return NULL;
 }
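
With the lock gone, each error label simply falls through to release whatever
the previous level allocated.  The resulting shape in the ARM version (arm26
below is identical, but frees via free_pgd_slow):

        new_pte = pte_alloc_map(mm, new_pmd, 0);
        if (!new_pte)
                goto no_pte;
        ...
        return new_pgd;

no_pte:
        pmd_free(new_pmd);                      /* undo the pmd_alloc ... */
no_pmd:
        free_pages((unsigned long)new_pgd, 2);  /* ... then the pgd pages */
no_pgd:
        return NULL;                            /* nothing left to undo */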
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index d6b008b8db76a179cc8e66a6a1b6cdaef0d4afd2..34def6397c3c5c718227f7e17c7dae4877d9cb03 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -78,12 +78,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
        if (!new_pgd)
                goto no_pgd;
 
-       /*
-        * This lock is here just to satisfy pmd_alloc and pte_lock
-         * FIXME: I bet we could avoid taking it pretty much altogether
-        */
-       spin_lock(&mm->page_table_lock);
-
        /*
         * On ARM, first page must always be allocated since it contains
         * the machine vectors.
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-       spin_unlock(&mm->page_table_lock);
-
        /* update MEMC tables */
        cpu_memc_update_all(new_pgd);
        return new_pgd;
 
 no_pte:
-       spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
-       free_pgd_slow(new_pgd);
-       return NULL;
-
 no_pmd:
-       spin_unlock(&mm->page_table_lock);
        free_pgd_slow(new_pgd);
-       return NULL;
-
 no_pgd:
        return NULL;
 }
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 659c9a71f867fd959b9475df284e4d4e78bbf7e6..9604893ffdbd020955ec7d0a1e1bbdd6ac942afe 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -81,9 +81,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
 
-       spin_lock(&mm->page_table_lock);
        while (from < end) {
-               pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+               pmd_t *pmd = pmd_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
@@ -93,7 +92,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
-       spin_unlock(&mm->page_table_lock);
 
        flush_tlb_range(vma, beg, end);
        return error;
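
The caller-side lock is safe to drop because, with the companion patches in
this series, the allocators themselves take page_table_lock just long enough
to populate the upper-level entry.  Roughly, abridged from the 2.6.15-era
mm/memory.c (a sketch for context, not part of this diff):

        int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        {
                pmd_t *new = pmd_alloc_one(mm, address);
                if (!new)
                        return -ENOMEM;

                spin_lock(&mm->page_table_lock);
                if (pud_present(*pud))          /* raced: already populated */
                        pmd_free(new);
                else
                        pud_populate(mm, pud, new);
                spin_unlock(&mm->page_table_lock);
                return 0;
        }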
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index afc01cec701f56f7cb539b03a9a9f0e829c35256..112c316e7cd213e6b59cc1284a49d7fc05aae898 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -135,9 +135,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
 
-       spin_lock(&mm->page_table_lock);
        while (from < end) {
-               pud_t *pud = pud_alloc(current->mm, dir, from);
+               pud_t *pud = pud_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pud)
                        break;
@@ -147,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
-       flush_tlb_range(vma, beg, end);
-       spin_unlock(&mm->page_table_lock);
 
+       flush_tlb_range(vma, beg, end);
        return error;
 }
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 240143b616a2c359629492e38fcfa8056732aa78..02cf36e0331a2b8b14f8741febd329df9bb888e3 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
        pmd_t *pmd;
        pte_t *pte;
 
-       spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkexec(*pte);
        *pte = pte_wrprotect(*pte);
-       spin_unlock(&mm->page_table_lock);
        return(0);
 
  out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
  out_pte:
        pmd_free(pmd);
  out:
-       spin_unlock(&mm->page_table_lock);
        return(-ENOMEM);
 }