[ARM] Do not call flush_tlb_kernel_range() with IRQs disabled.
author     Russell King <rmk@dyn-67.arm.linux.org.uk>
           Fri, 25 Nov 2005 15:52:51 +0000 (15:52 +0000)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Fri, 25 Nov 2005 15:52:51 +0000 (15:52 +0000)
We must not call TLB maintenance operations with interrupts disabled,
otherwise we risk a lockup in the SMP IPI code, which relies on the
calling CPU being able to service incoming IPIs while it waits for the
other CPUs to respond.

This means that consistent_free() cannot be called from a context with
IRQs disabled.  In addition, we must not hold the consistent_lock in
consistent_free when we call flush_tlb_kernel_range().  However, we must
continue to prevent consistent_alloc() from re-using the memory region
until we've finished tearing down the mapping and dealing with the TLB.

Therefore, leave the vm_region entry in the list, but mark it inactive
before dropping the lock and starting the tear-down process.  After the
mapping has been torn down, re-acquire the lock and remove the entry
from the list.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/mm/consistent.c

index 47b0b767f080ef78a2ca50da6d432525eb406360..dbfe9e891f015a87f3efb5b31349d8c1baca20c8 100644 (file)
@@ -66,6 +66,7 @@ struct vm_region {
        unsigned long           vm_start;
        unsigned long           vm_end;
        struct page             *vm_pages;
+       int                     vm_active;
 };
 
 static struct vm_region consistent_head = {
@@ -104,6 +105,7 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
+       new->vm_active = 1;
 
        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;
@@ -120,7 +122,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
        struct vm_region *c;
        
        list_for_each_entry(c, &head->vm_list, vm_list) {
-               if (c->vm_start == addr)
+               if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
@@ -319,6 +321,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 
 /*
  * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
@@ -326,14 +329,18 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
        unsigned long flags, addr;
        pte_t *ptep;
 
+       WARN_ON(irqs_disabled());
+
        size = PAGE_ALIGN(size);
 
        spin_lock_irqsave(&consistent_lock, flags);
-
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;
 
+       c->vm_active = 0;
+       spin_unlock_irqrestore(&consistent_lock, flags);
+
        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
@@ -372,8 +379,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
        flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
+       spin_lock_irqsave(&consistent_lock, flags);
        list_del(&c->vm_list);
-
        spin_unlock_irqrestore(&consistent_lock, flags);
 
        kfree(c);
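
For illustration, the locking pattern described in the commit message can
be sketched in plain C: mark the region inactive while holding the lock,
drop the lock for the slow tear-down step, then re-take the lock only to
unlink and free the entry.  This is a minimal userspace sketch, not kernel
code: a pthread mutex stands in for consistent_lock, and region_find(),
region_free() and flush_mapping() are hypothetical names, with
flush_mapping() standing in for the unmap plus flush_tlb_kernel_range()
work that must happen without the lock held.

/*
 * Sketch of the pattern: hide the region under the lock, tear it down
 * unlocked, then unlink it under the lock again.
 */
#include <pthread.h>
#include <stdlib.h>

struct region {
	struct region *next;
	unsigned long start, end;
	int active;			/* analogous to vm_active */
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region *region_list;

/* Stand-in for the unmap + TLB flush; must run without region_lock held. */
static void flush_mapping(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}

static struct region *region_find(unsigned long start)
{
	struct region *c;

	for (c = region_list; c; c = c->next)
		if (c->active && c->start == start)	/* skip inactive entries */
			return c;
	return NULL;
}

void region_free(unsigned long start)
{
	struct region *c, **p;

	pthread_mutex_lock(&region_lock);
	c = region_find(start);
	if (!c) {
		pthread_mutex_unlock(&region_lock);
		return;
	}
	/*
	 * Hide the region from lookups while we tear it down, but leave
	 * it on the list so the address range cannot be re-allocated.
	 */
	c->active = 0;
	pthread_mutex_unlock(&region_lock);

	/* Tear-down and flush happen with the lock dropped. */
	flush_mapping(c->start, c->end);

	/* Re-take the lock just long enough to unlink the entry. */
	pthread_mutex_lock(&region_lock);
	for (p = &region_list; *p; p = &(*p)->next) {
		if (*p == c) {
			*p = c->next;
			break;
		}
	}
	pthread_mutex_unlock(&region_lock);

	free(c);
}

The property mirrored from the patch is that the region stays on the list
(so a concurrent allocation cannot re-use its address range) yet is
invisible to lookups, and no lock is held across the flush.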