sched/numa: Fix task_numa_free() lockdep splat
author Mike Galbraith <bitbucket@online.de>
Mon, 7 Apr 2014 08:55:15 +0000 (10:55 +0200)
committer Ingo Molnar <mingo@kernel.org>
Fri, 11 Apr 2014 08:39:15 +0000 (10:39 +0200)
Sasha reported that lockdep claims that the following commit made
numa_group.lock interrupt unsafe:

  156654f491dd ("sched/numa: Move task_numa_free() to __put_task_struct()")

While I don't see how that could be, given that the commit in question moved
task_numa_free() from one irq-enabled region to another, the patch below does
make both the lockdep gripes and the lockups that accompany them (with
numa=fake=4) go away.

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Fixes: 156654f491dd ("sched/numa: Move task_numa_free() to __put_task_struct()")
Signed-off-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: torvalds@linux-foundation.org
Cc: mgorman@suse.com
Cc: akpm@linux-foundation.org
Cc: Dave Jones <davej@redhat.com>
Link: http://lkml.kernel.org/r/1396860915.5170.5.camel@marge.simpson.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
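
For reference, the locking pattern the patch moves to is the usual one: take
numa_group.lock with the _irq spinlock variants so interrupts stay disabled
across the whole critical section, and when two group locks of the same class
must be held at once, order them by address and annotate the second
acquisition for lockdep. The sketch below only illustrates that shape;
struct fake_group, update_one() and move_faults() are made-up names for the
example, not kernel code.

#include <linux/spinlock.h>
#include <linux/kernel.h>	/* swap() */

/* Illustrative stand-in for struct numa_group. */
struct fake_group {
	spinlock_t	lock;
	long		total_faults;
};

/*
 * Single-lock case, as in task_numa_placement()/task_numa_free(): the
 * _irq variants keep the critical section interrupt safe.
 */
static void update_one(struct fake_group *grp, long delta)
{
	spin_lock_irq(&grp->lock);
	grp->total_faults += delta;
	spin_unlock_irq(&grp->lock);
}

/*
 * Two locks of the same class, as in task_numa_group(): order them by
 * address to avoid ABBA deadlock, disable irqs once, and tell lockdep
 * that the second acquisition is intentional nesting -- the same shape
 * as the new double_lock_irq() helper added below.
 */
static void move_faults(struct fake_group *a, struct fake_group *b, long delta)
{
	spinlock_t *l1 = &a->lock, *l2 = &b->lock;

	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);

	a->total_faults -= delta;
	b->total_faults += delta;

	spin_unlock(l2);
	spin_unlock_irq(l1);	/* irqs re-enabled on the last unlock */
}
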
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7e9bd0b1fa9ef1aa16880a5a10601374c7bb618b..4f14a656a7200ea35003cb81414efb5e2d7d11d7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
-               spin_lock(group_lock);
+               spin_lock_irq(group_lock);
        }
 
        /* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
                        }
                }
 
-               spin_unlock(group_lock);
+               spin_unlock_irq(group_lock);
        }
 
        /* Preferred node as the node with the most faults */
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        if (!join)
                return;
 
-       double_lock(&my_grp->lock, &grp->lock);
+       BUG_ON(irqs_disabled());
+       double_lock_irq(&my_grp->lock, &grp->lock);
 
        for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
                my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        grp->nr_tasks++;
 
        spin_unlock(&my_grp->lock);
-       spin_unlock(&grp->lock);
+       spin_unlock_irq(&grp->lock);
 
        rcu_assign_pointer(p->numa_group, grp);
 
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
        void *numa_faults = p->numa_faults_memory;
 
        if (grp) {
-               spin_lock(&grp->lock);
+               spin_lock_irq(&grp->lock);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        grp->faults[i] -= p->numa_faults_memory[i];
                grp->total_faults -= p->total_numa_faults;
 
                list_del(&p->numa_entry);
                grp->nr_tasks--;
-               spin_unlock(&grp->lock);
+               spin_unlock_irq(&grp->lock);
                rcu_assign_pointer(p->numa_group, NULL);
                put_numa_group(grp);
        }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f28d3a222ca97b5fb98210b2ecc1e756b7a..456e492a3dca37c13d7cb7b57a51965bfa18d6b3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+       if (l1 > l2)
+               swap(l1, l2);
+
+       spin_lock_irq(l1);
+       spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
        if (l1 > l2)