ipc/sem.c: update/correct memory barriers
[firefly-linux-kernel-4.4.55.git] / ipc / sem.c
index afb0e62af956766486a052c391021ae3124aa754..47a15192b8b879c76e618b3b2088b4d76ef3b1a7 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
-                       /* spin_is_locked() is not a memory barrier */
-                       smp_mb();
+                       /*
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
+                        *      complex_count++;
+                        *      spin_unlock(sem_perm.lock);
+                        */
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /* Now repeat the test of complex_count:
                         * It can't change anymore until we drop sem->lock.