From cd961bb9eebb630452f49dcbf3e5f0059428614a Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel@ffwll.ch>
Date: Wed, 28 Jan 2015 10:02:23 +0100
Subject: [PATCH] drm/mst: fix recursive sleep warning on qlock

With drm-next, we can get a backtrace from the recursive-sleep debug
detection (blocking ops called while the task is not TASK_RUNNING).

This is due to the wait_event_timeout() condition callback that checks
the txmsg state taking mgr->qlock: the condition is evaluated after the
task state has already been set for sleeping, so taking the mutex can
cause a sleep inside a sleep.

Daniel went over it and was happy that we can drop the mutex in this
case.

Signed-off-by: Dave Airlie <airlied@redhat.com>
---
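For reference, a minimal sketch of the two sides of the handshake that
makes dropping the lock safe. This is illustrative, modelled on
drm_dp_mst_wait_tx_reply() and the down-reply completion path, not the
exact upstream code:

	/*
	 * Waiter side: wait_event_timeout() sets the task state to
	 * TASK_UNINTERRUPTIBLE and then evaluates the condition, so a
	 * mutex_lock() inside check_txmsg_state() may sleep while
	 * !TASK_RUNNING, which is the recursive sleep the debug check
	 * flags.
	 */
	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));

	/*
	 * Writer side: updates to txmsg->state stay under mgr->qlock,
	 * and the wake_up() after the update pairs with the barriers
	 * in wait_event_timeout(), so the waiter observes the terminal
	 * state without taking the lock itself.
	 */
	mutex_lock(&mgr->qlock);
	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
	mutex_unlock(&mgr->qlock);
	wake_up(&mgr->tx_waitq);
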
 drivers/gpu/drm/drm_dp_mst_topology.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b68717ec8..379ab4555756 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
 	bool ret;
-	mutex_lock(&mgr->qlock);
+
+	/*
+	 * All updates to txmsg->state are protected by mgr->qlock, and the two
+	 * cases we check here are terminal states. For those the barriers
+	 * provided by the wake_up/wait_event pair are enough.
+	 */
 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	mutex_unlock(&mgr->qlock);
 	return ret;
 }
 
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
 	return 0;
 }
 
-/* must be called holding qlock */
 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&mgr->qlock));
+
 	/* construct a chunk from the first msg in the tx_msg queue */
 	if (list_empty(&mgr->tx_msg_downq)) {
 		mgr->tx_down_in_progress = false;
-- 
2.34.1