drm/ttm: change fence_lock to inner lock
author:    Maarten Lankhorst <m.b.lankhorst@gmail.com>
           Wed, 28 Nov 2012 11:25:39 +0000 (12:25 +0100)
committer: Dave Airlie <airlied@redhat.com>
           Mon, 10 Dec 2012 10:09:58 +0000 (20:09 +1000)
Make bdev->fence_lock an inner lock, nested inside glob->lru_lock.
This requires changing the order in ttm_bo_cleanup_refs_or_queue to
take the reservation first, as there is otherwise no race-free way to
take the lru lock before the fence_lock.
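
For illustration, a minimal userspace sketch of the resulting nesting
rule, with pthread mutexes standing in for the two spinlocks. The names
mirror the kernel fields, but none of this is TTM code:

#include <pthread.h>

static pthread_mutex_t lru_lock   = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void locking_rule(void)
{
	pthread_mutex_lock(&lru_lock);     /* outer lock first */
	/* the buffer reservation is only trylocked at this point; a
	 * trylock cannot deadlock, so it is safe under the outer lock */
	pthread_mutex_lock(&fence_lock);   /* inner lock second */

	pthread_mutex_unlock(&fence_lock); /* release inner first */
	pthread_mutex_unlock(&lru_lock);   /* then outer */
}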

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2c54c3d414b3cb956e7cf953ab6a610bc5e6e82e..b7781453bfd17728a3522520c46706b86f53e372 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -500,27 +500,17 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
-       struct ttm_bo_driver *driver;
+       struct ttm_bo_driver *driver = bdev->driver;
        void *sync_obj = NULL;
        int put_count;
        int ret;
 
+       spin_lock(&glob->lru_lock);
+       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
-       if (!bo->sync_obj) {
-
-               spin_lock(&glob->lru_lock);
-
-               /**
-                * Lock inversion between bo:reserve and bdev::fence_lock here,
-                * but that's OK, since we're only trylocking.
-                */
-
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-               if (unlikely(ret == -EBUSY))
-                       goto queue;
-
+       if (!ret && !bo->sync_obj) {
                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);
 
@@ -530,18 +520,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                ttm_bo_list_ref_sub(bo, put_count, true);
 
                return;
-       } else {
-               spin_lock(&glob->lru_lock);
        }
-queue:
-       driver = bdev->driver;
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       spin_unlock(&bdev->fence_lock);
+
+       if (!ret) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
+       }
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
-       spin_unlock(&bdev->fence_lock);
 
        if (sync_obj) {
                driver->sync_obj_flush(sync_obj);
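
The net effect of the hunks above: the buffer is reserved (via trylock)
under the lru lock before the fence lock is ever taken, and the
immediate-destroy path runs only when the reservation succeeded and the
bo is already idle. A condensed, self-contained model of that branch
structure (the fields are stand-ins, not the real ttm_buffer_object
layout):

#include <stdbool.h>
#include <stdio.h>

struct bo_model {
	bool reserved;	/* ttm_bo_reserve_locked() returned 0 */
	bool idle;	/* bo->sync_obj == NULL after ttm_bo_wait() */
};

static void cleanup_refs_or_queue(struct bo_model *bo)
{
	if (bo->reserved && bo->idle) {
		/* fast path: drop from the LRU and destroy immediately */
		puts("destroy now");
		return;
	}
	/* slow path: keep a fence reference if there is one, release
	 * the reservation if it was obtained, queue on ddestroy */
	puts("queue for delayed destruction");
}

int main(void)
{
	cleanup_refs_or_queue(&(struct bo_model){ .reserved = true,  .idle = true  });
	cleanup_refs_or_queue(&(struct bo_model){ .reserved = true,  .idle = false });
	cleanup_refs_or_queue(&(struct bo_model){ .reserved = false, .idle = false });
	return 0;
}
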
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1986d006c2641750fa1b4202a5d200fbf6df898f..cd9e4523dc561efaf0208689a90a4790b4dab986 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -213,8 +213,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
        driver = bdev->driver;
        glob = bo->glob;
 
-       spin_lock(&bdev->fence_lock);
        spin_lock(&glob->lru_lock);
+       spin_lock(&bdev->fence_lock);
 
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
@@ -223,8 +223,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
-       spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);
+       spin_unlock(&glob->lru_lock);
 
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
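
The swap in ttm_eu_fence_buffer_objects matters because the cleanup path
above now takes lru_lock before fence_lock; any path still taking the
two locks in the old fence-then-lru order would form the classic AB/BA
deadlock pair with it. A small runnable illustration with userspace
stand-ins; if either thread below acquired the locks in the opposite
order, the program could hang:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lru   = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t fence = PTHREAD_MUTEX_INITIALIZER; /* inner */

/* both threads honour the same order: lru (outer), then fence (inner) */
static void *path(void *name)
{
	pthread_mutex_lock(&lru);
	pthread_mutex_lock(&fence);
	printf("%s: holds both locks\n", (const char *)name);
	pthread_mutex_unlock(&fence);
	pthread_mutex_unlock(&lru);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, path, "cleanup");
	pthread_create(&b, NULL, path, "execbuf");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}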