drm/ttm: introduce utility function to free an allocated memory node
author		Ben Skeggs <bskeggs@redhat.com>
		Wed, 4 Aug 2010 02:07:08 +0000 (12:07 +1000)
committer	Ben Skeggs <bskeggs@redhat.com>
		Tue, 5 Oct 2010 00:00:34 +0000 (10:00 +1000)
Existing core code/drivers call drm_mm_put_block on ttm_mem_reg.mm_node
directly.  Future patches will modify TTM behaviour in such a way that
ttm_mem_reg.mm_node doesn't necessarily belong to drm_mm.

Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
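
For illustration only, a minimal sketch of the driver-side pattern the new helper enables. example_driver_move_cleanup is a hypothetical function, not part of this patch, and the include paths follow the driver convention of this kernel era; the nouveau and radeon hunks below apply exactly this substitution.

/*
 * Illustrative sketch only: "example_driver_move_cleanup" is a made-up
 * driver helper showing the cleanup pattern this patch switches the
 * drivers to.
 */
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_bo_driver.h"

static int example_driver_move_cleanup(struct ttm_buffer_object *bo,
				       struct ttm_mem_reg *tmp_mem, int ret)
{
	/*
	 * Previously each driver open-coded the free, e.g.:
	 *
	 *	if (tmp_mem->mm_node) {
	 *		spin_lock(&bo->glob->lru_lock);
	 *		drm_mm_put_block(tmp_mem->mm_node);
	 *		spin_unlock(&bo->glob->lru_lock);
	 *		tmp_mem->mm_node = NULL;
	 *	}
	 *
	 * which hard-codes the assumption that mm_node is a drm_mm node.
	 */

	/* With this patch the driver simply hands the node back to TTM: */
	ttm_bo_mem_put(bo, tmp_mem);
	return ret;
}
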
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
include/drm/ttm/ttm_bo_driver.h

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4e813638bdb75784096c7dd2ad71e8d4fd78b889..f685f392c226e4ad78154da9eb6d3f161be7a52c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -693,12 +693,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
-       if (tmp_mem.mm_node) {
-               spin_lock(&bo->bdev->glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&bo->bdev->glob->lru_lock);
-       }
-
+       ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
 }
 
@@ -731,12 +726,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                goto out;
 
 out:
-       if (tmp_mem.mm_node) {
-               spin_lock(&bo->bdev->glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&bo->bdev->glob->lru_lock);
-       }
-
+       ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 84c53e41a88fd783d6f7ba32c9155109eb3c95be..cc19aba9bb74f68021312a29f3149a967be52cca 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -326,14 +326,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        }
        r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
 }
 
@@ -372,14 +365,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                goto out_cleanup;
        }
 out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1eee9bc726c4d4ee34f8962526b316..80d37b460a8c505de381df3b334c02ef3de53818 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -475,11 +475,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }
-               if (bo->mem.mm_node) {
-                       drm_mm_put_block(bo->mem.mm_node);
-                       bo->mem.mm_node = NULL;
-               }
                spin_unlock(&glob->lru_lock);
+               ttm_bo_mem_put(bo, &bo->mem);
 
                atomic_set(&bo->reserved, 0);
 
@@ -621,7 +618,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;
@@ -667,12 +663,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
-               spin_lock(&glob->lru_lock);
-               if (evict_mem.mm_node) {
-                       drm_mm_put_block(evict_mem.mm_node);
-                       evict_mem.mm_node = NULL;
-               }
-               spin_unlock(&glob->lru_lock);
+               ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
@@ -769,6 +760,19 @@ static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
        return 0;
 }
 
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+       struct ttm_bo_global *glob = bo->glob;
+
+       if (mem->mm_node) {
+               spin_lock(&glob->lru_lock);
+               drm_mm_put_block(mem->mm_node);
+               spin_unlock(&glob->lru_lock);
+               mem->mm_node = NULL;
+       }
+}
+EXPORT_SYMBOL(ttm_bo_mem_put);
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e0423249ec4f78f7c7cbdc72b6df921e50..0ebfe0d9493165b24956f81616b06b2bba9fb1d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-
-       if (old_mem->mm_node) {
-               spin_lock(&bo->glob->lru_lock);
-               drm_mm_put_block(old_mem->mm_node);
-               spin_unlock(&bo->glob->lru_lock);
-       }
-       old_mem->mm_node = NULL;
+       ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index b87504235f183f45c01d527ae585cbbafbd197ab..6c694d86e03d2597e0c0e8c4649c3d736effddf8 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -649,6 +649,10 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_mem_reg *mem,
                                bool interruptible,
                                bool no_wait_reserve, bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+                          struct ttm_mem_reg *mem);
+
 /**
  * ttm_bo_wait_for_cpu
  *