drm/radeon: allow creating overlapping userptrs
author     Christian König <christian.koenig@amd.com>
           Tue, 31 Mar 2015 15:37:00 +0000 (17:37 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 13 Apr 2015 15:17:59 +0000 (11:17 -0400)
This is similar to the Intel implementation, but instead of just
falling back to a global linear list when we get an overlapping
userptr request, we accumulate all overlapping userptrs in a local
list attached to a single interval tree node.
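
The merge-on-register strategy can be modelled outside the kernel. The
sketch below is a minimal userspace C model, not the driver code: a
plain linked list stands in for the interval tree, an int id stands in
for struct radeon_bo, and the fixed-size BO arrays are an assumption
made for brevity. It mirrors the loop in radeon_mn_register(): every
node overlapping [addr, end] is unlinked, its range widens the request
and its BOs join a local list, and one of the removed nodes is reused
for the merged result.

    /*
     * Userspace model of radeon_mn_register()'s merge step.
     * Assumptions (not driver code): a singly linked list replaces the
     * interval tree, an int id replaces struct radeon_bo, and each node
     * holds at most 16 BOs for simplicity.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        unsigned long start, last;  /* inclusive range, like it.start/it.last */
        int bos[16];                /* stand-in for the node's BO list */
        int nr_bos;
        struct node *next;
    };

    static struct node *objects;    /* stand-in for rmn->objects */

    /* Register BO 'id' covering [addr, addr + size - 1]. */
    static int register_bo(int id, unsigned long addr, unsigned long size)
    {
        unsigned long end = addr + size - 1;
        struct node *node = NULL, **p = &objects;
        int bos[16], nr_bos = 0, i;

        /* Unlink every overlapping node, widening [addr, end] and
         * collecting the BOs it held -- the patch's local "bos" list. */
        while (*p) {
            struct node *it = *p;

            if (it->last < addr || it->start > end) {
                p = &it->next;
                continue;
            }
            if (it->start < addr)
                addr = it->start;
            if (it->last > end)
                end = it->last;
            for (i = 0; i < it->nr_bos; i++)
                bos[nr_bos++] = it->bos[i];
            *p = it->next;
            free(node);             /* keep the last removed node for reuse */
            node = it;
        }

        /* Allocate only if no overlapping node could be recycled. */
        if (!node) {
            node = malloc(sizeof(*node));
            if (!node)
                return -1;
        }

        node->start = addr;
        node->last = end;
        node->nr_bos = nr_bos;
        for (i = 0; i < nr_bos; i++)
            node->bos[i] = bos[i];
        node->bos[node->nr_bos++] = id;
        node->next = objects;
        objects = node;
        return 0;
    }

    int main(void)
    {
        struct node *n;

        register_bo(1, 0x1000, 0x1000); /* [0x1000, 0x1fff]         */
        register_bo(2, 0x3000, 0x1000); /* disjoint -> second node  */
        register_bo(3, 0x1800, 0x2000); /* overlaps both -> one node */

        for (n = objects; n; n = n->next)
            printf("[%#lx, %#lx] holds %d BOs\n", n->start, n->last, n->nr_bos);
        return 0;
    }

With these inputs the third request absorbs both existing nodes, so the
model prints a single merged interval holding all three BOs, which is
exactly the behavior the patch gives the interval tree.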

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_mn.c

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73a6432da1a574c2f1b33b4b09e9426851638f5e..d2abe481954fc14a8146c0f9db0f0a6e73816900 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -507,7 +507,7 @@ struct radeon_bo {
        pid_t                           pid;
 
        struct radeon_mn                *mn;
-       struct interval_tree_node       mn_it;
+       struct list_head                mn_list;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 572b4dbec186a9d59e8066782773c86acdbfa46b..01701376b23987c624a8d48a899e065d10d4bb8b 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,6 +53,11 @@ struct radeon_mn {
        struct rb_root          objects;
 };
 
+struct radeon_mn_node {
+       struct interval_tree_node       it;
+       struct list_head                bos;
+};
+
 /**
  * radeon_mn_destroy - destroy the rmn
  *
@@ -64,14 +69,21 @@ static void radeon_mn_destroy(struct work_struct *work)
 {
        struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
        struct radeon_device *rdev = rmn->rdev;
-       struct radeon_bo *bo, *next;
+       struct radeon_mn_node *node, *next_node;
+       struct radeon_bo *bo, *next_bo;
 
        mutex_lock(&rdev->mn_lock);
        mutex_lock(&rmn->lock);
        hash_del(&rmn->node);
-       rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
-               interval_tree_remove(&bo->mn_it, &rmn->objects);
-               bo->mn = NULL;
+       rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+                                            it.rb) {
+
+               interval_tree_remove(&node->it, &rmn->objects);
+               list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+                       bo->mn = NULL;
+                       list_del_init(&bo->mn_list);
+               }
+               kfree(node);
        }
        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
@@ -121,29 +133,33 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 
        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
+               struct radeon_mn_node *node;
                struct radeon_bo *bo;
                int r;
 
-               bo = container_of(it, struct radeon_bo, mn_it);
+               node = container_of(it, struct radeon_mn_node, it);
                it = interval_tree_iter_next(it, start, end);
 
-               r = radeon_bo_reserve(bo, true);
-               if (r) {
-                       DRM_ERROR("(%d) failed to reserve user bo\n", r);
-                       continue;
-               }
+               list_for_each_entry(bo, &node->bos, mn_list) {
 
-               r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
-                       false, MAX_SCHEDULE_TIMEOUT);
-               if (r)
-                       DRM_ERROR("(%d) failed to wait for user bo\n", r);
+                       r = radeon_bo_reserve(bo, true);
+                       if (r) {
+                               DRM_ERROR("(%d) failed to reserve user bo\n", r);
+                               continue;
+                       }
 
-               radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-               if (r)
-                       DRM_ERROR("(%d) failed to validate user bo\n", r);
+                       r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+                               true, false, MAX_SCHEDULE_TIMEOUT);
+                       if (r)
+                               DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
-               radeon_bo_unreserve(bo);
+                       radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+                       if (r)
+                               DRM_ERROR("(%d) failed to validate user bo\n", r);
+
+                       radeon_bo_unreserve(bo);
+               }
        }
        
        mutex_unlock(&rmn->lock);
@@ -220,24 +236,44 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
        unsigned long end = addr + radeon_bo_size(bo) - 1;
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;
+       struct radeon_mn_node *node = NULL;
+       struct list_head bos;
        struct interval_tree_node *it;
 
        rmn = radeon_mn_get(rdev);
        if (IS_ERR(rmn))
                return PTR_ERR(rmn);
 
+       INIT_LIST_HEAD(&bos);
+
        mutex_lock(&rmn->lock);
 
-       it = interval_tree_iter_first(&rmn->objects, addr, end);
-       if (it) {
-               mutex_unlock(&rmn->lock);
-               return -EEXIST;
+       while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+               kfree(node);
+               node = container_of(it, struct radeon_mn_node, it);
+               interval_tree_remove(&node->it, &rmn->objects);
+               addr = min(it->start, addr);
+               end = max(it->last, end);
+               list_splice(&node->bos, &bos);
+       }
+
+       if (!node) {
+               node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+               if (!node) {
+                       mutex_unlock(&rmn->lock);
+                       return -ENOMEM;
+               }
        }
 
        bo->mn = rmn;
-       bo->mn_it.start = addr;
-       bo->mn_it.last = end;
-       interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+       node->it.start = addr;
+       node->it.last = end;
+       INIT_LIST_HEAD(&node->bos);
+       list_splice(&bos, &node->bos);
+       list_add(&bo->mn_list, &node->bos);
+
+       interval_tree_insert(&node->it, &rmn->objects);
 
        mutex_unlock(&rmn->lock);
 
@@ -255,6 +291,7 @@ void radeon_mn_unregister(struct radeon_bo *bo)
 {
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;
+       struct list_head *head;
 
        mutex_lock(&rdev->mn_lock);
        rmn = bo->mn;
@@ -264,8 +301,19 @@ void radeon_mn_unregister(struct radeon_bo *bo)
        }
 
        mutex_lock(&rmn->lock);
-       interval_tree_remove(&bo->mn_it, &rmn->objects);
+       /* save the next list entry for later */
+       head = bo->mn_list.next;
+
        bo->mn = NULL;
+       list_del(&bo->mn_list);
+
+       if (list_empty(head)) {
+               struct radeon_mn_node *node;
+               node = container_of(head, struct radeon_mn_node, bos);
+               interval_tree_remove(&node->it, &rmn->objects);
+               kfree(node);
+       }
+
        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
 }
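
A side note on the unregister path above: it saves bo->mn_list.next
before the list_del(). If the deletion emptied the node's list, that
saved pointer is necessarily the list head embedded in struct
radeon_mn_node, so list_empty() on it both detects the now-empty node
and lets container_of() recover it for freeing. The standalone sketch
below demonstrates the trick; it reimplements just enough of the
kernel's list helpers to run in userspace and is not the kernel code
itself.

    /* Userspace demonstration of the bo->mn_list.next trick; the list
     * helpers are minimal reimplementations, not the kernel's. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mn_node { struct list_head bos; };      /* like radeon_mn_node */
    struct bo      { struct list_head mn_list; };  /* like radeon_bo      */

    static void unregister_bo(struct bo *bo)
    {
        /* save the next list entry for later, as the patch does */
        struct list_head *head = bo->mn_list.next;

        list_del(&bo->mn_list);

        if (list_empty(head)) {
            /* head is &node->bos itself: the node lost its last BO */
            struct mn_node *node = container_of(head, struct mn_node, bos);
            printf("node %p has no BOs left, free it\n", (void *)node);
        } else {
            printf("node still holds BOs\n");
        }
    }

    int main(void)
    {
        struct mn_node node;
        struct bo a, b;

        list_init(&node.bos);
        list_add(&a.mn_list, &node.bos);
        list_add(&b.mn_list, &node.bos);

        unregister_bo(&b);  /* prints: node still holds BOs        */
        unregister_bo(&a);  /* prints: node ... no BOs left, free it */
        return 0;
    }

The check is sound because a saved next pointer that belongs to another
BO can never satisfy list_empty(): an element of a non-empty list
always points past itself. Only when the head itself is the sole
remaining entry does next point back to it.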