drm/ttm: ttm object security fixes for render nodes
author Thomas Hellstrom <thellstrom@vmware.com>
Wed, 18 Dec 2013 13:13:29 +0000 (14:13 +0100)
committer Thomas Hellstrom <thellstrom@vmware.com>
Wed, 8 Jan 2014 09:11:57 +0000 (10:11 +0100)
When a client looks up a ttm object, don't look it up through the device hash
table, but rather through the file hash table. That ensures the client has
indeed put a reference on the object or, in gem terms, has opened the object,
either using prime or using the global "name".
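
For illustration, a driver-side lookup in an ioctl path now looks roughly like
the sketch below (hypothetical helper, not part of this patch); since the
handle is resolved in the per-file TTM_REF_USAGE hash, a handle the client
never opened simply isn't found:

    /* Hypothetical ioctl-side lookup, for illustration only. */
    static int example_lookup(struct ttm_object_file *tfile, uint32_t handle)
    {
            struct ttm_base_object *base;

            base = ttm_base_object_lookup(tfile, handle);
            if (unlikely(base == NULL))
                    return -EINVAL; /* never opened by this client */

            /* ... use the object ... */

            ttm_base_object_unref(&base);
            return 0;
    }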

To avoid a performance loss, make sure the file hash table entries can be
looked up under an RCU lock and, as a consequence, replace the rwlock with a
spinlock, since the lock no longer needs to be taken in read mode.
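
This implies the usual RCU lookup/release pairing: readers acquire the
reference with kref_get_unless_zero() under rcu_read_lock(), while the release
path unlinks the entry under the spinlock and defers the free with kfree_rcu().
A minimal sketch of that pattern as used here (generic fragment, error handling
trimmed):

    /* Reader: may run concurrently with release. */
    rcu_read_lock();
    if (drm_ht_find_item_rcu(ht, key, &hash) == 0) {
            ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
            if (!kref_get_unless_zero(&ref->kref))
                    ref = NULL;     /* entry is on its way out */
    }
    rcu_read_unlock();

    /* Release: unlink under the spinlock, free after a grace period. */
    spin_lock(&tfile->lock);
    (void) drm_ht_remove_item_rcu(ht, &ref->hash);
    spin_unlock(&tfile->lock);
    kfree_rcu(ref, rcu_head);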

Finally, add a ttm object lookup function for the device hash table,
ttm_base_object_lookup_for_ref(), which is intended to be used when we put a
ref object on a base object or, in gem terms, when we open the object.
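
The intended call sequence when opening an object then becomes roughly the
following (simplified sketch of what the vmwgfx ioctls below do; error handling
trimmed). Note that the shareable check now lives in ttm_ref_object_add(),
which returns -EPERM for non-shareable objects owned by another file:

    struct ttm_base_object *base;
    int ret;

    /* Device-wide lookup; no prior ref from this file is required. */
    base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
    if (unlikely(base == NULL))
            return -EINVAL;

    /* Open the object: adds a TTM_REF_USAGE ref for this file. */
    ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);

    ttm_base_object_unref(&base);
    return ret;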

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/drm/ttm/ttm_object.h

drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1f72f465a79d48dd0088337d713fe..37079859afc86e6cef120cd4d8a924388c3f6e3b 100644
@@ -68,7 +68,7 @@
 
 struct ttm_object_file {
        struct ttm_object_device *tdev;
-       rwlock_t lock;
+       spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */
 
 struct ttm_ref_object {
+       struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
         * call_rcu() or ttm_base_object_kfree().
         */
 
-       if (base->refcount_release) {
-               ttm_object_file_unref(&base->tfile);
+       ttm_object_file_unref(&base->tfile);
+       if (base->refcount_release)
                base->refcount_release(&base);
-       }
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
 {
-       struct ttm_object_device *tdev = tfile->tdev;
-       struct ttm_base_object *uninitialized_var(base);
+       struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
+       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;
 
        rcu_read_lock();
-       ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+       ret = drm_ht_find_item_rcu(ht, key, &hash);
 
        if (likely(ret == 0)) {
-               base = drm_hash_entry(hash, struct ttm_base_object, hash);
-               ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+               base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+               if (!kref_get_unless_zero(&base->refcount))
+                       base = NULL;
        }
        rcu_read_unlock();
 
-       if (unlikely(ret != 0))
-               return NULL;
+       return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
 
-       if (tfile != base->tfile && !base->shareable) {
-               pr_err("Attempted access of non-shareable object\n");
-               ttm_base_object_unref(&base);
-               return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+       struct ttm_base_object *base = NULL;
+       struct drm_hash_item *hash;
+       struct drm_open_hash *ht = &tdev->object_hash;
+       int ret;
+
+       rcu_read_lock();
+       ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+       if (likely(ret == 0)) {
+               base = drm_hash_entry(hash, struct ttm_base_object, hash);
+               if (!kref_get_unless_zero(&base->refcount))
+                       base = NULL;
        }
+       rcu_read_unlock();
 
        return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        int ret = -EINVAL;
 
+       if (base->tfile != tfile && !base->shareable)
+               return -EPERM;
+
        if (existed != NULL)
                *existed = true;
 
        while (ret == -EINVAL) {
-               read_lock(&tfile->lock);
-               ret = drm_ht_find_item(ht, base->hash.key, &hash);
+               rcu_read_lock();
+               ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                       kref_get(&ref->kref);
-                       read_unlock(&tfile->lock);
-                       break;
+                       if (kref_get_unless_zero(&ref->kref)) {
+                               rcu_read_unlock();
+                               break;
+                       }
                }
 
-               read_unlock(&tfile->lock);
+               rcu_read_unlock();
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                ref->ref_type = ref_type;
                kref_init(&ref->kref);
 
-               write_lock(&tfile->lock);
-               ret = drm_ht_insert_item(ht, &ref->hash);
+               spin_lock(&tfile->lock);
+               ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
-                       write_unlock(&tfile->lock);
+                       spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }
 
-               write_unlock(&tfile->lock);
+               spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);
 
                ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
        ht = &tfile->ref_hash[ref->ref_type];
-       (void)drm_ht_remove_item(ht, &ref->hash);
+       (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
 
        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);
 
        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
-       kfree(ref);
-       write_lock(&tfile->lock);
+       kfree_rcu(ref, rcu_head);
+       spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
        struct drm_hash_item *hash;
        int ret;
 
-       write_lock(&tfile->lock);
+       spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
-               write_unlock(&tfile->lock);
+               spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
        return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
        struct ttm_object_file *tfile = *p_tfile;
 
        *p_tfile = NULL;
-       write_lock(&tfile->lock);
+       spin_lock(&tfile->lock);
 
        /*
         * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
 
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
        if (unlikely(tfile == NULL))
                return NULL;
 
-       rwlock_init(&tfile->lock);
+       spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f169cce1173c849b42f160c55a4f39..5d3f67768524442a2238b5f74285dc67cac77162 100644
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         */
        if (arg->handle) {
                struct ttm_base_object *base =
-                       ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+                       ttm_base_object_lookup_for_ref(dev_priv->tdev,
+                                                      arg->handle);
 
                if (unlikely(base == NULL)) {
                        DRM_ERROR("Fence event invalid fence object handle "
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd55357561913384bfa23196fe3479ec2..0fc93398bba202b36205203dbe9407efc326aade 100644
@@ -843,6 +843,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
+       struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +855,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
        struct ttm_base_object *base;
        int ret = -EINVAL;
 
-       base = ttm_base_object_lookup(tfile, req->sid);
+       base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
include/drm/ttm/ttm_object.h
index 58b029894eb33ea32dff2ba2977546c097f2b533..0097cc03034e18b10e5ad9cdc34b9019eb821314 100644
@@ -190,13 +190,25 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
  * @key: Hash key
  *
  * Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
  */
 
 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
                                                      *tfile, uint32_t key);
 
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
 /**
  * ttm_base_object_unref
  *
@@ -218,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
  *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
  * Adding a ref object to a base object is basically like referencing the
  * base object, but a user-space application holds the reference. When the
  * file corresponding to @tfile is closed, all its reference objects are