drm/radeon: add userptr flag to directly validate the BO to GTT
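
With RADEON_GEM_USERPTR_VALIDATE set, the userptr ioctl below binds the
user pages and validates the BO into the GTT domain while the handle is
being created, so a bad user pointer fails at creation time rather than
on first use in a command stream.

For illustration only, a minimal userspace sketch (not part of this
patch) of how the flag would be used.  It assumes libdrm's
drmCommandWriteRead() and the radeon UAPI additions from this series
(struct drm_radeon_gem_userptr, the RADEON_GEM_USERPTR_* flags and the
DRM_RADEON_GEM_USERPTR command index):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    static int radeon_create_userptr_bo(int fd, void *cpu_ptr,
                                        uint64_t size, uint32_t *handle)
    {
            struct drm_radeon_gem_userptr args = {
                    /* addr and size must be page aligned */
                    .addr  = (uintptr_t)cpu_ptr,
                    .size  = size,
                    /* READONLY is mandatory for now; VALIDATE asks the
                     * kernel to move the BO to GTT right here */
                    .flags = RADEON_GEM_USERPTR_READONLY |
                             RADEON_GEM_USERPTR_ANONONLY |
                             RADEON_GEM_USERPTR_VALIDATE,
            };
            int r;

            r = drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
                                    &args, sizeof(args));
            if (r)
                    return r; /* e.g. -EINVAL on pre-R600 chips */

            *handle = args.handle;
            return 0;
    }
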
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d1239be0c61173862953e803afa9fa60943faead..450656027aba91db2a6574c5ddc20c718266ee23 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -272,6 +272,82 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *filp)
+{
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_radeon_gem_userptr *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_bo *bo;
+       uint32_t handle;
+       int r;
+
+       if (offset_in_page(args->addr | args->size))
+               return -EINVAL;
+
+       /* we only support read-only mappings for now */
+       if (!(args->flags & RADEON_GEM_USERPTR_READONLY))
+               return -EACCES;
+
+       /* reject unknown flag values */
+       if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+           RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE))
+               return -EINVAL;
+
+       /* read-only pages not tested on older hardware */
+       if (rdev->family < CHIP_R600)
+               return -EINVAL;
+
+       down_read(&rdev->exclusive_lock);
+
+       /* create a gem object to contain this object in */
+       r = radeon_gem_object_create(rdev, args->size, 0,
+                                    RADEON_GEM_DOMAIN_CPU, 0,
+                                    false, &gobj);
+       if (r)
+               goto handle_lockup;
+
+       bo = gem_to_radeon_bo(gobj);
+       r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+       if (r)
+               goto release_object;
+
+       if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
+               down_read(&current->mm->mmap_sem);
+               r = radeon_bo_reserve(bo, true);
+               if (r) {
+                       up_read(&current->mm->mmap_sem);
+                       goto release_object;
+               }
+
+               radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               radeon_bo_unreserve(bo);
+               up_read(&current->mm->mmap_sem);
+               if (r)
+                       goto release_object;
+       }
+
+       r = drm_gem_handle_create(filp, gobj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(gobj);
+       if (r)
+               goto handle_lockup;
+
+       args->handle = handle;
+       up_read(&rdev->exclusive_lock);
+       return 0;
+
+release_object:
+       drm_gem_object_unreference_unlocked(gobj);
+
+handle_lockup:
+       up_read(&rdev->exclusive_lock);
+       r = radeon_gem_handle_lockup(rdev, r);
+
+       return r;
+}
+
 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
 {
@@ -315,6 +391,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
+       if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+               drm_gem_object_unreference_unlocked(gobj);
+               return -EPERM;
+       }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
@@ -358,16 +438,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
+       uint32_t cur_placement = 0;
 
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
-       r = radeon_bo_wait(robj, NULL, false);
-       /* callback hw specific functions if any */
-       if (rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+       r = radeon_bo_wait(robj, &cur_placement, false);
+       /* Flush HDP cache via MMIO if necessary */
+       if (rdev->asic->mmio_hdp_flush &&
+           radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+               robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
@@ -494,9 +576,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 
        switch (args->operation) {
        case RADEON_VA_MAP:
-               if (bo_va->soffset) {
+               if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
-                       args->offset = bo_va->soffset;
+                       args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -530,6 +612,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
+
+       r = -EPERM;
+       if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+               goto out;
+
        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;
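
The -EPERM checks added to radeon_mode_dumb_mmap() and
radeon_gem_op_ioctl() make sure userspace can neither get a second CPU
mapping of a userptr BO nor change its placement domains, since the
kernel does not own the backing pages.  For context, the
radeon_ttm_tt_has_userptr() helper they call is introduced earlier in
this series in radeon_ttm.c; it is roughly the following (a sketch, the
exact struct layout lives in radeon_ttm.c):

    bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
    {
            struct radeon_ttm_tt *gtt = (struct radeon_ttm_tt *)ttm;

            if (gtt == NULL)
                    return false;

            return !!gtt->userptr;
    }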