drm/amdgpu: stop trying to suspend UVD sessions v2
author Christian König <christian.koenig@amd.com>
Thu, 7 May 2015 13:19:25 +0000 (15:19 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 28 Aug 2015 19:04:18 +0000 (15:04 -0400)
Saving the current UVD state on suspend and restoring it on resume
just doesn't work reliably. Instead, just close and clean up all
sessions on suspend.

Ported from radeon commit 12e49feadff6d7b7ebbe852b36943a71524d8d34.

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 719506808b4a6f3ee423272e1149f7ae5b2c5b10..9fefcd9c1af1ab668caf9c8e741da10b999cdaa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1666,7 +1666,6 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b87355ccfb1d071282d183ceab7d6bc22c86fffe..3ad4a83c418fed123a5660aacba5a7f6457651cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -221,31 +221,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-       unsigned size;
-       void *ptr;
-       const struct common_firmware_header *hdr;
-       int i;
+       struct amdgpu_ring *ring = &adev->uvd.ring;
+       int i, r;
 
        if (adev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-               if (atomic_read(&adev->uvd.handles[i]))
-                       break;
-
-       if (i == AMDGPU_MAX_UVD_HANDLES)
-               return 0;
+       for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+               uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+               if (handle != 0) {
+                       struct fence *fence;
 
-       hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+                       amdgpu_uvd_note_usage(adev);
 
-       size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-       size -= le32_to_cpu(hdr->ucode_size_bytes);
+                       r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
 
-       ptr = adev->uvd.cpu_addr;
-       ptr += le32_to_cpu(hdr->ucode_size_bytes);
+                       fence_wait(fence, false);
+                       fence_put(fence);
 
-       adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(adev->uvd.saved_bo, ptr, size);
+                       adev->uvd.filp[i] = NULL;
+                       atomic_set(&adev->uvd.handles[i], 0);
+               }
+       }
 
        return 0;
 }
@@ -270,12 +271,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
        ptr = adev->uvd.cpu_addr;
        ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-       if (adev->uvd.saved_bo != NULL) {
-               memcpy(ptr, adev->uvd.saved_bo, size);
-               kfree(adev->uvd.saved_bo);
-               adev->uvd.saved_bo = NULL;
-       } else
-               memset(ptr, 0, size);
+       memset(ptr, 0, size);
 
        return 0;
 }
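
For reference, here is how amdgpu_uvd_suspend() reads with this patch
applied, assembled from the hunks above. The comments are annotations
added for this write-up, not part of the commit:

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	/* Nothing to do if UVD was never brought up. */
	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			/* Submit a destroy message for this session... */
			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			/* ...and wait for the firmware to process it. */
			fence_wait(fence, false);
			fence_put(fence);

			/* Forget the session; resume starts from scratch. */
			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

Since amdgpu_uvd_resume() now unconditionally zeroes the firmware data
segment instead of restoring a saved copy, any session still open at
suspend time is gone after resume and userspace has to create new ones.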