drm/amdgpu: add helper function for kernel submission
author Chunming Zhou <david1.zhou@amd.com>
Wed, 29 Jul 2015 02:33:14 +0000 (10:33 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:44 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

index 2619c78ec30347b4a3fcb276117efeffc37220f6..1e87acf3589201ff9af99c531e94831df6f41c86 100644 (file)
@@ -864,6 +864,13 @@ enum amdgpu_ring_type {
 
 extern struct amd_sched_backend_ops amdgpu_sched_ops;
 
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+                                        struct amdgpu_ring *ring,
+                                        struct amdgpu_ib *ibs,
+                                        unsigned num_ibs,
+                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        void *owner);
+
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
index b913c22dd6b2e54632198f9fd96201829b246725..d682fabca958435a147d66d119a559e9d276a537 100644 (file)
@@ -108,3 +108,38 @@ struct amd_sched_backend_ops amdgpu_sched_ops = {
        .process_job = amdgpu_sched_process_job
 };
 
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+                                        struct amdgpu_ring *ring,
+                                        struct amdgpu_ib *ibs,
+                                        unsigned num_ibs,
+                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        void *owner)
+{
+       int r = 0;
+       if (amdgpu_enable_scheduler) {
+               uint64_t v_seq;
+               struct amdgpu_cs_parser *sched_job =
+                       amdgpu_cs_parser_create(adev,
+                                               owner,
+                                               adev->kernel_ctx,
+                                               ibs, num_ibs);
+               if (!sched_job) {
+                       return -ENOMEM;
+               }
+               sched_job->free_job = free_job;
+               v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+               ibs[num_ibs - 1].sequence = v_seq;
+               amd_sched_push_job(ring->scheduler,
+                                  &adev->kernel_ctx->rings[ring->idx].c_entity,
+                                  sched_job);
+               r = amd_sched_wait_emit(
+                       &adev->kernel_ctx->rings[ring->idx].c_entity,
+                       v_seq,
+                       false,
+                       -1);
+               if (r)
+                       WARN(true, "emit timeout\n");
+       } else
+               r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
+       return r;
+}
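
For context, a kernel-internal caller is expected to use the new helper roughly as in the minimal sketch below. The IB allocation and teardown flow, and the names example_free_job() and example_submit_nop_ib(), are illustrative assumptions; this patch only introduces amdgpu_sched_ib_submit_kernel_helper() itself.

#include <linux/slab.h>
#include "amdgpu.h"

/* Cleanup callback the scheduler invokes once the job has been processed. */
static int example_free_job(struct amdgpu_cs_parser *sched_job)
{
        amdgpu_ib_free(sched_job->adev, sched_job->ibs);
        kfree(sched_job->ibs);
        return 0;
}

static int example_submit_nop_ib(struct amdgpu_device *adev,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_ib *ib;
        int r;

        ib = kzalloc(sizeof(*ib), GFP_KERNEL);
        if (!ib)
                return -ENOMEM;

        r = amdgpu_ib_get(ring, NULL, 64, ib);
        if (r)
                goto err_free;

        /* ... fill ib->ptr with packets and set ib->length_dw ... */

        /* Goes through the GPU scheduler when amdgpu_enable_scheduler is set,
         * otherwise falls back to a direct amdgpu_ib_schedule() call. */
        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
                                                 &example_free_job,
                                                 AMDGPU_FENCE_OWNER_UNDEFINED);
        if (r)
                goto err_ib;

        /* On the scheduler path example_free_job() now owns the IB; on the
         * direct path the caller still has to release it. */
        if (!amdgpu_enable_scheduler) {
                amdgpu_ib_free(adev, ib);
                kfree(ib);
        }
        return 0;

err_ib:
        amdgpu_ib_free(adev, ib);
err_free:
        kfree(ib);
        return r;
}

The point of the helper is to keep the amdgpu_enable_scheduler decision in one place, so in-kernel submitters (for example VM page-table updates or the IB tests) do not have to open-code both the scheduler path and the direct amdgpu_ib_schedule() path.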