/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/fence.h>

#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS		3000

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
	struct list_head		list;
	struct amd_sched_rq		*belongto_rq;

	/* the virtual_seq is unique per context per ring */
	atomic64_t			last_queued_v_seq;
	atomic64_t			last_signaled_v_seq;

	/* the job_queue maintains the jobs submitted by clients */
	struct kfifo			job_queue;
	spinlock_t			queue_lock;

	struct amd_gpu_scheduler	*scheduler;
	wait_queue_head_t		wait_queue;
	wait_queue_head_t		wait_emit;

	uint64_t			fence_context;
};

/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	struct list_head	entities;
	struct amd_sched_entity	*current_entity;
};

struct amd_sched_fence {
	struct fence			base;
	struct amd_sched_entity		*entity;
};

struct amd_sched_job {
	struct list_head		list;
	struct amd_gpu_scheduler	*sched;
	struct amd_sched_entity		*s_entity;
	struct amd_sched_fence		*s_fence;
};

extern const struct fence_ops amd_sched_fence_ops;

static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);

	if (__f->base.ops == &amd_sched_fence_ops)
		return __f;

	return NULL;
}
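
/*
 * Usage note (illustrative, not part of this header): to_amd_sched_fence()
 * is the usual container_of() downcast from the generic struct fence to the
 * scheduler fence. A minimal sketch of using it from a callback registered
 * with fence_add_callback(); the my_fence_cb name is hypothetical:
 *
 *	static void my_fence_cb(struct fence *f, struct fence_cb *cb)
 *	{
 *		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 *
 *		if (s_fence)
 *			pr_debug("fence of entity %p signaled\n",
 *				 s_fence->entity);
 *	}
 */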

/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct amd_sched_backend_ops {
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *c_entity,
			   struct amd_sched_job *job);
	struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
				 struct amd_sched_entity *c_entity,
				 struct amd_sched_job *job);
	void (*process_job)(struct amd_gpu_scheduler *sched,
			    struct amd_sched_job *job);
};
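
/*
 * A minimal driver-side sketch (illustrative only; the my_* names are
 * assumptions, not part of this header): the backend implements the three
 * callbacks and hands one amd_sched_backend_ops table to amd_sched_create().
 *
 *	static int my_prepare_job(struct amd_gpu_scheduler *sched,
 *				  struct amd_sched_entity *c_entity,
 *				  struct amd_sched_job *job)
 *	{
 *		return 0;	// nothing to pre-validate in this sketch
 *	}
 *
 *	static struct fence *my_run_job(struct amd_gpu_scheduler *sched,
 *					struct amd_sched_entity *c_entity,
 *					struct amd_sched_job *job)
 *	{
 *		return NULL;	// would submit to the ring and return the HW fence
 *	}
 *
 *	static void my_process_job(struct amd_gpu_scheduler *sched,
 *				   struct amd_sched_job *job)
 *	{
 *		// completion handling for the finished job
 *	}
 *
 *	static struct amd_sched_backend_ops my_sched_ops = {
 *		.prepare_job	= my_prepare_job,
 *		.run_job	= my_run_job,
 *		.process_job	= my_process_job,
 *	};
 */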

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	struct task_struct		*thread;
	struct amd_sched_rq		sched_rq;
	struct amd_sched_rq		kernel_rq;
	struct list_head		active_hw_rq;
	atomic64_t			hw_rq_count;
	struct amd_sched_backend_ops	*ops;
	uint32_t			granularity; /* in ms unit */
	wait_queue_head_t		wait_queue;
	struct amd_sched_entity		*current_entity;
	struct mutex			sched_lock;
	spinlock_t			queue_lock;
	uint32_t			hw_submission_limit;
};

struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   uint32_t granularity,
					   uint32_t hw_submission);
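
/*
 * Creation sketch (illustrative; adev, my_sched_ops and the numeric values
 * are assumptions, see the backend ops example above): one scheduler is
 * created per hardware ring, with hw_submission bounding how many jobs may
 * be in flight on the ring at once.
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(adev, &my_sched_ops,
 *				 1,	// granularity in ms
 *				 2);	// hw_submission limit
 *	if (!sched)
 *		return -ENOMEM;
 */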
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

int amd_sched_push_job(struct amd_sched_job *sched_job);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity);
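
/*
 * Submission-flow sketch (illustrative; ctx_entity, the queue depth, the job
 * allocation and the omitted error handling are assumptions, not part of this
 * header): an entity is bound to one of the scheduler's run queues, then jobs
 * are pushed through it.
 *
 *	struct amd_sched_entity ctx_entity;
 *	struct amd_sched_job *job;
 *
 *	amd_sched_entity_init(sched, &ctx_entity, &sched->sched_rq, 32);
 *
 *	job = kzalloc(sizeof(*job), GFP_KERNEL);
 *	job->sched = sched;
 *	job->s_entity = &ctx_entity;
 *	job->s_fence = amd_sched_fence_create(&ctx_entity);
 *	amd_sched_push_job(job);
 *
 *	...
 *	amd_sched_entity_fini(sched, &ctx_entity);
 */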

uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity);
void amd_sched_fence_signal(struct amd_sched_fence *fence);

#endif