/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
	INIT_LIST_HEAD(&rq->head.list);
	rq->head.belongto_rq = rq;
	mutex_init(&rq->lock);
	atomic_set(&rq->nr_entity, 0);
	rq->current_entity = &rq->head;
}

/* Note: the caller must hold the run queue lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
			     struct amd_sched_entity *entity)
{
	if (rq->current_entity == entity)
		rq->current_entity = list_entry(entity->list.prev,
						typeof(*entity), list);
	list_del_init(&entity->list);
	atomic_dec(&rq->nr_entity);
}

static void rq_add_entity(struct amd_run_queue *rq,
			  struct amd_sched_entity *entity)
{
	list_add_tail(&entity->list, &rq->head.list);
	atomic_inc(&rq->nr_entity);
}

/**
 * Select the next entity from a given run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
	struct amd_sched_entity *p = rq->current_entity;
	int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

	while (i) {
		p = list_entry(p->list.next, typeof(*p), list);
		if (!rq->check_entity_status(p)) {
			rq->current_entity = p;
			break;
		}
		i--;
	}
	return i ? p : NULL;
}

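/*
 * Illustrative walk (hypothetical entities, not defined in this file): with
 * entities A, B and C on the queue and current_entity == B, the loop visits
 * C, then the dummy head (always reported as not runnable), then A, then B
 * itself, and picks the first one whose check_entity_status() returns 0.
 * The counter starts at "real count + dummy head" so each entry is examined
 * at most once; if none is runnable, the counter reaches zero and NULL is
 * returned.
 */
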
static bool context_entity_is_waiting(struct amd_context_entity *entity)
{
	/* TODO: sync obj for multi-ring synchronization */
	return false;
}

static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
	struct amd_context_entity *tmp = NULL;

	/* The dummy head is never runnable */
	if (entity == &entity->belongto_rq->head)
		return -1;

	tmp = container_of(entity, typeof(*tmp), generic_entity);
	if (kfifo_is_empty(&tmp->job_queue) ||
	    context_entity_is_waiting(tmp))
		return -1;

	return 0;
}

/**
 * Note: This function should only be called from the scheduler main
 * function for thread safety; there is no other protection here.
 * Returns true if the scheduler has room to run something.
 *
 * For active_hw_rq there is only one producer (the scheduler thread) and
 * one consumer (the ISR), so it is safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
	return !kfifo_is_full(&sched->active_hw_rq);
}

/**
 * Select the next entity from the kernel run queue; if none is available,
 * return NULL.
 */
static struct amd_context_entity *kernel_rq_select_context(
	struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *sched_entity = NULL;
	struct amd_context_entity *tmp = NULL;
	struct amd_run_queue *rq = &sched->kernel_rq;

	mutex_lock(&rq->lock);
	sched_entity = rq_select_entity(rq);
	if (sched_entity)
		tmp = container_of(sched_entity,
				   typeof(*tmp), generic_entity);
	mutex_unlock(&rq->lock);
	return tmp;
}

/**
 * Select the next entity containing real IB submissions.
 */
static struct amd_context_entity *select_context(
	struct amd_gpu_scheduler *sched)
{
	struct amd_context_entity *wake_entity = NULL;
	struct amd_context_entity *tmp;
	struct amd_run_queue *rq;

	if (!is_scheduler_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than the normal run queue */
	tmp = kernel_rq_select_context(sched);
	if (tmp != NULL)
		goto exit;

	WARN_ON(offsetof(struct amd_context_entity, generic_entity) != 0);

	rq = &sched->sched_rq;
	mutex_lock(&rq->lock);
	tmp = container_of(rq_select_entity(rq),
			   typeof(*tmp), generic_entity);
	mutex_unlock(&rq->lock);
exit:
	/* Wake the previously selected entity if we switched away from it */
	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_context_entity
 * @parent	The parent entity of this amd_context_entity
 * @rq		The run queue this entity belongs to
 * @context_id	The context id for this entity
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity,
			    struct amd_sched_entity *parent,
			    struct amd_run_queue *rq,
			    uint32_t context_id,
			    uint32_t jobs)
{
	uint64_t seq_ring = 0;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_context_entity));
	seq_ring = ((uint64_t)sched->ring_id) << 60;
	spin_lock_init(&entity->lock);
	entity->generic_entity.belongto_rq = rq;
	entity->generic_entity.parent = parent;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	init_waitqueue_head(&entity->wait_emit);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
		AMD_KERNEL_PROCESS_ID : current->tgid;
	entity->context_id = context_id;
	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
	atomic64_set(&entity->last_queued_v_seq, seq_ring);
	atomic64_set(&entity->last_signaled_v_seq, seq_ring);

	/* Add the entity to the run queue */
	mutex_lock(&rq->lock);
	rq_add_entity(rq, &entity->generic_entity);
	mutex_unlock(&rq->lock);
	return 0;
}

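/*
 * Usage sketch (illustrative only; the ctx structure, ctx_id and the queue
 * depth of 32 below are assumptions, not symbols defined in this file):
 *
 *	struct amd_context_entity *c_entity = &ctx->ring_entity[ring_id];
 *	int r;
 *
 *	r = amd_context_entity_init(sched, c_entity,
 *				    NULL,		// no parent entity
 *				    &sched->sched_rq,	// normal run queue
 *				    ctx_id,		// per-context id
 *				    32);		// job queue depth
 *	if (r)
 *		return r;
 *
 * The matching amd_context_entity_fini() call must be made before the memory
 * backing the entity is freed.
 */
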
/**
 * Query if an entity is initialized.
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
					  struct amd_context_entity *entity)
{
	return entity->scheduler == sched &&
		entity->generic_entity.belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_context_entity *entity)
{
	/*
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
	 */
	if ((sched->current_entity != entity) &&
	    kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity.
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity)
{
	int r = 0;
	struct amd_run_queue *rq = entity->generic_entity.belongto_rq;

	if (!is_context_entity_initialized(sched, entity))
		return 0;

	/*
	 * The client will not queue more IBs during this fini; consume
	 * the existing queued IBs.
	 */
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ? 0 : -1;

	if (r && entity->is_pending)
		DRM_INFO("Entity %u is in waiting state during fini, "
			 "all pending ibs will be canceled.\n",
			 entity->context_id);

	mutex_lock(&rq->lock);
	rq_remove_entity(rq, &entity->generic_entity);
	mutex_unlock(&rq->lock);
	kfifo_free(&entity->job_queue);
	return r;
}

/**
 * Submit a normal job to the job queue.
 *
 * @sched	The pointer to the scheduler
 * @c_entity	The pointer to the amd_context_entity
 * @job		The pointer to the job to submit
 *
 * Return 0 if succeeded, -1 if failed.
 *	  -2 indicates the queue is full for this client; the client should
 *	     wait until the scheduler consumes some of the queued commands.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_context_entity *c_entity,
		       void *job)
{
	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
				   &c_entity->queue_lock) != sizeof(void *)) {
		/*
		 * The current context used up all its IB slots;
		 * wait here, or we need to check whether the GPU is hung.
		 */
		schedule();
	}

	wake_up_interruptible(&sched->wait_queue);
	return 0;
}

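/*
 * Usage sketch (illustrative only; the job structure and its v_seq member
 * are assumptions, not part of this file):
 *
 *	job->v_seq = atomic64_inc_return(&c_entity->last_queued_v_seq);
 *	amd_sched_push_job(sched, c_entity, job);
 *	// optionally block until the scheduler has emitted the IB to the ring
 *	amd_sched_wait_emit(c_entity, job->v_seq, false, -1);
 *
 * Note that the call can block in schedule() while the per-context job queue
 * is full, so it must not be made from atomic context.
 */
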
/**
 * Check the virtual sequence number for the specified context.
 *
 * @seq		The virtual sequence number to check
 * @c_entity	The pointer to a valid amd_context_entity
 *
 * Return 0 if signaled, -1 otherwise.
 */
int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq)
{
	return (seq <= atomic64_read(&c_entity->last_signaled_v_seq)) ? 0 : -1;
}

/**
 * Wait for a virtual sequence number to be signaled, or until timeout.
 *
 * @c_entity	The pointer to a valid context entity
 * @seq		The virtual sequence number to wait for
 * @intr	Interruptible or not
 * @timeout	Timeout in ms, wait infinitely if < 0
 * @emit	Wait for emit or for signal
 *
 * Return 0 if signaled, < 0 on failure.
 */
static int amd_sched_wait(struct amd_context_entity *c_entity,
			  uint64_t seq,
			  bool intr,
			  long timeout,
			  bool emit)
{
	atomic64_t *v_seq = emit ? &c_entity->last_emitted_v_seq :
		&c_entity->last_signaled_v_seq;
	wait_queue_head_t *wait_queue = emit ? &c_entity->wait_emit :
		&c_entity->wait_queue;

	if (intr && (timeout < 0)) {
		wait_event_interruptible(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (intr && (timeout >= 0)) {
		wait_event_interruptible_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	} else if (!intr && (timeout < 0)) {
		wait_event(
			*wait_queue,
			seq <= atomic64_read(v_seq));
		return 0;
	} else if (!intr && (timeout >= 0)) {
		wait_event_timeout(
			*wait_queue,
			seq <= atomic64_read(v_seq),
			msecs_to_jiffies(timeout));
		return (seq <= atomic64_read(v_seq)) ?
			0 : -1;
	}
	/* all intr/timeout combinations are handled above */
	return 0;
}

int amd_sched_wait_signal(struct amd_context_entity *c_entity,
			  uint64_t seq,
			  bool intr,
			  long timeout)
{
	return amd_sched_wait(c_entity, seq, intr, timeout, false);
}

int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout)
{
	return amd_sched_wait(c_entity, seq, intr, timeout, true);
}

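/*
 * Usage sketch (illustrative only; v_seq is a hypothetical variable holding
 * a previously queued virtual sequence number): waiting up to 100 ms,
 * interruptibly, for that sequence number to be signaled:
 *
 *	if (amd_sched_wait_signal(c_entity, v_seq, true, 100))
 *		DRM_INFO("wait on seq %llu timed out\n",
 *			 (unsigned long long)v_seq);
 *
 * Passing a negative timeout waits forever; amd_sched_wait_emit() works the
 * same way, but completes as soon as the job has been emitted to the ring
 * rather than when the hardware has signaled it.
 */
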
static int amd_sched_main(void *param)
{
	int r;
	void *job;
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_context_entity *c_entity = NULL;
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

	/* The scheduler thread runs with real-time priority */
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		wait_event_interruptible(sched->wait_queue,
					 is_scheduler_ready(sched) &&
					 (c_entity = select_context(sched)));
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r)
			WARN_ON(kfifo_in_spinlocked(
					&sched->active_hw_rq,
					&job,
					sizeof(void *),
					&sched->queue_lock) != sizeof(void *));
		mutex_lock(&sched->sched_lock);
		sched->ops->run_job(sched, c_entity, job);
		mutex_unlock(&sched->sched_lock);
	}
	return 0;
}

uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched)
{
	return sched->last_handled_seq;
}

/**
 * ISR to handle EOP interrupts.
 *
 * @sched: gpu scheduler
 */
void amd_sched_isr(struct amd_gpu_scheduler *sched)
{
	int r;
	void *job;

	r = kfifo_out_spinlocked(&sched->active_hw_rq,
				 &job, sizeof(void *),
				 &sched->queue_lock);

	if (r != sizeof(void *))
		job = NULL;

	sched->ops->process_job(sched, job);
	sched->last_handled_seq++;
	wake_up_interruptible(&sched->wait_queue);
}

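/*
 * Usage sketch (illustrative only; the amdgpu_ring type and its scheduler
 * member are assumptions, not defined here): a driver's EOP interrupt
 * handler simply forwards to the scheduler after fence processing:
 *
 *	static void ring_eop_irq(struct amdgpu_ring *ring)
 *	{
 *		amd_sched_isr(ring->scheduler);
 *	}
 *
 * This pops one job from active_hw_rq, hands it to ops->process_job() and
 * wakes the scheduler thread so it can emit the next IB.
 */
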
/**
 * Create a gpu scheduler.
 *
 * @device	The device context for this scheduler
 * @ops		The backend operations for this scheduler
 * @ring	The scheduler is per ring; this is the ring id
 * @granularity	The minimum time unit, in ms, that the scheduler schedules at
 * @preemption	Indicates whether this ring supports preemption; 0 means no
 *
 * Return the pointer to the scheduler on success, otherwise NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   unsigned ring,
					   unsigned granularity,
					   unsigned preemption)
{
	struct amd_gpu_scheduler *sched;
	char name[20] = "gpu_sched[0]";

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->device = device;
	sched->ops = ops;
	sched->granularity = granularity;
	sched->ring_id = ring;
	sched->preemption = preemption;
	sched->last_handled_seq = 0;

	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	mutex_init(&sched->sched_lock);
	spin_lock_init(&sched->queue_lock);
	init_rq(&sched->sched_rq);
	sched->sched_rq.check_entity_status = gpu_entity_check_status;

	init_rq(&sched->kernel_rq);
	sched->kernel_rq.check_entity_status = gpu_entity_check_status;

	init_waitqueue_head(&sched->wait_queue);
	if (kfifo_alloc(&sched->active_hw_rq,
			AMD_MAX_ACTIVE_HW_SUBMISSION * sizeof(void *),
			GFP_KERNEL)) {
		kfree(sched);
		return NULL;
	}

	/* Each scheduler runs on a separate kernel thread */
	sched->thread = kthread_create(amd_sched_main, sched, name);
	if (sched->thread) {
		wake_up_process(sched->thread);
		return sched;
	}

	DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
	kfifo_free(&sched->active_hw_rq);
	kfree(sched);
	return NULL;
}

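/*
 * Lifecycle sketch (illustrative only; adev, ring->idx and the ring_sched_*
 * backend callbacks are hypothetical driver symbols, not defined here):
 *
 *	static struct amd_sched_backend_ops ring_sched_ops = {
 *		.prepare_job = ring_sched_prepare_job,
 *		.run_job     = ring_sched_run_job,
 *		.process_job = ring_sched_process_job,
 *	};
 *
 *	ring->scheduler = amd_sched_create(adev, &ring_sched_ops,
 *					   ring->idx, 1, 0);
 *	if (!ring->scheduler)
 *		return -ENOMEM;
 *
 * The scheduler is torn down with amd_sched_destroy() when the ring is
 * finalized.
 */
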
/**
 * Destroy a gpu scheduler.
 *
 * @sched	The pointer to the scheduler
 *
 * Return 0 if succeeded, -1 if failed.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfifo_free(&sched->active_hw_rq);
	kfree(sched);
	return 0;
}