/* drm/amdgpu: add amdgpu.sched_hw_submission option */
/* [firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/amd/scheduler/gpu_scheduler.h */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #ifndef _GPU_SCHEDULER_H_
25 #define _GPU_SCHEDULER_H_
26
27 #include <linux/kfifo.h>
28
29 #define AMD_KERNEL_CONTEXT_ID                   0
30 #define AMD_KERNEL_PROCESS_ID                   0
31
32 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
33
34 struct amd_gpu_scheduler;
35 struct amd_run_queue;
36
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities.  Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy of the run queue they belong to.
 */
struct amd_sched_entity {
	struct list_head		list;		/* link on belongto_rq's entity list */
	struct amd_run_queue		*belongto_rq;	/* run queue this entity is scheduled on */
	struct amd_sched_entity		*parent;	/* NOTE(review): presumably the enclosing entity group — confirm */
};
48
/**
 * A run queue is a set of entities scheduling command submissions
 * for one specific ring.  It implements the scheduling policy that
 * selects the next entity to emit commands from.
 */
struct amd_run_queue {
	struct mutex			lock;		/* NOTE(review): presumably protects the entity list — confirm against gpu_scheduler.c */
	atomic_t			nr_entity;	/* number of entities on this run queue */
	struct amd_sched_entity		head;		/* list-head sentinel entity (not schedulable itself) */
	struct amd_sched_entity		*current_entity; /* entity most recently selected by the policy */
	/**
	 * Return 0 if this entity can be scheduled.
	 * Return -1 if this entity cannot be scheduled, e.g. because it
	 * is the head sentinel or has no job queued.
	 */
	int (*check_entity_status)(struct amd_sched_entity *entity);
};
66
/**
 * Context-based scheduler entity: there can be multiple entities for
 * each context, one entity per hardware ring.
 */
struct amd_context_entity {
	struct amd_sched_entity		generic_entity;	/* embedded base entity */
	spinlock_t			lock;
	/* the virtual seq is unique per context per ring */
	atomic64_t			last_queued_v_seq;	/* last v_seq handed out to a client */
	atomic64_t			last_emitted_v_seq;	/* last v_seq emitted to the hardware ring */
	atomic64_t			last_signaled_v_seq;	/* last v_seq whose completion was signaled */
	pid_t				tgid;		/* thread-group id of the owning process */
	uint32_t			context_id;	/* AMD_KERNEL_CONTEXT_ID (0) for the kernel context — see define above */
	/* the job_queue maintains the jobs submitted by clients */
	struct kfifo			job_queue;
	spinlock_t			queue_lock;	/* NOTE(review): presumably serializes job_queue access — confirm */
	struct amd_gpu_scheduler	*scheduler;	/* scheduler (ring) this entity submits to */
	wait_queue_head_t		wait_queue;	/* waiters in amd_sched_wait_signal() */
	wait_queue_head_t		wait_emit;	/* waiters in amd_sched_wait_emit() */
	bool				is_pending;
};
88
/**
 * Backend operations called by the scheduler; the driver implements
 * these to bind the generic scheduler to its hardware.
 *
 * @job is an opaque driver-owned pointer in all three callbacks.
 */
struct amd_sched_backend_ops {
	/* Prepare @job from @c_entity before it is run.
	 * NOTE(review): a non-zero return presumably rejects the job — confirm. */
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_context_entity *c_entity,
			   void *job);
	/* Submit @job to the hardware ring. */
	void (*run_job)(struct amd_gpu_scheduler *sched,
			struct amd_context_entity *c_entity,
			void *job);
	/* Post-processing for a completed @job.
	 * NOTE(review): presumably invoked on completion/ISR path — confirm. */
	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
};
102
/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	void				*device;	/* opaque driver/device handle passed to amd_sched_create() */
	struct task_struct		*thread;	/* kernel thread driving this scheduler */
	struct amd_run_queue		sched_rq;	/* run queue for regular (client) entities */
	struct amd_run_queue		kernel_rq;	/* run queue for kernel entities */
	struct kfifo			active_hw_rq;	/* NOTE(review): presumably jobs currently in flight on HW, sized by hw_submission — confirm */
	struct amd_sched_backend_ops	*ops;		/* driver-provided backend callbacks */
	uint32_t			ring_id;	/* hardware ring this scheduler serves */
	uint32_t			granularity;	/* in ms unit */
	uint32_t			preemption;	/* preemption setting — semantics not visible here, TODO confirm */
	uint64_t			last_handled_seq; /* see amd_sched_get_handled_seq() */
	wait_queue_head_t		wait_queue;
	struct amd_context_entity	*current_entity; /* entity currently being serviced */
	struct mutex			sched_lock;
	spinlock_t			queue_lock;
};
122
123
/**
 * Create a scheduler instance for one hardware ring.
 * @device:		opaque driver handle stored in the scheduler
 * @ops:		driver backend callbacks; must outlive the scheduler
 * @ring:		hardware ring id
 * @granularity:	scheduling granularity, in ms
 * @preemption:		preemption setting (semantics defined by the driver)
 * @hw_submission:	NOTE(review): presumably the max number of jobs in
 *			flight on the hardware (sizes active_hw_rq) — confirm
 *			in gpu_scheduler.c
 *
 * Return: the new scheduler, or presumably NULL on failure — confirm.
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
				struct amd_sched_backend_ops *ops,
				uint32_t ring,
				uint32_t granularity,
				uint32_t preemption,
				uint32_t hw_submission);

/* Tear down a scheduler created with amd_sched_create(). */
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

/* Queue @job on @c_entity for later submission to the hardware. */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_context_entity *c_entity,
		       void *job);

/* Check the status of virtual sequence number @seq on @c_entity.
 * NOTE(review): exact return convention not visible here — confirm. */
int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);

/* Sleep on c_entity->wait_queue until virtual seq @seq is signaled.
 * @intr selects an interruptible wait; @timeout bounds the wait. */
int amd_sched_wait_signal(struct amd_context_entity *c_entity,
			  uint64_t seq, bool intr, long timeout);
/* Sleep on c_entity->wait_emit until virtual seq @seq has been emitted
 * to the hardware ring.  @intr/@timeout as for amd_sched_wait_signal(). */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout);

/* NOTE(review): presumably called from the driver's HW interrupt handler
 * to notify the scheduler of job completion — confirm caller. */
void amd_sched_isr(struct amd_gpu_scheduler *sched);
/* Read back sched->last_handled_seq. */
uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);

/* Tear down a context entity previously set up with amd_context_entity_init(). */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity);

/**
 * Initialize a context entity and attach it to run queue @rq.
 * @parent:	optional enclosing entity group
 * @context_id:	owning context id (AMD_KERNEL_CONTEXT_ID for kernel work)
 * @jobs:	NOTE(review): presumably the job_queue kfifo depth — confirm
 */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity,
			    struct amd_sched_entity *parent,
			    struct amd_run_queue *rq,
			    uint32_t context_id,
			    uint32_t jobs);
158
159 #endif