/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
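
/*
 * Context release callback, invoked by kref_put() once the last
 * reference is gone: drop every fence still tracked in the per-ring
 * pending-CS window, tear down the per-ring scheduler entities when
 * the GPU scheduler is enabled, then free the context.
 */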
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_device *adev;
        unsigned i, j;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);
        adev = ctx->adev;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
                        fence_put(ctx->rings[i].fences[j]);

        if (amdgpu_enable_scheduler) {
                for (i = 0; i < adev->num_rings; i++)
                        amd_context_entity_fini(adev->rings[i]->scheduler,
                                                &ctx->rings[i].c_entity);
        }

        kfree(ctx);
}
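
/*
 * Common initialization shared by user and kernel contexts. Ring
 * sequence numbers start at 1, presumably so that 0 never names a
 * valid submission.
 */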
static void amdgpu_ctx_init(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct amdgpu_ctx *ctx,
                            uint32_t id)
{
        int i;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ctx->rings[i].sequence = 1;
}
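
/*
 * Allocate a context. With an fpriv this is a user context and gets
 * its handle from the per-file IDR; without one it is the single
 * global kernel context, stored in adev->kernel_ctx under the fixed
 * id AMD_KERNEL_CONTEXT_ID. When the scheduler is enabled, user
 * contexts are attached to each ring's normal run queue and the
 * kernel context to its kernel run queue.
 */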
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                     uint32_t *id)
{
        struct amdgpu_ctx *ctx;
        int i, j, r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        if (fpriv) {
                struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

                mutex_lock(&mgr->lock);
                r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
                if (r < 0) {
                        mutex_unlock(&mgr->lock);
                        kfree(ctx);
                        return r;
                }
                *id = (uint32_t)r;
                amdgpu_ctx_init(adev, fpriv, ctx, *id);
                mutex_unlock(&mgr->lock);
        } else {
                if (adev->kernel_ctx) {
                        DRM_ERROR("kernel context has already been created.\n");
                        kfree(ctx);
                        return 0;
                }
                *id = AMD_KERNEL_CONTEXT_ID;
                amdgpu_ctx_init(adev, fpriv, ctx, *id);
                adev->kernel_ctx = ctx;
        }

        if (amdgpu_enable_scheduler) {
                /* create a context entity for each ring */
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_run_queue *rq;

                        if (fpriv)
                                rq = &adev->rings[i]->scheduler->sched_rq;
                        else
                                rq = &adev->rings[i]->scheduler->kernel_rq;
                        r = amd_context_entity_init(adev->rings[i]->scheduler,
                                                    &ctx->rings[i].c_entity,
                                                    NULL, rq, *id);
                        if (r)
                                break;
                }
                /* on failure, unwind the entities created so far */
                if (i < adev->num_rings) {
                        for (j = 0; j < i; j++)
                                amd_context_entity_fini(adev->rings[j]->scheduler,
                                                        &ctx->rings[j].c_entity);
                        *id = 0;
                        kfree(ctx);
                        return -EINVAL;
                }
        }

        return 0;
}
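
/*
 * Destroy a context: a user context is looked up and removed from the
 * per-file IDR, the kernel context is taken from adev->kernel_ctx.
 * Only the handle's reference is dropped here; the context is actually
 * released once the last reference goes away.
 */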
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;

        if (fpriv) {
                struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

                mutex_lock(&mgr->lock);
                ctx = idr_find(&mgr->ctx_handles, id);
                if (ctx) {
                        idr_remove(&mgr->ctx_handles, id);
                        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
                        mutex_unlock(&mgr->lock);
                        return 0;
                }
                mutex_unlock(&mgr->lock);
        } else {
                ctx = adev->kernel_ctx;
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
                return 0;
        }
        return -EINVAL;
}
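
/*
 * Query context state. Reset reporting compares the context's cached
 * reset counter against the device's global gpu_reset_counter: any
 * difference is reported as AMDGPU_CTX_UNKNOWN_RESET, since nothing
 * yet tracks whether this context was the guilty one.
 */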
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}
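
/*
 * DRM_IOCTL_AMDGPU_CTX entry point: dispatches the alloc, free and
 * query-state operations carried in union drm_amdgpu_ctx.
 */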
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(adev, fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}
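
/*
 * Look up a user context by handle and take a reference. Callers pair
 * this with amdgpu_ctx_put(), e.g. (sketch):
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, id);
 *	if (!ctx)
 *		return -EINVAL;
 *	... use ctx ...
 *	amdgpu_ctx_put(ctx);
 */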
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}
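
/* Drop a reference previously taken with amdgpu_ctx_get(). */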
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}
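
/*
 * Publish a fence in the context's fixed-size per-ring window of
 * pending submissions. The slot being recycled may still hold the
 * fence from AMDGPU_CTX_MAX_CS_PENDING submissions ago, so that old
 * fence is waited on first; this throttles a context to at most
 * AMDGPU_CTX_MAX_CS_PENDING outstanding CS per ring. Returns the
 * sequence number assigned to the new fence.
 */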
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence, uint64_t queued_seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        uint64_t seq = 0;
        unsigned idx = 0;
        struct fence *other = NULL;

        if (amdgpu_enable_scheduler)
                seq = queued_seq;
        else
                seq = cring->sequence;
        idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
        other = cring->fences[idx];
        if (other) {
                signed long r;

                r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
                if (r < 0)
                        DRM_ERROR("Error (%ld) waiting for fence!\n", r);
        }

        fence_get(fence);

        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
        if (!amdgpu_enable_scheduler)
                cring->sequence++;
        spin_unlock(&ctx->ring_lock);

        fence_put(other);

        return seq;
}
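
/*
 * Look up the fence of a previous submission: a sequence number that
 * was never handed out yields -EINVAL, one that has already fallen out
 * of the pending window yields NULL (long since signalled), otherwise
 * a new reference to the stored fence is returned.
 */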
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        struct fence *fence;
        uint64_t queued_seq;
        int r;

        if (amdgpu_enable_scheduler) {
                r = amd_sched_wait_emit(&cring->c_entity, seq, false, -1);
                if (r)
                        return NULL;
        }

        spin_lock(&ctx->ring_lock);
        if (amdgpu_enable_scheduler)
                queued_seq = amd_sched_next_queued_seq(&cring->c_entity);
        else
                queued_seq = cring->sequence;

        if (seq >= queued_seq) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        /* fences older than the pending window have already been released */
        if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}
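
/* Per-file context manager: a mutex-protected IDR of context handles. */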
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}
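
/*
 * Tear down the per-file manager on file release. Dropping the handle
 * reference should free each remaining context; if it was not the last
 * reference, someone leaked one and the context is reported as still
 * alive before the IDR is destroyed.
 */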
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}