/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Remember to wait for @f before emitting commands; fences that don't
 * belong to @adev are waited on right away instead of being recorded.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_fence *fence;
	struct amdgpu_fence *other;

	if (!f)
		return 0;

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev)
		return fence_wait(f, true);

	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
		amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
		other = sync->last_vm_update;
		sync->last_vm_update = amdgpu_fence_ref(
			amdgpu_fence_later(fence, other));
		amdgpu_fence_unref(&other);
	}

	return 0;
}

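/*
 * Illustrative sketch (not part of the original driver): amdgpu_sync_fence()
 * keeps at most one fence per ring and uses amdgpu_fence_later() to drop the
 * older of two fences from the same ring.  The helper name and its fence
 * parameters are hypothetical, only the amdgpu_sync_fence() calls are real.
 */
static int __maybe_unused amdgpu_sync_fence_example(struct amdgpu_device *adev,
						    struct amdgpu_sync *sync,
						    struct fence *older,
						    struct fence *newer)
{
	int r;

	/* record the older fence first ... */
	r = amdgpu_sync_fence(adev, sync, older);
	if (r)
		return r;

	/* ... adding the newer one afterwards leaves only the newer fence
	 * in the per-ring slot when both come from the same ring */
	return amdgpu_sync_fence(adev, sync, newer);
}
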
/**
 * amdgpu_sync_resv - sync to all fences of a reservation object
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner used to decide which fences need to be waited for
 *
 * Sync to every fence in the reservation object that @owner has to wait for.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	struct amdgpu_fence *fence;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		fence = f ? to_amdgpu_fence(f) : NULL;
		if (fence && fence->ring->adev == adev) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence->owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as the owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence->owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

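/*
 * Illustrative sketch (not part of the original driver): the @owner argument
 * controls which of this device's shared fences amdgpu_sync_resv() records.
 * With AMDGPU_FENCE_OWNER_VM only buffer moves are picked up, with
 * AMDGPU_FENCE_OWNER_UNDEFINED everything except VM page table updates is;
 * the exclusive fence and fences from other devices are always synced to.
 * The helper below and its parameters are hypothetical.
 */
static int __maybe_unused amdgpu_sync_resv_example(struct amdgpu_device *adev,
						   struct amdgpu_sync *sync,
						   struct reservation_object *resv,
						   bool vm_update)
{
	/* page table updates only care about moves, everything else
	 * uses the permissive undefined owner */
	void *owner = vm_update ? AMDGPU_FENCE_OWNER_VM
				: AMDGPU_FENCE_OWNER_UNDEFINED;

	return amdgpu_sync_resv(adev, sync, resv, owner);
}
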
/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
			/* not enough room, wait manually */
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;
		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to free
 * @fence: fence to wait for before the semaphores are actually released
 *
 * Free the sync object by freeing all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct amdgpu_fence *fence)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		amdgpu_fence_unref(&sync->sync_to[i]);

	amdgpu_fence_unref(&sync->last_vm_update);
}
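
/*
 * Illustrative sketch (not part of the original driver): the typical life
 * cycle of a sync object around a command submission.  Ring locking, space
 * reservation and the actual command emission are left out, the helper name
 * and its parameters are hypothetical; only the amdgpu_sync_* calls and the
 * AMDGPU_FENCE_OWNER_UNDEFINED constant are real.
 */
static int __maybe_unused amdgpu_sync_lifecycle_example(struct amdgpu_device *adev,
							struct amdgpu_ring *ring,
							struct reservation_object *resv)
{
	struct amdgpu_sync sync;
	int r;

	/* start with an empty sync object */
	amdgpu_sync_create(&sync);

	/* collect the fences we have to wait for from the buffer */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto free;

	/* make @ring wait for them, using semaphores where possible;
	 * the caller is assumed to hold the ring lock here */
	r = amdgpu_sync_rings(&sync, ring);

	/* ... emit the actual commands on @ring here ... */

free:
	/* drop the fence references and semaphores again; a fence for the
	 * submitted work would normally be passed instead of NULL */
	amdgpu_sync_free(adev, &sync, NULL);
	return r;
}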