a94f66fb3b13f715967bd3377fdaea4a5471961d
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / radeon / radeon_test.c
1 /*
2  * Copyright 2009 VMware, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Michel Dänzer
23  */
24 #include <drm/drmP.h>
25 #include <drm/radeon_drm.h>
26 #include "radeon_reg.h"
27 #include "radeon.h"
28
29
30 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r;

	/* Each test copies one 1 MiB buffer GTT->VRAM and back. */
	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 *
	 * i.e. carve out everything the driver itself keeps resident in GTT,
	 * then fill the remainder with test buffers so the whole usable
	 * aperture gets exercised.
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	/* One shared VRAM BO is reused as the far end of every copy. */
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	/* Stays reserved+pinned until out_cleanup; cleanup keys off
	 * radeon_bo_is_reserved() to know whether to unpin/unreserve. */
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

		/* Fill the buffer with a self-describing pattern: each slot
		 * stores its own CPU map address, so after a copy each slot's
		 * expected value can be recomputed from its position alone. */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

		/* Verify the GTT->VRAM copy. Note gtt_start is used only as
		 * the expected pointer VALUE here — it is never dereferenced,
		 * so walking it past the (already kunmapped) gtt_map is safe. */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
			/* Overwrite with the VRAM map address so the return
			 * trip can be verified the same way. */
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

		/* Verify the VRAM->GTT copy; vram_start is again only an
		 * expected value, never dereferenced after the kunmap above. */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
	}

out_cleanup:
	/* BOs may be in any of three states here: never created (NULL),
	 * created but unreserved, or reserved+pinned — handle each. */
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
225
226 void radeon_test_ring_sync(struct radeon_device *rdev,
227                            struct radeon_ring *ringA,
228                            struct radeon_ring *ringB)
229 {
230         struct radeon_fence *fence1 = NULL, *fence2 = NULL;
231         struct radeon_semaphore *semaphore = NULL;
232         int ridxA = radeon_ring_index(rdev, ringA);
233         int ridxB = radeon_ring_index(rdev, ringB);
234         int r;
235
236         r = radeon_semaphore_create(rdev, &semaphore);
237         if (r) {
238                 DRM_ERROR("Failed to create semaphore\n");
239                 goto out_cleanup;
240         }
241
242         r = radeon_ring_lock(rdev, ringA, 64);
243         if (r) {
244                 DRM_ERROR("Failed to lock ring A %d\n", ridxA);
245                 goto out_cleanup;
246         }
247         radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
248         r = radeon_fence_emit(rdev, &fence1, ridxA);
249         if (r) {
250                 DRM_ERROR("Failed to emit fence 1\n");
251                 radeon_ring_unlock_undo(rdev, ringA);
252                 goto out_cleanup;
253         }
254         radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
255         r = radeon_fence_emit(rdev, &fence2, ridxA);
256         if (r) {
257                 DRM_ERROR("Failed to emit fence 2\n");
258                 radeon_ring_unlock_undo(rdev, ringA);
259                 goto out_cleanup;
260         }
261         radeon_ring_unlock_commit(rdev, ringA);
262
263         mdelay(1000);
264
265         if (radeon_fence_signaled(fence1)) {
266                 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
267                 goto out_cleanup;
268         }
269
270         r = radeon_ring_lock(rdev, ringB, 64);
271         if (r) {
272                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
273                 goto out_cleanup;
274         }
275         radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
276         radeon_ring_unlock_commit(rdev, ringB);
277
278         r = radeon_fence_wait(fence1, false);
279         if (r) {
280                 DRM_ERROR("Failed to wait for sync fence 1\n");
281                 goto out_cleanup;
282         }
283
284         mdelay(1000);
285
286         if (radeon_fence_signaled(fence2)) {
287                 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
288                 goto out_cleanup;
289         }
290
291         r = radeon_ring_lock(rdev, ringB, 64);
292         if (r) {
293                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
294                 goto out_cleanup;
295         }
296         radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
297         radeon_ring_unlock_commit(rdev, ringB);
298
299         r = radeon_fence_wait(fence2, false);
300         if (r) {
301                 DRM_ERROR("Failed to wait for sync fence 1\n");
302                 goto out_cleanup;
303         }
304
305 out_cleanup:
306         radeon_semaphore_free(rdev, &semaphore, NULL);
307
308         if (fence1)
309                 radeon_fence_unref(&fence1);
310
311         if (fence2)
312                 radeon_fence_unref(&fence2);
313
314         if (r)
315                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
316 }
317
318 void radeon_test_ring_sync2(struct radeon_device *rdev,
319                             struct radeon_ring *ringA,
320                             struct radeon_ring *ringB,
321                             struct radeon_ring *ringC)
322 {
323         struct radeon_fence *fenceA = NULL, *fenceB = NULL;
324         struct radeon_semaphore *semaphore = NULL;
325         int ridxA = radeon_ring_index(rdev, ringA);
326         int ridxB = radeon_ring_index(rdev, ringB);
327         int ridxC = radeon_ring_index(rdev, ringC);
328         bool sigA, sigB;
329         int i, r;
330
331         r = radeon_semaphore_create(rdev, &semaphore);
332         if (r) {
333                 DRM_ERROR("Failed to create semaphore\n");
334                 goto out_cleanup;
335         }
336
337         r = radeon_ring_lock(rdev, ringA, 64);
338         if (r) {
339                 DRM_ERROR("Failed to lock ring A %d\n", ridxA);
340                 goto out_cleanup;
341         }
342         radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
343         r = radeon_fence_emit(rdev, &fenceA, ridxA);
344         if (r) {
345                 DRM_ERROR("Failed to emit sync fence 1\n");
346                 radeon_ring_unlock_undo(rdev, ringA);
347                 goto out_cleanup;
348         }
349         radeon_ring_unlock_commit(rdev, ringA);
350
351         r = radeon_ring_lock(rdev, ringB, 64);
352         if (r) {
353                 DRM_ERROR("Failed to lock ring B %d\n", ridxB);
354                 goto out_cleanup;
355         }
356         radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
357         r = radeon_fence_emit(rdev, &fenceB, ridxB);
358         if (r) {
359                 DRM_ERROR("Failed to create sync fence 2\n");
360                 radeon_ring_unlock_undo(rdev, ringB);
361                 goto out_cleanup;
362         }
363         radeon_ring_unlock_commit(rdev, ringB);
364
365         mdelay(1000);
366
367         if (radeon_fence_signaled(fenceA)) {
368                 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
369                 goto out_cleanup;
370         }
371         if (radeon_fence_signaled(fenceB)) {
372                 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
373                 goto out_cleanup;
374         }
375
376         r = radeon_ring_lock(rdev, ringC, 64);
377         if (r) {
378                 DRM_ERROR("Failed to lock ring B %p\n", ringC);
379                 goto out_cleanup;
380         }
381         radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
382         radeon_ring_unlock_commit(rdev, ringC);
383
384         for (i = 0; i < 30; ++i) {
385                 mdelay(100);
386                 sigA = radeon_fence_signaled(fenceA);
387                 sigB = radeon_fence_signaled(fenceB);
388                 if (sigA || sigB)
389                         break;
390         }
391
392         if (!sigA && !sigB) {
393                 DRM_ERROR("Neither fence A nor B has been signaled\n");
394                 goto out_cleanup;
395         } else if (sigA && sigB) {
396                 DRM_ERROR("Both fence A and B has been signaled\n");
397                 goto out_cleanup;
398         }
399
400         DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
401
402         r = radeon_ring_lock(rdev, ringC, 64);
403         if (r) {
404                 DRM_ERROR("Failed to lock ring B %p\n", ringC);
405                 goto out_cleanup;
406         }
407         radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
408         radeon_ring_unlock_commit(rdev, ringC);
409
410         mdelay(1000);
411
412         r = radeon_fence_wait(fenceA, false);
413         if (r) {
414                 DRM_ERROR("Failed to wait for sync fence A\n");
415                 goto out_cleanup;
416         }
417         r = radeon_fence_wait(fenceB, false);
418         if (r) {
419                 DRM_ERROR("Failed to wait for sync fence B\n");
420                 goto out_cleanup;
421         }
422
423 out_cleanup:
424         radeon_semaphore_free(rdev, &semaphore, NULL);
425
426         if (fenceA)
427                 radeon_fence_unref(&fenceA);
428
429         if (fenceB)
430                 radeon_fence_unref(&fenceB);
431
432         if (r)
433                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
434 }
435
436 void radeon_test_syncing(struct radeon_device *rdev)
437 {
438         int i, j, k;
439
440         for (i = 1; i < RADEON_NUM_RINGS; ++i) {
441                 struct radeon_ring *ringA = &rdev->ring[i];
442                 if (!ringA->ready)
443                         continue;
444
445                 for (j = 0; j < i; ++j) {
446                         struct radeon_ring *ringB = &rdev->ring[j];
447                         if (!ringB->ready)
448                                 continue;
449
450                         DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
451                         radeon_test_ring_sync(rdev, ringA, ringB);
452
453                         DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
454                         radeon_test_ring_sync(rdev, ringB, ringA);
455
456                         for (k = 0; k < j; ++k) {
457                                 struct radeon_ring *ringC = &rdev->ring[k];
458                                 if (!ringC->ready)
459                                         continue;
460
461                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
462                                 radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
463
464                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
465                                 radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
466
467                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
468                                 radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
469
470                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
471                                 radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
472
473                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
474                                 radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
475
476                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
477                                 radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
478                         }
479                 }
480         }
481 }