/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
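
/*
 * Rough lifecycle of an IB (a sketch based on the functions below):
 * a caller obtains one with radeon_ib_get(), fills ib->ptr with
 * packets, submits it with radeon_ib_schedule() and releases it with
 * radeon_ib_free() once the associated fence has signaled.
 */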
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requesting vm (NULL if none)
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib *ib, struct radeon_vm *vm,
                  unsigned size)
{
        int r;
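
        /* IBs are carved out of the pool created in
         * radeon_ib_pool_init(); the 256 byte alignment below
         * presumably matches the CP's alignment requirements for
         * indirect buffer start addresses
         */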
        r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
        if (r) {
                dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                return r;
        }

        r = radeon_semaphore_create(rdev, &ib->semaphore);
        if (r) {
                return r;
        }

        ib->ring = ring;
        ib->fence = NULL;
        ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
        ib->vm = vm;
        if (vm) {
                /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
                 * space and soffset is the offset inside the pool bo
                 */
                ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
        } else {
                ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
        }
        ib->is_const_ib = false;

        return 0;
}

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
        radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
        radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                       struct radeon_ib *const_ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        int r = 0;

        if (!ib->length_dw || !ring->ready) {
                /* TODO: nothing in the IB that we should report */
                dev_err(rdev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        /* 64 dwords should be enough for fence too; the extra
         * RADEON_NUM_SYNCS * 8 dwords leave room for the semaphore
         * syncs below
         */
        r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
        if (r) {
                dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        /* grab a vm id if necessary */
        if (ib->vm) {
                struct radeon_fence *vm_id_fence;
                vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
                radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
        }

        /* sync with other rings */
        r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        if (ib->vm)
                radeon_vm_flush(rdev, ib->vm, ib->ring);
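
        /* a CONST_IB, if present, goes onto the ring first so the CE
         * can run ahead of the DE (see the CE/DE description above
         * this function)
         */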
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
                radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
        }
        radeon_ring_ib_execute(rdev, ib->ring, ib);
        r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }
        if (const_ib) {
                const_ib->fence = radeon_fence_ref(ib->fence);
        }

        if (ib->vm)
                radeon_vm_fence(rdev, ib->vm, ib->fence);

        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->ib_pool_ready) {
                return 0;
        }
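
        /* the pool is RADEON_IB_POOL_SIZE * 64KB large, i.e. room for
         * RADEON_IB_POOL_SIZE IBs of up to 64KB each; radeon_ib_get()
         * suballocates from it
         */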
        if (rdev->family >= CHIP_BONAIRE) {
                r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
                                              RADEON_IB_POOL_SIZE*64*1024,
                                              RADEON_GPU_PAGE_SIZE,
                                              RADEON_GEM_DOMAIN_GTT,
                                              RADEON_GEM_GTT_WC);
        } else {
                /* Before CIK, it's better to stick to cacheable GTT due
                 * to the command stream checking
                 */
                r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
                                              RADEON_IB_POOL_SIZE*64*1024,
                                              RADEON_GPU_PAGE_SIZE,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
        if (r) {
                return r;
        }

        r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
        if (r) {
                return r;
        }

        rdev->ib_pool_ready = true;
        if (radeon_debugfs_sa_init(rdev)) {
                dev_err(rdev->dev, "failed to register debugfs file for SA\n");
        }
        return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        if (rdev->ib_pool_ready) {
                radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
                radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
                rdev->ib_pool_ready = false;
        }
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails (all asics).
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ring = &rdev->ring[i];

                if (!ring->ready)
                        continue;

                r = radeon_ib_test(rdev, i, ring);
                if (r) {
                        ring->ready = false;
                        rdev->needs_reset = false;

                        if (i == RADEON_RING_TYPE_GFX_INDEX) {
                                /* oh, oh, that's really bad */
                                DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
                                rdev->accel_working = false;
                                return r;

                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
                        }
                }
        }
        return 0;
}

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
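
/*
 * Example with illustrative numbers: in a 16 dword ring
 * (ptr_mask = 0xf) with rptr = 14 and wptr = 3, the GPU still has
 * (3 - 14) & 0xf = 5 dwords left to fetch before the ring is idle.
 */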
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
                                      struct radeon_ring *ring)
{
        switch (ring->idx) {
        case RADEON_RING_TYPE_GFX_INDEX:
        case CAYMAN_RING_TYPE_CP1_INDEX:
        case CAYMAN_RING_TYPE_CP2_INDEX:
                return true;
        default:
                return false;
        }
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        uint32_t rptr = radeon_ring_get_rptr(rdev, ring);

        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = rptr + (ring->ring_size / 4);
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
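        /* the result is (rptr - wptr) mod ring size in dw.  Zero is
         * ambiguous (completely full or completely empty), but since
         * radeon_ring_alloc() always keeps at least one dword free it
         * can only mean empty here
         */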
        if (!ring->ring_free_dw) {
                /* this is an empty ring */
                ring->ring_free_dw = ring->ring_size / 4;
                /* update lockup info to avoid false positive */
                radeon_ring_lockup_update(rdev, ring);
        }
}

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* make sure we aren't trying to allocate more space than there is on the ring */
        if (ndw > (ring->ring_size / 4))
                return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        radeon_ring_free_size(rdev, ring);
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
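        /* if the aligned request does not fit, wait for the next fence
         * to signal; once it does, the GPU is guaranteed to have
         * consumed (and thereby freed) more of the ring
         */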
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev, ring->idx);
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->ring_lock);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&rdev->ring_lock);
                return r;
        }
        return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
                radeon_ring_write(ring, ring->nop);
        }
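        /* make sure the ring contents (including the padding nops) are
         * visible in memory before the GPU is told about the new wptr
         */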
        mb();
        radeon_ring_set_wptr(rdev, ring);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_commit(rdev, ring);
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_undo(ring);
        mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_device *rdev,
                               struct radeon_ring *ring)
{
        atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
        atomic64_set(&ring->last_activity, jiffies_64);
}

/**
 * radeon_ring_test_lockup() - check if a ring is locked up
 *
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * Returns true if the ring stopped making progress for longer than
 * radeon_lockup_timeout, false otherwise (all asics).
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
        uint64_t last = atomic64_read(&ring->last_activity);
        uint64_t elapsed;

        if (rptr != atomic_read(&ring->last_rptr)) {
                /* ring is still working, no lockup */
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
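
        /* the rptr has not moved since the last check; report a lockup
         * once the radeon_lockup_timeout module parameter (in msec,
         * 0 disables the check) has expired
         */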
        elapsed = jiffies_to_msecs(jiffies_64 - last);
        if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
                dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
                        ring->idx, elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
        return false;
}

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: pointer that receives the saved command buffer
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
                            uint32_t **data)
{
        unsigned size, ptr, i;

        /* just in case lock the ring */
        mutex_lock(&rdev->ring_lock);
        *data = NULL;

        if (ring->ring_obj == NULL) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* it doesn't make sense to save anything if all fences are signaled */
        if (!radeon_fence_count_emitted(rdev, ring->idx)) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* calculate the number of dw on the ring */
        if (ring->rptr_save_reg)
                ptr = RREG32(ring->rptr_save_reg);
        else if (rdev->wb.enabled)
                ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
        else {
                /* no way to read back the next rptr */
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }
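
        /* ptr now holds the next rptr, i.e. the first dword the GPU has
         * not processed yet; everything between ptr and wptr (modulo
         * the ring size) still has to be saved
         */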
        size = ring->wptr + (ring->ring_size / 4);
        size -= ptr;
        size &= ring->ptr_mask;
        if (size == 0) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* and then save the content of the ring */
        *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
        if (!*data) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }
        for (i = 0; i < size; ++i) {
                (*data)[i] = ring->ring[ptr++];
                ptr &= ring->ptr_mask;
        }

        mutex_unlock(&rdev->ring_lock);
        return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                        unsigned size, uint32_t *data)
{
        unsigned i;
        int r;

        if (!size || !data)
                return 0;

        /* restore the saved ring content */
        r = radeon_ring_lock(rdev, ring, size);
        if (r)
                return r;

        for (i = 0; i < size; ++i) {
                radeon_ring_write(ring, data[i]);
        }

        radeon_ring_unlock_commit(rdev, ring);
        kfree(data);
        return 0;
}
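
/*
 * radeon_ring_backup() and radeon_ring_restore() are the two halves
 * of the GPU reset path: the unprocessed dwords are saved off, the
 * rings are re-initialized, and the saved commands are then written
 * to the ring again.
 */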

/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     (rdev->flags & RADEON_IS_PCIE) ?
                                     RADEON_GEM_GTT_WC : 0,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
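        /* set up the writeback slot through which the GPU publishes
         * the next rptr; radeon_ring_backup() relies on it whenever
         * the ring has no rptr_save_reg
         */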
        if (rdev->wb.enabled) {
                u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
                ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
                ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
        }
        if (radeon_debugfs_ring_init(rdev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
        radeon_ring_lockup_update(rdev, ring);
        return 0;
}

/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&rdev->ring_lock);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];

        uint32_t rptr, wptr, rptr_next;
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;

        wptr = radeon_ring_get_wptr(rdev, ring);
        seq_printf(m, "wptr: 0x%08x [%5d]\n",
                   wptr, wptr);

        rptr = radeon_ring_get_rptr(rdev, ring);
        seq_printf(m, "rptr: 0x%08x [%5d]\n",
                   rptr, rptr);
        if (ring->rptr_save_reg) {
                rptr_next = RREG32(ring->rptr_save_reg);
                seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
                           ring->rptr_save_reg, rptr_next, rptr_next);
        } else
                rptr_next = ~0;

        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
                   ring->wptr, ring->wptr);
        seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
                   ring->last_semaphore_signal_addr);
        seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
                   ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);

        if (!ring->ready)
                return 0;

        /* print 32 dw before the current rptr, since the last executed
         * packet is often the root issue
         */
        i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
        for (j = 0; j <= (count + 32); j++) {
                seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
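                /* annotate the dump: '*' marks the dword at rptr,
                 * '#' the one at rptr_next
                 */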
                if (rptr == i)
                        seq_puts(m, " *");
                if (rptr_next == i)
                        seq_puts(m, " #");
                seq_puts(m, "\n");
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
        {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
        {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
        {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
        {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
        {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
        return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
                struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
                int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
                unsigned r;

                if (&rdev->ring[ridx] != ring)
                        continue;

                r = radeon_debugfs_add_files(rdev, info, 1);
                if (r)
                        return r;
        }
#endif
        return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
        return 0;
#endif
}