2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
32 #include <drm/drm_crtc_helper.h>
33 #include "radeon_reg.h"
35 #include "radeon_asic.h"
36 #include <drm/radeon_drm.h>
37 #include "r100_track.h"
40 #include "r300_reg_safe.h"
42 /* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
45 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
46 * using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
47 * However, scheduling such a write on the ring seems harmless; I suspect
48 * the CP read collides with the flush somehow, or maybe the MC, hard to
49 * tell. (Jerome Glisse)
53 * rv370,rv380 PCIE GART
55 static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
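/* Flush the PCIE GART TLB by toggling the INVALIDATE_TLB bit in
 * PCIE_TX_GART_CNTL; the dummy read-back posts the write before the bit
 * is cleared again. */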
57 void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
62 /* Workaround for a HW bug: do the flush 2 times */
63 for (i = 0; i < 2; i++) {
64 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
65 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
66 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
67 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
72 #define R300_PTE_UNSNOOPED (1 << 0)
73 #define R300_PTE_WRITEABLE (1 << 2)
74 #define R300_PTE_READABLE (1 << 3)
76 void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
77 uint64_t addr, uint32_t flags)
79 void __iomem *ptr = rdev->gart.ptr;
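/* Pack the page's bus address bits 39:8 into the 32-bit PTE; with 4 KiB
 * GART pages the low four PTE bits stay zero and are reused for the
 * access/snoop flag bits below. */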
81 addr = (lower_32_bits(addr) >> 8) |
82 ((upper_32_bits(addr) & 0xff) << 24);
83 if (flags & RADEON_GART_PAGE_READ)
84 addr |= R300_PTE_READABLE;
85 if (flags & RADEON_GART_PAGE_WRITE)
86 addr |= R300_PTE_WRITEABLE;
87 if (!(flags & RADEON_GART_PAGE_SNOOP))
88 addr |= R300_PTE_UNSNOOPED;
89 /* On x86 we want this to be CPU endian; on powerpc
90 * without HW swappers it'll get swapped on the way
91 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
92 writel(addr, ((void __iomem *)ptr) + (i * 4));
95 int rv370_pcie_gart_init(struct radeon_device *rdev)
99 if (rdev->gart.robj) {
100 WARN(1, "RV370 PCIE GART already initialized\n");
103 /* Initialize common gart structure */
104 r = radeon_gart_init(rdev);
107 r = rv370_debugfs_pcie_gart_info_init(rdev);
109 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
110 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
111 rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
112 rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
113 return radeon_gart_table_vram_alloc(rdev);
116 int rv370_pcie_gart_enable(struct radeon_device *rdev)
122 if (rdev->gart.robj == NULL) {
123 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
126 r = radeon_gart_table_vram_pin(rdev);
129 /* discard memory requests outside of the configured range */
130 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
131 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
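/* Program the GART aperture (the GTT range in GPU address space) and
 * point the hardware at the page table placed in VRAM. */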
132 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
133 tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
134 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
135 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
136 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
137 table_addr = rdev->gart.table_addr;
138 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
139 /* FIXME: setup default page */
140 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
141 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
143 WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
144 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
145 tmp |= RADEON_PCIE_TX_GART_EN;
146 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
147 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
148 rv370_pcie_gart_tlb_flush(rdev);
149 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
150 (unsigned)(rdev->mc.gtt_size >> 20),
151 (unsigned long long)table_addr);
152 rdev->gart.ready = true;
156 void rv370_pcie_gart_disable(struct radeon_device *rdev)
160 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
161 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
162 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
163 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
164 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
165 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
166 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
167 radeon_gart_table_vram_unpin(rdev);
170 void rv370_pcie_gart_fini(struct radeon_device *rdev)
172 radeon_gart_fini(rdev);
173 rv370_pcie_gart_disable(rdev);
174 radeon_gart_table_vram_free(rdev);
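/* Emit a fence on the CP ring: flush the destination and Z caches, wait
 * for 2D/3D/DMA idle, invalidate the HDP read cache through the ring
 * (see the HOST_PATH_CNTL note at the top of this file), then write the
 * fence sequence to its scratch register and fire the SW interrupt. */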
177 void r300_fence_ring_emit(struct radeon_device *rdev,
178 struct radeon_fence *fence)
180 struct radeon_ring *ring = &rdev->ring[fence->ring];
182 /* Whoever calls radeon_fence_emit should call ring_lock and ask
183 * for enough space (today the callers are IB scheduling and buffer moves) */
184 /* Write SC register so SC & US assert idle */
185 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
186 radeon_ring_write(ring, 0);
187 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
188 radeon_ring_write(ring, 0);
190 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
191 radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
192 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
193 radeon_ring_write(ring, R300_ZC_FLUSH);
194 /* Wait until IDLE & CLEAN */
195 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
196 radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
197 RADEON_WAIT_2D_IDLECLEAN |
198 RADEON_WAIT_DMA_GUI_IDLE));
199 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
200 radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
201 RADEON_HDP_READ_BUFFER_INVALIDATE);
202 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
203 radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
204 /* Emit fence sequence & fire IRQ */
205 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
206 radeon_ring_write(ring, fence->seq);
207 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
208 radeon_ring_write(ring, RADEON_SW_INT_FIRE);
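/* Program the initial 3D engine state through the CP: ISYNC control,
 * GB tile config matching the detected pipe count, pipe auto-config,
 * cache flushes, multisample positions and GA polygon/round modes. */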
211 void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
213 unsigned gb_tile_config;
216 /* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
217 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
218 switch(rdev->num_gb_pipes) {
220 gb_tile_config |= R300_PIPE_COUNT_R300;
223 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
226 gb_tile_config |= R300_PIPE_COUNT_R420;
230 gb_tile_config |= R300_PIPE_COUNT_RV350;
234 r = radeon_ring_lock(rdev, ring, 64);
238 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
239 radeon_ring_write(ring,
240 RADEON_ISYNC_ANY2D_IDLE3D |
241 RADEON_ISYNC_ANY3D_IDLE2D |
242 RADEON_ISYNC_WAIT_IDLEGUI |
243 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
244 radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
245 radeon_ring_write(ring, gb_tile_config);
246 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
247 radeon_ring_write(ring,
248 RADEON_WAIT_2D_IDLECLEAN |
249 RADEON_WAIT_3D_IDLECLEAN);
250 radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
251 radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
252 radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
253 radeon_ring_write(ring, 0);
254 radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
255 radeon_ring_write(ring, 0);
256 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
257 radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
258 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
259 radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
260 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
261 radeon_ring_write(ring,
262 RADEON_WAIT_2D_IDLECLEAN |
263 RADEON_WAIT_3D_IDLECLEAN);
264 radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
265 radeon_ring_write(ring, 0);
266 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
267 radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
268 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
269 radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
270 radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
271 radeon_ring_write(ring,
272 ((6 << R300_MS_X0_SHIFT) |
273 (6 << R300_MS_Y0_SHIFT) |
274 (6 << R300_MS_X1_SHIFT) |
275 (6 << R300_MS_Y1_SHIFT) |
276 (6 << R300_MS_X2_SHIFT) |
277 (6 << R300_MS_Y2_SHIFT) |
278 (6 << R300_MSBD0_Y_SHIFT) |
279 (6 << R300_MSBD0_X_SHIFT)));
280 radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
281 radeon_ring_write(ring,
282 ((6 << R300_MS_X3_SHIFT) |
283 (6 << R300_MS_Y3_SHIFT) |
284 (6 << R300_MS_X4_SHIFT) |
285 (6 << R300_MS_Y4_SHIFT) |
286 (6 << R300_MS_X5_SHIFT) |
287 (6 << R300_MS_Y5_SHIFT) |
288 (6 << R300_MSBD1_SHIFT)));
289 radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
290 radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
291 radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
292 radeon_ring_write(ring,
293 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
294 radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
295 radeon_ring_write(ring,
296 R300_GEOMETRY_ROUND_NEAREST |
297 R300_COLOR_ROUND_NEAREST);
298 radeon_ring_unlock_commit(rdev, ring);
301 static void r300_errata(struct radeon_device *rdev)
303 rdev->pll_errata = 0;
305 if (rdev->family == CHIP_R300 &&
306 (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
307 rdev->pll_errata |= CHIP_ERRATA_R300_CG;
311 int r300_mc_wait_for_idle(struct radeon_device *rdev)
316 for (i = 0; i < rdev->usec_timeout; i++) {
318 tmp = RREG32(RADEON_MC_STATUS);
319 if (tmp & R300_MC_IDLE) {
327 static void r300_gpu_init(struct radeon_device *rdev)
329 uint32_t gb_tile_config, tmp;
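/* Only R300 AD (0x4144) and R350 AH (0x4148) among the R300/R350 parts
 * have a single quad pipe; the other R300/R350 variants have two, and
 * the RV3xx parts have one. */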
331 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
332 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
334 rdev->num_gb_pipes = 2;
336 /* rv350,rv370,rv380,r300 AD, r350 AH */
337 rdev->num_gb_pipes = 1;
339 rdev->num_z_pipes = 1;
340 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
341 switch (rdev->num_gb_pipes) {
343 gb_tile_config |= R300_PIPE_COUNT_R300;
346 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
349 gb_tile_config |= R300_PIPE_COUNT_R420;
353 gb_tile_config |= R300_PIPE_COUNT_RV350;
356 WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
358 if (r100_gui_wait_for_idle(rdev)) {
359 printk(KERN_WARNING "Failed to wait for GUI idle while "
360 "programming pipes. Bad things might happen.\n");
363 tmp = RREG32(R300_DST_PIPE_CONFIG);
364 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
366 WREG32(R300_RB2D_DSTCACHE_MODE,
367 R300_DC_AUTOFLUSH_ENABLE |
368 R300_DC_DC_DISABLE_IGNORE_PE);
370 if (r100_gui_wait_for_idle(rdev)) {
371 printk(KERN_WARNING "Failed to wait for GUI idle while "
372 "programming pipes. Bad things might happen.\n");
374 if (r300_mc_wait_for_idle(rdev)) {
375 printk(KERN_WARNING "Failed to wait for MC idle while "
376 "programming pipes. Bad things might happen.\n");
378 DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
379 rdev->num_gb_pipes, rdev->num_z_pipes);
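/* Soft-reset the GPU: stop the MC and stall the CP, save PCI state,
 * pulse the VAP/GA soft-reset bits and then the CP reset bit, restore
 * PCI state and bus mastering, and check RBBM_STATUS for idleness. */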
382 int r300_asic_reset(struct radeon_device *rdev)
384 struct r100_mc_save save;
388 status = RREG32(R_000E40_RBBM_STATUS);
389 if (!G_000E40_GUI_ACTIVE(status)) {
392 r100_mc_stop(rdev, &save);
393 status = RREG32(R_000E40_RBBM_STATUS);
394 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
396 WREG32(RADEON_CP_CSQ_CNTL, 0);
397 tmp = RREG32(RADEON_CP_RB_CNTL);
398 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
399 WREG32(RADEON_CP_RB_RPTR_WR, 0);
400 WREG32(RADEON_CP_RB_WPTR, 0);
401 WREG32(RADEON_CP_RB_CNTL, tmp);
403 pci_save_state(rdev->pdev);
404 /* disable bus mastering */
405 r100_bm_disable(rdev);
406 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
407 S_0000F0_SOFT_RESET_GA(1));
408 RREG32(R_0000F0_RBBM_SOFT_RESET);
410 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
412 status = RREG32(R_000E40_RBBM_STATUS);
413 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
414 /* Resetting the CP seems to be problematic: sometimes it ends up
415 * hard-locking the computer, but it's necessary for a successful
416 * reset. More testing & playing is needed on R3XX/R4XX to find a
417 * reliable solution (if any).
419 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
420 RREG32(R_0000F0_RBBM_SOFT_RESET);
422 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
424 status = RREG32(R_000E40_RBBM_STATUS);
425 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
426 /* restore PCI & busmastering */
427 pci_restore_state(rdev->pdev);
428 r100_enable_bm(rdev);
429 /* Check if GPU is idle */
430 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
431 dev_err(rdev->dev, "failed to reset GPU\n");
434 dev_info(rdev->dev, "GPU reset succeeded\n");
435 r100_mc_resume(rdev, &save);
440 * r300,r350,rv350,rv380 VRAM info
442 void r300_mc_init(struct radeon_device *rdev)
447 /* DDR for all cards after R300 & IGP */
448 rdev->mc.vram_is_ddr = true;
449 tmp = RREG32(RADEON_MEM_CNTL);
450 tmp &= R300_MEM_NUM_CHANNELS_MASK;
452 case 0: rdev->mc.vram_width = 64; break;
453 case 1: rdev->mc.vram_width = 128; break;
454 case 2: rdev->mc.vram_width = 256; break;
455 default: rdev->mc.vram_width = 128; break;
457 r100_vram_init_sizes(rdev);
458 base = rdev->mc.aper_base;
459 if (rdev->flags & RADEON_IS_IGP)
460 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
461 radeon_vram_location(rdev, &rdev->mc, base);
462 rdev->mc.gtt_base_align = 0;
463 if (!(rdev->flags & RADEON_IS_AGP))
464 radeon_gtt_location(rdev, &rdev->mc);
465 radeon_update_bandwidth_info(rdev);
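/* Reconfigure the number of active PCIE lanes: write the requested
 * width into PCIE_LC_LINK_WIDTH_CNTL and trigger RECONFIG_NOW; a no-op
 * on IGP and non-PCIE boards or if the width is already set. */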
468 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
470 uint32_t link_width_cntl, mask;
472 if (rdev->flags & RADEON_IS_IGP)
475 if (!(rdev->flags & RADEON_IS_PCIE))
478 /* FIXME wait for idle */
482 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
485 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
488 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
491 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
494 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
497 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
501 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
505 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
507 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
508 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
511 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
512 RADEON_PCIE_LC_RECONFIG_NOW |
513 RADEON_PCIE_LC_RECONFIG_LATER |
514 RADEON_PCIE_LC_SHORT_RECONFIG_EN);
515 link_width_cntl |= mask;
516 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
517 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
518 RADEON_PCIE_LC_RECONFIG_NOW));
520 /* wait for lane set to complete */
521 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
522 while (link_width_cntl == 0xffffffff)
523 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
527 int rv370_get_pcie_lanes(struct radeon_device *rdev)
531 if (rdev->flags & RADEON_IS_IGP)
534 if (!(rdev->flags & RADEON_IS_PCIE))
537 /* FIXME wait for idle */
539 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
541 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
542 case RADEON_PCIE_LC_LINK_WIDTH_X0:
544 case RADEON_PCIE_LC_LINK_WIDTH_X1:
546 case RADEON_PCIE_LC_LINK_WIDTH_X2:
548 case RADEON_PCIE_LC_LINK_WIDTH_X4:
550 case RADEON_PCIE_LC_LINK_WIDTH_X8:
552 case RADEON_PCIE_LC_LINK_WIDTH_X16:
558 #if defined(CONFIG_DEBUG_FS)
559 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
561 struct drm_info_node *node = (struct drm_info_node *) m->private;
562 struct drm_device *dev = node->minor->dev;
563 struct radeon_device *rdev = dev->dev_private;
566 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
567 seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
568 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
569 seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
570 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
571 seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
572 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
573 seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
574 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
575 seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
576 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
577 seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
578 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
579 seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
583 static struct drm_info_list rv370_pcie_gart_info_list[] = {
584 {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
588 static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
590 #if defined(CONFIG_DEBUG_FS)
591 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
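/* CS checker for register writes in type-0 packets: validates values
 * written to tracked registers, patches buffer offsets with the
 * relocation's GPU address and records state for r100_cs_track_check(). */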
597 static int r300_packet0_check(struct radeon_cs_parser *p,
598 struct radeon_cs_packet *pkt,
599 unsigned idx, unsigned reg)
601 struct radeon_cs_reloc *reloc;
602 struct r100_cs_track *track;
603 volatile uint32_t *ib;
604 uint32_t tmp, tile_flags = 0;
610 track = (struct r100_cs_track *)p->track;
611 idx_value = radeon_get_ib_value(p, idx);
614 case AVIVO_D1MODE_VLINE_START_END:
615 case RADEON_CRTC_GUI_TRIG_VLINE:
616 r = r100_cs_packet_parse_vline(p);
618 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
620 radeon_cs_dump_packet(p, pkt);
624 case RADEON_DST_PITCH_OFFSET:
625 case RADEON_SRC_PITCH_OFFSET:
626 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
630 case R300_RB3D_COLOROFFSET0:
631 case R300_RB3D_COLOROFFSET1:
632 case R300_RB3D_COLOROFFSET2:
633 case R300_RB3D_COLOROFFSET3:
634 i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
635 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
637 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
639 radeon_cs_dump_packet(p, pkt);
642 track->cb[i].robj = reloc->robj;
643 track->cb[i].offset = idx_value;
644 track->cb_dirty = true;
645 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
647 case R300_ZB_DEPTHOFFSET:
648 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
650 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
652 radeon_cs_dump_packet(p, pkt);
655 track->zb.robj = reloc->robj;
656 track->zb.offset = idx_value;
657 track->zb_dirty = true;
658 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
660 case R300_TX_OFFSET_0:
661 case R300_TX_OFFSET_0+4:
662 case R300_TX_OFFSET_0+8:
663 case R300_TX_OFFSET_0+12:
664 case R300_TX_OFFSET_0+16:
665 case R300_TX_OFFSET_0+20:
666 case R300_TX_OFFSET_0+24:
667 case R300_TX_OFFSET_0+28:
668 case R300_TX_OFFSET_0+32:
669 case R300_TX_OFFSET_0+36:
670 case R300_TX_OFFSET_0+40:
671 case R300_TX_OFFSET_0+44:
672 case R300_TX_OFFSET_0+48:
673 case R300_TX_OFFSET_0+52:
674 case R300_TX_OFFSET_0+56:
675 case R300_TX_OFFSET_0+60:
676 i = (reg - R300_TX_OFFSET_0) >> 2;
677 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
679 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
681 radeon_cs_dump_packet(p, pkt);
685 if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
686 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
687 ((idx_value & ~31) + (u32)reloc->gpu_offset);
689 if (reloc->tiling_flags & RADEON_TILING_MACRO)
690 tile_flags |= R300_TXO_MACRO_TILE;
691 if (reloc->tiling_flags & RADEON_TILING_MICRO)
692 tile_flags |= R300_TXO_MICRO_TILE;
693 else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
694 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
696 tmp = idx_value + ((u32)reloc->gpu_offset);
700 track->textures[i].robj = reloc->robj;
701 track->tex_dirty = true;
703 /* Tracked registers */
706 track->vap_vf_cntl = idx_value;
710 track->vtx_size = idx_value & 0x7F;
713 /* VAP_VF_MAX_VTX_INDX */
714 track->max_indx = idx_value & 0x00FFFFFFUL;
717 /* VAP_ALT_NUM_VERTICES - only valid on r500 */
718 if (p->rdev->family < CHIP_RV515)
720 track->vap_alt_nverts = idx_value & 0xFFFFFF;
724 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
725 if (p->rdev->family < CHIP_RV515) {
728 track->cb_dirty = true;
729 track->zb_dirty = true;
733 if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
734 p->rdev->cmask_filp != p->filp) {
735 DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
738 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
739 track->cb_dirty = true;
745 /* RB3D_COLORPITCH0 */
746 /* RB3D_COLORPITCH1 */
747 /* RB3D_COLORPITCH2 */
748 /* RB3D_COLORPITCH3 */
749 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
750 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
752 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
754 radeon_cs_dump_packet(p, pkt);
758 if (reloc->tiling_flags & RADEON_TILING_MACRO)
759 tile_flags |= R300_COLOR_TILE_ENABLE;
760 if (reloc->tiling_flags & RADEON_TILING_MICRO)
761 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
762 else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
763 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
765 tmp = idx_value & ~(0x7 << 16);
769 i = (reg - 0x4E38) >> 2;
770 track->cb[i].pitch = idx_value & 0x3FFE;
771 switch (((idx_value >> 21) & 0xF)) {
775 track->cb[i].cpp = 1;
781 track->cb[i].cpp = 2;
784 if (p->rdev->family < CHIP_RV515) {
785 DRM_ERROR("Invalid color buffer format (%d)!\n",
786 ((idx_value >> 21) & 0xF));
791 track->cb[i].cpp = 4;
794 track->cb[i].cpp = 8;
797 track->cb[i].cpp = 16;
800 DRM_ERROR("Invalid color buffer format (%d) !\n",
801 ((idx_value >> 21) & 0xF));
804 track->cb_dirty = true;
809 track->z_enabled = true;
811 track->z_enabled = false;
813 track->zb_dirty = true;
817 switch ((idx_value & 0xF)) {
826 DRM_ERROR("Invalid z buffer format (%d) !\n",
830 track->zb_dirty = true;
834 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
835 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
837 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
839 radeon_cs_dump_packet(p, pkt);
843 if (reloc->tiling_flags & RADEON_TILING_MACRO)
844 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
845 if (reloc->tiling_flags & RADEON_TILING_MICRO)
846 tile_flags |= R300_DEPTHMICROTILE_TILED;
847 else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
848 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
850 tmp = idx_value & ~(0x7 << 16);
854 track->zb.pitch = idx_value & 0x3FFC;
855 track->zb_dirty = true;
859 for (i = 0; i < 16; i++) {
862 enabled = !!(idx_value & (1 << i));
863 track->textures[i].enabled = enabled;
865 track->tex_dirty = true;
883 /* TX_FORMAT1_[0-15] */
884 i = (reg - 0x44C0) >> 2;
885 tmp = (idx_value >> 25) & 0x3;
886 track->textures[i].tex_coord_type = tmp;
887 switch ((idx_value & 0x1F)) {
888 case R300_TX_FORMAT_X8:
889 case R300_TX_FORMAT_Y4X4:
890 case R300_TX_FORMAT_Z3Y3X2:
891 track->textures[i].cpp = 1;
892 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
894 case R300_TX_FORMAT_X16:
895 case R300_TX_FORMAT_FL_I16:
896 case R300_TX_FORMAT_Y8X8:
897 case R300_TX_FORMAT_Z5Y6X5:
898 case R300_TX_FORMAT_Z6Y5X5:
899 case R300_TX_FORMAT_W4Z4Y4X4:
900 case R300_TX_FORMAT_W1Z5Y5X5:
901 case R300_TX_FORMAT_D3DMFT_CxV8U8:
902 case R300_TX_FORMAT_B8G8_B8G8:
903 case R300_TX_FORMAT_G8R8_G8B8:
904 track->textures[i].cpp = 2;
905 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
907 case R300_TX_FORMAT_Y16X16:
908 case R300_TX_FORMAT_FL_I16A16:
909 case R300_TX_FORMAT_Z11Y11X10:
910 case R300_TX_FORMAT_Z10Y11X11:
911 case R300_TX_FORMAT_W8Z8Y8X8:
912 case R300_TX_FORMAT_W2Z10Y10X10:
914 case R300_TX_FORMAT_FL_I32:
916 track->textures[i].cpp = 4;
917 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
919 case R300_TX_FORMAT_W16Z16Y16X16:
920 case R300_TX_FORMAT_FL_R16G16B16A16:
921 case R300_TX_FORMAT_FL_I32A32:
922 track->textures[i].cpp = 8;
923 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
925 case R300_TX_FORMAT_FL_R32G32B32A32:
926 track->textures[i].cpp = 16;
927 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
929 case R300_TX_FORMAT_DXT1:
930 track->textures[i].cpp = 1;
931 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
933 case R300_TX_FORMAT_ATI2N:
934 if (p->rdev->family < CHIP_R420) {
935 DRM_ERROR("Invalid texture format %u\n",
939 /* The same rules apply as for DXT3/5. */
941 case R300_TX_FORMAT_DXT3:
942 case R300_TX_FORMAT_DXT5:
943 track->textures[i].cpp = 1;
944 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
947 DRM_ERROR("Invalid texture format %u\n",
951 track->tex_dirty = true;
969 /* TX_FILTER0_[0-15] */
970 i = (reg - 0x4400) >> 2;
971 tmp = idx_value & 0x7;
972 if (tmp == 2 || tmp == 4 || tmp == 6) {
973 track->textures[i].roundup_w = false;
975 tmp = (idx_value >> 3) & 0x7;
976 if (tmp == 2 || tmp == 4 || tmp == 6) {
977 track->textures[i].roundup_h = false;
979 track->tex_dirty = true;
997 /* TX_FORMAT2_[0-15] */
998 i = (reg - 0x4500) >> 2;
999 tmp = idx_value & 0x3FFF;
1000 track->textures[i].pitch = tmp + 1;
1001 if (p->rdev->family >= CHIP_RV515) {
1002 tmp = ((idx_value >> 15) & 1) << 11;
1003 track->textures[i].width_11 = tmp;
1004 tmp = ((idx_value >> 16) & 1) << 11;
1005 track->textures[i].height_11 = tmp;
1008 if (idx_value & (1 << 14)) {
1009 /* The same rules apply as for DXT1. */
1010 track->textures[i].compress_format =
1011 R100_TRACK_COMP_DXT1;
1013 } else if (idx_value & (1 << 14)) {
1014 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1017 track->tex_dirty = true;
1035 /* TX_FORMAT0_[0-15] */
1036 i = (reg - 0x4480) >> 2;
1037 tmp = idx_value & 0x7FF;
1038 track->textures[i].width = tmp + 1;
1039 tmp = (idx_value >> 11) & 0x7FF;
1040 track->textures[i].height = tmp + 1;
1041 tmp = (idx_value >> 26) & 0xF;
1042 track->textures[i].num_levels = tmp;
1043 tmp = idx_value & (1 << 31);
1044 track->textures[i].use_pitch = !!tmp;
1045 tmp = (idx_value >> 22) & 0xF;
1046 track->textures[i].txdepth = tmp;
1047 track->tex_dirty = true;
1049 case R300_ZB_ZPASS_ADDR:
1050 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1052 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1054 radeon_cs_dump_packet(p, pkt);
1057 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1060 /* RB3D_COLOR_CHANNEL_MASK */
1061 track->color_channel_mask = idx_value;
1062 track->cb_dirty = true;
1066 /* r300c emits this register - we need to disable hyperz for it
1067 * without complaining */
1068 if (p->rdev->hyperz_filp != p->filp) {
1069 if (idx_value & 0x1)
1070 ib[idx] = idx_value & ~1;
1075 track->zb_cb_clear = !!(idx_value & (1 << 5));
1076 track->cb_dirty = true;
1077 track->zb_dirty = true;
1078 if (p->rdev->hyperz_filp != p->filp) {
1079 if (idx_value & (R300_HIZ_ENABLE |
1080 R300_RD_COMP_ENABLE |
1081 R300_WR_COMP_ENABLE |
1082 R300_FAST_FILL_ENABLE))
1087 /* RB3D_BLENDCNTL */
1088 track->blend_read_enable = !!(idx_value & (1 << 2));
1089 track->cb_dirty = true;
1091 case R300_RB3D_AARESOLVE_OFFSET:
1092 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1094 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1096 radeon_cs_dump_packet(p, pkt);
1099 track->aa.robj = reloc->robj;
1100 track->aa.offset = idx_value;
1101 track->aa_dirty = true;
1102 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1104 case R300_RB3D_AARESOLVE_PITCH:
1105 track->aa.pitch = idx_value & 0x3FFE;
1106 track->aa_dirty = true;
1108 case R300_RB3D_AARESOLVE_CTL:
1109 track->aaresolve = idx_value & 0x1;
1110 track->aa_dirty = true;
1112 case 0x4f30: /* ZB_MASK_OFFSET */
1113 case 0x4f34: /* ZB_ZMASK_PITCH */
1114 case 0x4f44: /* ZB_HIZ_OFFSET */
1115 case 0x4f54: /* ZB_HIZ_PITCH */
1116 if (idx_value && (p->rdev->hyperz_filp != p->filp))
1120 if (idx_value && (p->rdev->hyperz_filp != p->filp))
1122 /* GB_Z_PEQ_CONFIG */
1123 if (p->rdev->family >= CHIP_RV350)
1128 /* valid register only on RV530 */
1129 if (p->rdev->family == CHIP_RV530)
1131 /* fallthrough do not move */
1137 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
1138 reg, idx, idx_value);
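/* CS checker for type-3 packets: patches relocations for vertex and
 * index buffers and validates draw packets against the tracked state. */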
1142 static int r300_packet3_check(struct radeon_cs_parser *p,
1143 struct radeon_cs_packet *pkt)
1145 struct radeon_cs_reloc *reloc;
1146 struct r100_cs_track *track;
1147 volatile uint32_t *ib;
1153 track = (struct r100_cs_track *)p->track;
1154 switch(pkt->opcode) {
1155 case PACKET3_3D_LOAD_VBPNTR:
1156 r = r100_packet3_load_vbpntr(p, pkt, idx);
1160 case PACKET3_INDX_BUFFER:
1161 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1163 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1164 radeon_cs_dump_packet(p, pkt);
1167 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1168 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1174 case PACKET3_3D_DRAW_IMMD:
1175 /* Number of dwords is vtx_size * (num_vertices - 1);
1176 * PRIM_WALK must be equal to 3, vertex data is embedded
1178 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1179 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1182 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1183 track->immd_dwords = pkt->count - 1;
1184 r = r100_cs_track_check(p->rdev, track);
1189 case PACKET3_3D_DRAW_IMMD_2:
1190 /* Number of dwords is vtx_size * (num_vertices - 1);
1191 * PRIM_WALK must be equal to 3, vertex data is embedded
1193 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1194 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1197 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1198 track->immd_dwords = pkt->count;
1199 r = r100_cs_track_check(p->rdev, track);
1204 case PACKET3_3D_DRAW_VBUF:
1205 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1206 r = r100_cs_track_check(p->rdev, track);
1211 case PACKET3_3D_DRAW_VBUF_2:
1212 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1213 r = r100_cs_track_check(p->rdev, track);
1218 case PACKET3_3D_DRAW_INDX:
1219 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1220 r = r100_cs_track_check(p->rdev, track);
1225 case PACKET3_3D_DRAW_INDX_2:
1226 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1227 r = r100_cs_track_check(p->rdev, track);
1232 case PACKET3_3D_CLEAR_HIZ:
1233 case PACKET3_3D_CLEAR_ZMASK:
1234 if (p->rdev->hyperz_filp != p->filp)
1237 case PACKET3_3D_CLEAR_CMASK:
1238 if (p->rdev->cmask_filp != p->filp)
1244 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1250 int r300_cs_parse(struct radeon_cs_parser *p)
1252 struct radeon_cs_packet pkt;
1253 struct r100_cs_track *track;
1256 track = kzalloc(sizeof(*track), GFP_KERNEL);
1259 r100_cs_track_clear(p->rdev, track);
1262 r = radeon_cs_packet_parse(p, &pkt, p->idx);
1266 p->idx += pkt.count + 2;
1268 case RADEON_PACKET_TYPE0:
1269 r = r100_cs_parse_packet0(p, &pkt,
1270 p->rdev->config.r300.reg_safe_bm,
1271 p->rdev->config.r300.reg_safe_bm_size,
1272 &r300_packet0_check);
1274 case RADEON_PACKET_TYPE2:
1276 case RADEON_PACKET_TYPE3:
1277 r = r300_packet3_check(p, &pkt);
1280 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1286 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1290 void r300_set_reg_safe(struct radeon_device *rdev)
1292 rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1293 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
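/* Program the memory controller: stop the MC clients, set up the AGP
 * and framebuffer apertures, then resume the clients. */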
1296 void r300_mc_program(struct radeon_device *rdev)
1298 struct r100_mc_save save;
1301 r = r100_debugfs_mc_info_init(rdev);
1303 dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
1306 /* Stops all mc clients */
1307 r100_mc_stop(rdev, &save);
1308 if (rdev->flags & RADEON_IS_AGP) {
1309 WREG32(R_00014C_MC_AGP_LOCATION,
1310 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
1311 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
1312 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
1313 WREG32(R_00015C_AGP_BASE_2,
1314 upper_32_bits(rdev->mc.agp_base) & 0xff);
1316 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
1317 WREG32(R_000170_AGP_BASE, 0);
1318 WREG32(R_00015C_AGP_BASE_2, 0);
1320 /* Wait for mc idle */
1321 if (r300_mc_wait_for_idle(rdev))
1322 DRM_INFO("Failed to wait MC idle before programming MC.\n");
1323 /* Program MC; it should be a 32-bit limited address space */
1324 WREG32(R_000148_MC_FB_LOCATION,
1325 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
1326 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1327 r100_mc_resume(rdev, &save);
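/* Enable dynamic clock gating when requested, while keeping the CP and
 * VIP blocks (and VAP on RV350/RV380) force-clocked. */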
1330 void r300_clock_startup(struct radeon_device *rdev)
1334 if (radeon_dynclks != -1 && radeon_dynclks)
1335 radeon_legacy_set_clock_gating(rdev, 1);
1336 /* We need to force on some of the blocks */
1337 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
1338 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
1339 if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
1340 tmp |= S_00000D_FORCE_VAP(1);
1341 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
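/* Bring the ASIC up: program the MC and clocks, initialize the GPU
 * configuration and GART, then start writeback, IRQs, the CP ring and
 * the IB pool. */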
1344 static int r300_startup(struct radeon_device *rdev)
1348 /* set common regs */
1349 r100_set_common_regs(rdev);
1351 r300_mc_program(rdev);
1353 r300_clock_startup(rdev);
1354 /* Initialize GPU configuration (# pipes, ...) */
1355 r300_gpu_init(rdev);
1356 /* Initialize GART (initialize after TTM so we can allocate
1357 * memory through TTM but finalize after TTM) */
1358 if (rdev->flags & RADEON_IS_PCIE) {
1359 r = rv370_pcie_gart_enable(rdev);
1364 if (rdev->family == CHIP_R300 ||
1365 rdev->family == CHIP_R350 ||
1366 rdev->family == CHIP_RV350)
1367 r100_enable_bm(rdev);
1369 if (rdev->flags & RADEON_IS_PCI) {
1370 r = r100_pci_gart_enable(rdev);
1375 /* allocate wb buffer */
1376 r = radeon_wb_init(rdev);
1380 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1382 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1387 if (!rdev->irq.installed) {
1388 r = radeon_irq_kms_init(rdev);
1394 rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
1395 /* 1M ring buffer */
1396 r = r100_cp_init(rdev, 1024 * 1024);
1398 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1402 r = radeon_ib_pool_init(rdev);
1404 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1411 int r300_resume(struct radeon_device *rdev)
1415 /* Make sure the GART is not working */
1416 if (rdev->flags & RADEON_IS_PCIE)
1417 rv370_pcie_gart_disable(rdev);
1418 if (rdev->flags & RADEON_IS_PCI)
1419 r100_pci_gart_disable(rdev);
1420 /* Resume clock before doing reset */
1421 r300_clock_startup(rdev);
1422 /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
1423 if (radeon_asic_reset(rdev)) {
1424 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1425 RREG32(R_000E40_RBBM_STATUS),
1426 RREG32(R_0007C0_CP_STAT));
1429 radeon_combios_asic_init(rdev->ddev);
1430 /* Resume clock after posting */
1431 r300_clock_startup(rdev);
1432 /* Initialize surface registers */
1433 radeon_surface_init(rdev);
1435 rdev->accel_working = true;
1436 r = r300_startup(rdev);
1438 rdev->accel_working = false;
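/* Suspend: stop power management, the CP, writeback and IRQs, then
 * disable the GART. */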
1443 int r300_suspend(struct radeon_device *rdev)
1445 radeon_pm_suspend(rdev);
1446 r100_cp_disable(rdev);
1447 radeon_wb_disable(rdev);
1448 r100_irq_disable(rdev);
1449 if (rdev->flags & RADEON_IS_PCIE)
1450 rv370_pcie_gart_disable(rdev);
1451 if (rdev->flags & RADEON_IS_PCI)
1452 r100_pci_gart_disable(rdev);
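/* Tear down everything set up by r300_init()/r300_startup(), roughly in
 * reverse order. */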
1456 void r300_fini(struct radeon_device *rdev)
1458 radeon_pm_fini(rdev);
1460 radeon_wb_fini(rdev);
1461 radeon_ib_pool_fini(rdev);
1462 radeon_gem_fini(rdev);
1463 if (rdev->flags & RADEON_IS_PCIE)
1464 rv370_pcie_gart_fini(rdev);
1465 if (rdev->flags & RADEON_IS_PCI)
1466 r100_pci_gart_fini(rdev);
1467 radeon_agp_fini(rdev);
1468 radeon_irq_kms_fini(rdev);
1469 radeon_fence_driver_fini(rdev);
1470 radeon_bo_fini(rdev);
1471 radeon_atombios_fini(rdev);
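/* One-time ASIC init: read the BIOS, reset and post the card if needed,
 * set up clocks, AGP, the memory controller, fences, the memory manager
 * and GART, then call r300_startup(). */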
1476 int r300_init(struct radeon_device *rdev)
1481 r100_vga_render_disable(rdev);
1482 /* Initialize scratch registers */
1483 radeon_scratch_init(rdev);
1484 /* Initialize surface registers */
1485 radeon_surface_init(rdev);
1486 /* TODO: disable VGA; need to use VGA request */
1487 /* restore some registers to sane defaults */
1488 r100_restore_sanity(rdev);
1490 if (!radeon_get_bios(rdev)) {
1491 if (ASIC_IS_AVIVO(rdev))
1494 if (rdev->is_atom_bios) {
1495 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
1498 r = radeon_combios_init(rdev);
1502 /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
1503 if (radeon_asic_reset(rdev)) {
1505 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1506 RREG32(R_000E40_RBBM_STATUS),
1507 RREG32(R_0007C0_CP_STAT));
1509 /* check if the card is posted or not */
1510 if (radeon_boot_test_post_card(rdev) == false)
1512 /* Set asic errata */
1514 /* Initialize clocks */
1515 radeon_get_clock_info(rdev->ddev);
1516 /* initialize AGP */
1517 if (rdev->flags & RADEON_IS_AGP) {
1518 r = radeon_agp_init(rdev);
1520 radeon_agp_disable(rdev);
1523 /* initialize memory controller */
1526 r = radeon_fence_driver_init(rdev);
1529 /* Memory manager */
1530 r = radeon_bo_init(rdev);
1533 if (rdev->flags & RADEON_IS_PCIE) {
1534 r = rv370_pcie_gart_init(rdev);
1538 if (rdev->flags & RADEON_IS_PCI) {
1539 r = r100_pci_gart_init(rdev);
1543 r300_set_reg_safe(rdev);
1545 /* Initialize power management */
1546 radeon_pm_init(rdev);
1548 rdev->accel_working = true;
1549 r = r300_startup(rdev);
1551 /* Something went wrong with the accel init, so stop accel */
1552 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1554 radeon_wb_fini(rdev);
1555 radeon_ib_pool_fini(rdev);
1556 radeon_irq_kms_fini(rdev);
1557 if (rdev->flags & RADEON_IS_PCIE)
1558 rv370_pcie_gart_fini(rdev);
1559 if (rdev->flags & RADEON_IS_PCI)
1560 r100_pci_gart_fini(rdev);
1561 radeon_agp_fini(rdev);
1562 rdev->accel_working = false;