/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, which is the IGP
 * of the X1250/X1270 family paired with Intel CPUs (while the RS690/RS740
 * is the X1250/X1270 paired with AMD CPUs). The display engine is the
 * Avivo one, the BIOS is an AtomBIOS, and the 3D blocks are those of the
 * R4XX family. The GART differs from the RS400 one and is very close to
 * that of the R600 family (the R600 GART likely being an evolution of the
 * RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev)
{
        /* read back the MC value from the hw */
        int r;
        u32 tmp;

        /* Setup GPU memory space */
        tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
        rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
        rdev->mc.gtt_location = 0xffffffffUL;
        r = radeon_mc_setup(rdev);
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        if (r)
                return r;
        return 0;
}
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = false;

        switch (hpd) {
        case RADEON_HPD_1:
                tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
                if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
                        connected = true;
                break;
        case RADEON_HPD_2:
                tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
                if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
                        connected = true;
                break;
        default:
                break;
        }
        return connected;
}
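/* Program the hot plug detect interrupt polarity from the current sense
 * state, so that the next transition (connect or disconnect) raises an
 * interrupt. */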
void rs600_hpd_set_polarity(struct radeon_device *rdev,
                            enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = rs600_hpd_sense(rdev, hpd);

        switch (hpd) {
        case RADEON_HPD_1:
                tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
                if (connected)
                        tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
                else
                        tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
                WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                break;
        case RADEON_HPD_2:
                tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
                if (connected)
                        tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
                else
                        tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
                WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                break;
        default:
                break;
        }
}
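/* Enable the hot plug detect pins used by the connectors on this board and
 * record them in rdev->irq.hpd[] so rs600_irq_set() unmasks the matching
 * interrupts. */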
void rs600_hpd_init(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                case RADEON_HPD_1:
                        WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                               S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
                        rdev->irq.hpd[0] = true;
                        break;
                case RADEON_HPD_2:
                        WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                               S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
                        rdev->irq.hpd[1] = true;
                        break;
                default:
                        break;
                }
        }
        if (rdev->irq.installed)
                rs600_irq_set(rdev);
}
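/* Mirror image of rs600_hpd_init(): turn the hot plug detect pins back off
 * and clear the per-pin interrupt flags. */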
void rs600_hpd_fini(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                case RADEON_HPD_1:
                        WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                               S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
                        rdev->irq.hpd[0] = false;
                        break;
                case RADEON_HPD_2:
                        WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                               S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
                        rdev->irq.hpd[1] = false;
                        break;
                default:
                        break;
                }
        }
}
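/* Invalidate the GART L1 TLBs and L2 cache by toggling the invalidate bits
 * in MC_PT0_CNTL; the trailing read back is presumably there to make sure
 * the posted writes have reached the MC before returning. */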
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
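/* One-time GART setup: initialize the common gart structure and allocate
 * the page table in VRAM, 8 bytes per GPU page (see rs600_gart_set_page()). */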
int rs600_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj) {
                WARN(1, "RS600 GART already initialized.\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
        return radeon_gart_table_vram_alloc(rdev);
}
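/* Program and turn on the GART: enable bus mastering, configure the MC
 * page table client controls, point context 0 at the flat page table in
 * VRAM and map the system aperture onto VRAM before enabling translation. */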
int rs600_gart_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int r, i;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        /* Enable bus master */
        tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
        WREG32(R_00004C_BUS_CNTL, tmp);
        /* FIXME: setup default page */
        WREG32_MC(R_000100_MC_PT0_CNTL,
                  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
                   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
        for (i = 0; i < 19; i++) {
                WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
                          S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
                          S_00016C_SYSTEM_ACCESS_MODE_MASK(
                                  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
                          S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
                                  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
                          S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
                          S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
                          S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
        }
        /* enable first context */
        WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
                  S_000102_ENABLE_PAGE_TABLE(1) |
                  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
        /* disable all other contexts */
        for (i = 1; i < 8; i++)
                WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

        /* setup the page table */
        WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
                  rdev->gart.table_addr);
        WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
        WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
        WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

        /* System context maps to VRAM space */
        WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
        WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

        /* enable page tables */
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
        tmp = RREG32_MC(R_000009_MC_CNTL1);
        WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
        rs600_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}
void rs600_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;
        int r;

        /* FIXME: disable out of gart access */
        WREG32_MC(R_000100_MC_PT0_CNTL, 0);
        tmp = RREG32_MC(R_000009_MC_CNTL1);
        WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (r == 0) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}
void rs600_gart_fini(struct radeon_device *rdev)
{
        rs600_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
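/* A GART page table entry is the 4KB-aligned bus address of a system page
 * with the flag bits above OR'ed into the low bits; rs600_gart_set_page()
 * below writes it as a single 64-bit value. */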
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        if (i < 0 || i > rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
        addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
        writeq(addr, ((void __iomem *)ptr) + (i * 8));
        return 0;
}
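/* Build the interrupt enable masks from the rdev->irq state (software
 * interrupt, per-CRTC vblank, hot plug detect) and write them to the
 * hardware. */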
int rs600_irq_set(struct radeon_device *rdev)
{
        uint32_t tmp = 0;
        uint32_t mode_int = 0;
        u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
                ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
        u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
                ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
        if (rdev->irq.sw_int) {
                tmp |= S_000040_SW_INT_EN(1);
        }
        if (rdev->irq.crtc_vblank_int[0]) {
                mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
        }
        if (rdev->irq.crtc_vblank_int[1]) {
                mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
        }
        if (rdev->irq.hpd[0]) {
                hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
        }
        if (rdev->irq.hpd[1]) {
                hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
        }
        WREG32(R_000040_GEN_INT_CNTL, tmp);
        WREG32(R_006540_DxMODE_INT_MASK, mode_int);
        WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
        WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
        return 0;
}
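/* Read the pending interrupt sources, acknowledge the display ones (vblank
 * and hot plug detect) directly in their status registers, and hand the
 * masked general interrupt status back to the caller. */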
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
        uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
        uint32_t irq_mask = ~C_000044_SW_INT;
        u32 tmp;

        if (G_000044_DISPLAY_INT_STAT(irqs)) {
                *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
                        WREG32(R_006534_D1MODE_VBLANK_STATUS,
                               S_006534_D1MODE_VBLANK_ACK(1));
                }
                if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
                        WREG32(R_006D34_D2MODE_VBLANK_STATUS,
                               S_006D34_D2MODE_VBLANK_ACK(1));
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
                        tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
                        tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
                        WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
                        tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
                        tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
                        WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                }
        } else {
                *r500_disp_int = 0;
        }

        if (irqs) {
                WREG32(R_000044_GEN_INT_STATUS, irqs);
        }
        return irqs & irq_mask;
}
void rs600_irq_disable(struct radeon_device *rdev)
{
        u32 tmp;

        WREG32(R_000040_GEN_INT_CNTL, 0);
        WREG32(R_006540_DxMODE_INT_MASK, 0);
        /* Wait and acknowledge irq */
        mdelay(1);
        rs600_irq_ack(rdev, &tmp);
}
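/* Interrupt handler proper: keep acknowledging and servicing sources
 * (fences, vblanks, hot plug) until nothing is pending, then schedule the
 * hotplug work and re-arm MSI delivery if MSI is in use. */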
int rs600_irq_process(struct radeon_device *rdev)
{
        uint32_t status, msi_rearm;
        uint32_t r500_disp_int;
        bool queue_hotplug = false;

        status = rs600_irq_ack(rdev, &r500_disp_int);
        if (!status && !r500_disp_int) {
                return IRQ_NONE;
        }
        while (status || r500_disp_int) {
                /* SW interrupt */
                if (G_000044_SW_INT(status))
                        radeon_fence_process(rdev);
                /* Vertical blank interrupts */
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
                        drm_handle_vblank(rdev->ddev, 0);
                        if (rdev->pm.vblank_callback)
                                queue_work(rdev->wq, &rdev->pm.reclock_work);
                }
                if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
                        drm_handle_vblank(rdev->ddev, 1);
                        if (rdev->pm.vblank_callback)
                                queue_work(rdev->wq, &rdev->pm.reclock_work);
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
                        queue_hotplug = true;
                        DRM_DEBUG("HPD1\n");
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
                        queue_hotplug = true;
                        DRM_DEBUG("HPD2\n");
                }
                status = rs600_irq_ack(rdev, &r500_disp_int);
        }
        if (queue_hotplug)
                queue_work(rdev->wq, &rdev->hotplug_work);
        if (rdev->msi_enabled) {
                switch (rdev->family) {
                case CHIP_RS600:
                case CHIP_RS690:
                case CHIP_RS740:
                        msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
                        WREG32(RADEON_BUS_CNTL, msi_rearm);
                        WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
                        break;
                default:
                        msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm);
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
                        break;
                }
        }
        return IRQ_HANDLED;
}
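/* The per-CRTC frame counters are read straight from the Avivo CRTC status
 * registers; the DRM core uses them as vblank counters. */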
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
        if (crtc == 0)
                return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
        else
                return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}
int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
                        return 0;
                udelay(1);
        }
        return -1;
}
void rs600_gpu_init(struct radeon_device *rdev)
{
        r100_hdp_reset(rdev);
        r420_pipes_init(rdev);
        /* Wait for mc idle */
        if (rs600_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
void rs600_vram_info(struct radeon_device *rdev)
{
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;

        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;

        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);

        if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
                rdev->mc.mc_vram_size = rdev->mc.aper_size;

        if (rdev->mc.real_vram_size > rdev->mc.aper_size)
                rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
{
        /* FIXME: implement, should this be like rs690 ? */
}
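/* MC registers are reached indirectly: write the register address (plus the
 * write-enable bit for stores) to MC_IND_INDEX, then move the data through
 * MC_IND_DATA. On RS600 the RREG32_MC()/WREG32_MC() accesses used throughout
 * this file end up in these two helpers. */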
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
               S_000070_MC_IND_CITF_ARB0(1));
        return RREG32(R_000074_MC_IND_DATA);
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
               S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
        WREG32(R_000074_MC_IND_DATA, v);
}
void rs600_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
void rs600_set_safe_registers(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}
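/* Reprogram the memory controller aperture: stop the MC clients, wait for
 * the MC to go idle, clear the (unused) AGP aperture and point the FB
 * location registers at the VRAM range chosen earlier. */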
static void rs600_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;

        /* Stops all mc clients */
        rv515_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (rs600_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

        /* FIXME: What does AGP mean for such a chipset ? */
        WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
        WREG32_MC(R_000006_AGP_BASE, 0);
        WREG32_MC(R_000007_AGP_BASE_2, 0);

        WREG32_MC(R_000004_MC_FB_LOCATION,
                  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
                  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
        WREG32(R_000134_HDP_FB_LOCATION,
               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

        rv515_mc_resume(rdev, &save);
}
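/* Common hardware bring-up shared by init and resume: program the MC, start
 * the clocks, enable the GART, then bring up the CP ring, writeback and
 * indirect buffers. */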
static int rs600_startup(struct radeon_device *rdev)
{
        int r;

        rs600_mc_program(rdev);
        /* Resume clock */
        rv515_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs600_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs600_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}
int rs600_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        rs600_gart_disable(rdev);
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        atom_asic_init(rdev->mode_info.atom_context);
        /* Resume clock after posting */
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs600_gart_disable(rdev);
        return 0;
}
void rs600_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs600_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}
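/* One-time driver init for RS600: disable VGA, fetch and parse the ATOM
 * BIOS, post the card if needed, then set up clocks, memory, interrupts and
 * the GART before attempting acceleration through rs600_startup(). */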
int rs600_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        rv515_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r)
                        return r;
        } else {
                dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get VRAM information */
        rs600_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = rs600_mc_init(rdev);
        if (r)
                return r;
        rs600_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs600_gart_init(rdev);
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
        rdev->accel_working = true;
        r = rs600_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs600_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}