/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100 "radeon/R100_cp.bin"
#define FIRMWARE_R200 "radeon/R200_cp.bin"
#define FIRMWARE_R300 "radeon/R300_cp.bin"
#define FIRMWARE_R420 "radeon/R420_cp.bin"
#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
#define FIRMWARE_R520 "radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280,
 * and others in some cases.
 */
static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
    if (crtc == 0)
        return RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR;
    else
        return RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR;
}
static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
    u32 vline1, vline2;

    if (crtc == 0) {
        vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
        vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
    } else {
        vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
        vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
    }
    return vline1 != vline2;
}
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
    unsigned i = 0;

    if (crtc >= rdev->num_crtc)
        return;

    if (crtc == 0) {
        if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
            return;
    } else {
        if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
            return;
    }

    /* depending on when we hit vblank, we may be close to active; if so,
     * wait for another frame.
     */
    while (r100_is_in_vblank(rdev, crtc)) {
        if (i++ % 100 == 0) {
            if (!r100_is_counter_moving(rdev, crtc))
                break;
        }
    }

    while (!r100_is_in_vblank(rdev, crtc)) {
        if (i++ % 100 == 0) {
            if (!r100_is_counter_moving(rdev, crtc))
                break;
        }
    }
}
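/*
 * Usage note (editorial, not from the original source): the two loops above
 * first wait out any vblank period we may already be in, then wait for the
 * next one to start, so the caller always observes a fresh vblank edge. The
 * r100_is_counter_moving() probe every 100 iterations is the escape hatch
 * for a stopped scanout (e.g. a blanked or powered-down crtc), which would
 * otherwise make either loop spin forever.
 */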
/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
    /* enable the pflip int */
    radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
    /* disable the pflip int */
    radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
    struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
    u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
    int i;

    /* Lock the graphics update lock */
    /* update the scanout addresses */
    WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

    /* Wait for update_pending to go high. */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
            break;
        udelay(1);
    }
    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

    /* Unlock the lock, so double-buffering can take place inside vblank */
    tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
    WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

    /* Return current update_pending status: */
    return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
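/*
 * Illustrative sketch (editorial, not part of the driver): the flip
 * protocol above boils down to
 *
 *   WREG32(OFFSET, new_base | OFFSET_LOCK);  latch the new base, lock update
 *   poll GUI_TRIG_OFFSET until set/timeout;  hw has seen the new address
 *   WREG32(OFFSET, new_base);                unlock; hw flips inside vblank
 *
 * The return value tells the flip handler in the display code whether the
 * double-buffered update is still pending at the time of the call.
 */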
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
    int i;

    rdev->pm.dynpm_can_upclock = true;
    rdev->pm.dynpm_can_downclock = true;

    switch (rdev->pm.dynpm_planned_action) {
    case DYNPM_ACTION_MINIMUM:
        rdev->pm.requested_power_state_index = 0;
        rdev->pm.dynpm_can_downclock = false;
        break;
    case DYNPM_ACTION_DOWNCLOCK:
        if (rdev->pm.current_power_state_index == 0) {
            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
            rdev->pm.dynpm_can_downclock = false;
        } else {
            if (rdev->pm.active_crtc_count > 1) {
                for (i = 0; i < rdev->pm.num_power_states; i++) {
                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                        continue;
                    else if (i >= rdev->pm.current_power_state_index) {
                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                        break;
                    } else {
                        rdev->pm.requested_power_state_index = i;
                        break;
                    }
                }
            } else
                rdev->pm.requested_power_state_index =
                    rdev->pm.current_power_state_index - 1;
        }
        /* don't use the power state if crtcs are active and no display flag is set */
        if ((rdev->pm.active_crtc_count > 0) &&
            (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
             RADEON_PM_MODE_NO_DISPLAY)) {
            rdev->pm.requested_power_state_index++;
        }
        break;
    case DYNPM_ACTION_UPCLOCK:
        if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
            rdev->pm.dynpm_can_upclock = false;
        } else {
            if (rdev->pm.active_crtc_count > 1) {
                for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                        continue;
                    else if (i <= rdev->pm.current_power_state_index) {
                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                        break;
                    } else {
                        rdev->pm.requested_power_state_index = i;
                        break;
                    }
                }
            } else
                rdev->pm.requested_power_state_index =
                    rdev->pm.current_power_state_index + 1;
        }
        break;
    case DYNPM_ACTION_DEFAULT:
        rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.dynpm_can_upclock = false;
        break;
    case DYNPM_ACTION_NONE:
    default:
        DRM_ERROR("Requested mode for undefined dynpm action\n");
        return;
    }
    /* only one clock mode per power state */
    rdev->pm.requested_clock_mode_index = 0;

    DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
                     clock_info[rdev->pm.requested_clock_mode_index].sclk,
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
                     clock_info[rdev->pm.requested_clock_mode_index].mclk,
                     rdev->pm.power_state[rdev->pm.requested_power_state_index].
                     pcie_lanes);
}
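/*
 * Worked example (editorial): with three power states 0 (low), 1 (mid) and
 * 2 (high/default) and a single active crtc, DYNPM_ACTION_UPCLOCK from
 * current index 1 simply requests index 2. With more than one active crtc,
 * the descending loop instead picks the highest state that is not flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY, falling back to the current state
 * when nothing higher qualifies.
 */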
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode.
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
    /* default */
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
    /* low sh */
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
    /* mid sh */
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
    /* high sh */
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
    /* low mh */
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
    /* mid mh */
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
    /* high mh */
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
    int requested_index = rdev->pm.requested_power_state_index;
    struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
    struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
    u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

    if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
        if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
            tmp = RREG32(voltage->gpio.reg);
            if (voltage->active_high)
                tmp |= voltage->gpio.mask;
            else
                tmp &= ~(voltage->gpio.mask);
            WREG32(voltage->gpio.reg, tmp);
            if (voltage->delay)
                udelay(voltage->delay);
        } else {
            tmp = RREG32(voltage->gpio.reg);
            if (voltage->active_high)
                tmp &= ~voltage->gpio.mask;
            else
                tmp |= voltage->gpio.mask;
            WREG32(voltage->gpio.reg, tmp);
            if (voltage->delay)
                udelay(voltage->delay);
        }
    }

    sclk_cntl = RREG32_PLL(SCLK_CNTL);
    sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
    sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
    sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
    sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
    if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
        sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
        if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
        else
            sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
        if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
        else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
    } else
        sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

    if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
        sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
        if (voltage->delay) {
            sclk_more_cntl |= VOLTAGE_DROP_SYNC;
            switch (voltage->delay) {
            case 33:
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
                break;
            case 66:
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
                break;
            case 99:
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
                break;
            case 132:
                sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
                break;
            }
        } else
            sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
    } else
        sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

    if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
        sclk_cntl &= ~FORCE_HDP;
    else
        sclk_cntl |= FORCE_HDP;

    WREG32_PLL(SCLK_CNTL, sclk_cntl);
    WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
    WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

    /* set pcie lanes */
    if ((rdev->flags & RADEON_IS_PCIE) &&
        !(rdev->flags & RADEON_IS_IGP) &&
        rdev->asic->pm.set_pcie_lanes &&
        (ps->pcie_lanes !=
         rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
        radeon_set_pcie_lanes(rdev,
                              ps->pcie_lanes);
        DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
    }
}
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
    struct drm_device *ddev = rdev->ddev;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    u32 tmp;

    /* disable any active CRTCs */
    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
        radeon_crtc = to_radeon_crtc(crtc);
        if (radeon_crtc->enabled) {
            if (radeon_crtc->crtc_id) {
                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
                tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
            } else {
                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
                tmp |= RADEON_CRTC_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
            }
        }
    }
}
/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
    struct drm_device *ddev = rdev->ddev;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    u32 tmp;

    /* enable any active CRTCs */
    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
        radeon_crtc = to_radeon_crtc(crtc);
        if (radeon_crtc->enabled) {
            if (radeon_crtc->crtc_id) {
                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
                tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
            } else {
                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
                tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
            }
        }
    }
}
/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
    return !(RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE);
}
/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
    bool connected = false;

    switch (hpd) {
    case RADEON_HPD_1:
        if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
            connected = true;
        break;
    case RADEON_HPD_2:
        if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
            connected = true;
        break;
    default:
        break;
    }
    return connected;
}
/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
                           enum radeon_hpd_id hpd)
{
    u32 tmp;
    bool connected = r100_hpd_sense(rdev, hpd);

    switch (hpd) {
    case RADEON_HPD_1:
        tmp = RREG32(RADEON_FP_GEN_CNTL);
        if (connected)
            tmp &= ~RADEON_FP_DETECT_INT_POL;
        else
            tmp |= RADEON_FP_DETECT_INT_POL;
        WREG32(RADEON_FP_GEN_CNTL, tmp);
        break;
    case RADEON_HPD_2:
        tmp = RREG32(RADEON_FP2_GEN_CNTL);
        if (connected)
            tmp &= ~RADEON_FP2_DETECT_INT_POL;
        else
            tmp |= RADEON_FP2_DETECT_INT_POL;
        WREG32(RADEON_FP2_GEN_CNTL, tmp);
        break;
    default:
        break;
    }
}
/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;
    unsigned enable = 0;

    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enable |= 1 << radeon_connector->hpd.hpd;
        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
    }
    radeon_irq_kms_enable_hpd(rdev, enable);
}
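/*
 * Example (editorial): 'enable' is a bitmask of hpd pins, so a board with
 * connectors on RADEON_HPD_1 and RADEON_HPD_2 would pass 0x3 to
 * radeon_irq_kms_enable_hpd(), assuming RADEON_HPD_1 == 0 in the
 * radeon_hpd_id enum.
 */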
/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;
    unsigned disable = 0;

    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        disable |= 1 << radeon_connector->hpd.hpd;
    }
    radeon_irq_kms_disable_hpd(rdev, disable);
}
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
    /* TODO: can we do something here? */
    /* The hw seems to cache only one entry, so we should discard that
     * entry; otherwise, if the first GPU GART read hits it, the access
     * could end up at the wrong address. */
}
int r100_pci_gart_init(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.ptr) {
        WARN(1, "R100 PCI GART already initialized\n");
        return 0;
    }
    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r)
        return r;
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
    rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
    rdev->asic->gart.set_page = &r100_pci_gart_set_page;
    return radeon_gart_table_ram_alloc(rdev);
}
int r100_pci_gart_enable(struct radeon_device *rdev)
{
    uint32_t tmp;

    radeon_gart_restore(rdev);
    /* discard memory request outside of configured range */
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32(RADEON_AIC_CNTL, tmp);
    /* set address range for PCI address translate */
    WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
    WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
    /* set PCI GART page-table base address */
    WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
    WREG32(RADEON_AIC_CNTL, tmp);
    r100_pci_gart_tlb_flush(rdev);
    DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
             (unsigned)(rdev->mc.gtt_size >> 20),
             (unsigned long long)rdev->gart.table_addr);
    rdev->gart.ready = true;
    return 0;
}
void r100_pci_gart_disable(struct radeon_device *rdev)
{
    u32 tmp;

    /* discard memory request outside of configured range */
    tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
    WREG32(RADEON_AIC_LO_ADDR, 0);
    WREG32(RADEON_AIC_HI_ADDR, 0);
}
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    u32 *gtt = rdev->gart.ptr;

    if (i < 0 || i >= rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }
    gtt[i] = cpu_to_le32(lower_32_bits(addr));
    return 0;
}
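/*
 * Illustration (editorial, assumes RADEON_GPU_PAGE_SIZE == 4096): after
 *
 *   r100_pci_gart_set_page(rdev, 2, dma_addr);
 *
 * a GPU access to rdev->mc.gtt_start + 2 * 4096 is translated through
 * gtt[2], which holds lower_32_bits(dma_addr) stored little-endian, so
 * the table layout is independent of CPU endianness.
 */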
void r100_pci_gart_fini(struct radeon_device *rdev)
{
    radeon_gart_fini(rdev);
    r100_pci_gart_disable(rdev);
    radeon_gart_table_ram_free(rdev);
}
int r100_irq_set(struct radeon_device *rdev)
{
    uint32_t tmp = 0;

    if (!rdev->irq.installed) {
        WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
        WREG32(R_000040_GEN_INT_CNTL, 0);
        return -EINVAL;
    }
    if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
        tmp |= RADEON_SW_INT_ENABLE;
    }
    if (rdev->irq.crtc_vblank_int[0] ||
        atomic_read(&rdev->irq.pflip[0])) {
        tmp |= RADEON_CRTC_VBLANK_MASK;
    }
    if (rdev->irq.crtc_vblank_int[1] ||
        atomic_read(&rdev->irq.pflip[1])) {
        tmp |= RADEON_CRTC2_VBLANK_MASK;
    }
    if (rdev->irq.hpd[0]) {
        tmp |= RADEON_FP_DETECT_MASK;
    }
    if (rdev->irq.hpd[1]) {
        tmp |= RADEON_FP2_DETECT_MASK;
    }
    WREG32(RADEON_GEN_INT_CNTL, tmp);
    return 0;
}
void r100_irq_disable(struct radeon_device *rdev)
{
    u32 tmp;

    WREG32(R_000040_GEN_INT_CNTL, 0);
    /* Wait and acknowledge irq */
    mdelay(1);
    tmp = RREG32(R_000044_GEN_INT_STATUS);
    WREG32(R_000044_GEN_INT_STATUS, tmp);
}
static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
    uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
    uint32_t irq_mask = RADEON_SW_INT_TEST |
        RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
        RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

    if (irqs) {
        WREG32(RADEON_GEN_INT_STATUS, irqs);
    }
    return irqs & irq_mask;
}
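/*
 * Note (editorial): GEN_INT_STATUS is write-one-to-clear, so writing the
 * raw status back acknowledges exactly the interrupts just sampled; the
 * masked value returned is what r100_irq_process() dispatches on.
 */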
int r100_irq_process(struct radeon_device *rdev)
{
    uint32_t status, msi_rearm;
    bool queue_hotplug = false;

    status = r100_irq_ack(rdev);
    if (!status) {
        return IRQ_NONE;
    }
    if (rdev->shutdown) {
        return IRQ_NONE;
    }
    while (status) {
        /* SW interrupt */
        if (status & RADEON_SW_INT_TEST) {
            radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
        }
        /* Vertical blank interrupts */
        if (status & RADEON_CRTC_VBLANK_STAT) {
            if (rdev->irq.crtc_vblank_int[0]) {
                drm_handle_vblank(rdev->ddev, 0);
                rdev->pm.vblank_sync = true;
                wake_up(&rdev->irq.vblank_queue);
            }
            if (atomic_read(&rdev->irq.pflip[0]))
                radeon_crtc_handle_flip(rdev, 0);
        }
        if (status & RADEON_CRTC2_VBLANK_STAT) {
            if (rdev->irq.crtc_vblank_int[1]) {
                drm_handle_vblank(rdev->ddev, 1);
                rdev->pm.vblank_sync = true;
                wake_up(&rdev->irq.vblank_queue);
            }
            if (atomic_read(&rdev->irq.pflip[1]))
                radeon_crtc_handle_flip(rdev, 1);
        }
        if (status & RADEON_FP_DETECT_STAT) {
            queue_hotplug = true;
            DRM_DEBUG("HPD1\n");
        }
        if (status & RADEON_FP2_DETECT_STAT) {
            queue_hotplug = true;
            DRM_DEBUG("HPD2\n");
        }
        status = r100_irq_ack(rdev);
    }
    if (queue_hotplug)
        schedule_work(&rdev->hotplug_work);
    if (rdev->msi_enabled) {
        switch (rdev->family) {
        case CHIP_RS400:
        case CHIP_RS480:
            msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
            WREG32(RADEON_AIC_CNTL, msi_rearm);
            WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
            break;
        default:
            WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
            break;
        }
    }
    return IRQ_HANDLED;
}
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
    if (crtc == 0)
        return RREG32(RADEON_CRTC_CRNT_FRAME);
    else
        return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib scheduling and buffer moves) */
void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
    struct radeon_ring *ring = &rdev->ring[fence->ring];

    /* We have to make sure that caches are flushed before
     * the CPU might read something from VRAM. */
    radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
    radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
    /* Wait until IDLE & CLEAN */
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
                      RADEON_HDP_READ_BUFFER_INVALIDATE);
    radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
    /* Emit fence sequence & fire IRQ */
    radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
    radeon_ring_write(ring, fence->seq);
    radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
    radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
void r100_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
                              bool emit_wait)
{
    /* Unused on older asics, since we don't have semaphores or multiple rings */
    BUG();
}
int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_gpu_pages,
                   struct radeon_fence **fence)
{
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    uint32_t cur_pages;
    uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
    uint32_t pitch;
    uint32_t stride_pixels;
    unsigned ndw;
    int num_loops;
    int r = 0;

    /* radeon limited to 16k stride */
    stride_bytes &= 0x3fff;
    /* radeon pitch is /64 */
    pitch = stride_bytes / 64;
    stride_pixels = stride_bytes / 4;
    num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

    /* Ask for enough room for blit + flush + fence */
    ndw = 64 + (10 * num_loops);
    r = radeon_ring_lock(rdev, ring, ndw);
    if (r) {
        DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
        return -EINVAL;
    }
    while (num_gpu_pages > 0) {
        cur_pages = num_gpu_pages;
        if (cur_pages > 8191) {
            cur_pages = 8191;
        }
        num_gpu_pages -= cur_pages;

        /* pages are in Y direction - height
           page width in X direction - width */
        radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
        radeon_ring_write(ring,
                          RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                          RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                          RADEON_GMC_SRC_CLIPPING |
                          RADEON_GMC_DST_CLIPPING |
                          RADEON_GMC_BRUSH_NONE |
                          (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
                          RADEON_GMC_SRC_DATATYPE_COLOR |
                          RADEON_ROP3_S |
                          RADEON_DP_SRC_SOURCE_MEMORY |
                          RADEON_GMC_CLR_CMP_CNTL_DIS |
                          RADEON_GMC_WR_MSK_DIS);
        radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
        radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
        radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
        radeon_ring_write(ring, num_gpu_pages);
        radeon_ring_write(ring, num_gpu_pages);
        radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
    }
    radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
    radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
    radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    radeon_ring_write(ring,
                      RADEON_WAIT_2D_IDLECLEAN |
                      RADEON_WAIT_HOST_IDLECLEAN |
                      RADEON_WAIT_DMA_GUI_IDLE);
    if (fence) {
        r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
    }
    radeon_ring_unlock_commit(rdev, ring);
    return r;
}
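/*
 * Worked example (editorial): copying 16 GPU pages gives
 * num_loops = DIV_ROUND_UP(16, 8191) = 1 and ndw = 64 + 10 * 1 = 74,
 * i.e. one BITBLT_MULTI packet plus the trailing flush/wait and the
 * optional fence fit comfortably within the reserved dwords.
 */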
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    uint32_t tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        tmp = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
            return 0;
        }
        udelay(1);
    }
    return -1;
}
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
    int r;

    r = radeon_ring_lock(rdev, ring, 2);
    if (r) {
        return;
    }
    radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
    radeon_ring_write(ring,
                      RADEON_ISYNC_ANY2D_IDLE3D |
                      RADEON_ISYNC_ANY3D_IDLE2D |
                      RADEON_ISYNC_WAIT_IDLEGUI |
                      RADEON_ISYNC_CPSCRATCH_IDLEGUI);
    radeon_ring_unlock_commit(rdev, ring);
}
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
    struct platform_device *pdev;
    const char *fw_name = NULL;
    int err;

    pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
    err = IS_ERR(pdev);
    if (err) {
        printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
        return -EINVAL;
    }
    if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
        (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
        (rdev->family == CHIP_RS200)) {
        DRM_INFO("Loading R100 Microcode\n");
        fw_name = FIRMWARE_R100;
    } else if ((rdev->family == CHIP_R200) ||
               (rdev->family == CHIP_RV250) ||
               (rdev->family == CHIP_RV280) ||
               (rdev->family == CHIP_RS300)) {
        DRM_INFO("Loading R200 Microcode\n");
        fw_name = FIRMWARE_R200;
    } else if ((rdev->family == CHIP_R300) ||
               (rdev->family == CHIP_R350) ||
               (rdev->family == CHIP_RV350) ||
               (rdev->family == CHIP_RV380) ||
               (rdev->family == CHIP_RS400) ||
               (rdev->family == CHIP_RS480)) {
        DRM_INFO("Loading R300 Microcode\n");
        fw_name = FIRMWARE_R300;
    } else if ((rdev->family == CHIP_R420) ||
               (rdev->family == CHIP_R423) ||
               (rdev->family == CHIP_RV410)) {
        DRM_INFO("Loading R400 Microcode\n");
        fw_name = FIRMWARE_R420;
    } else if ((rdev->family == CHIP_RS690) ||
               (rdev->family == CHIP_RS740)) {
        DRM_INFO("Loading RS690/RS740 Microcode\n");
        fw_name = FIRMWARE_RS690;
    } else if (rdev->family == CHIP_RS600) {
        DRM_INFO("Loading RS600 Microcode\n");
        fw_name = FIRMWARE_RS600;
    } else if ((rdev->family == CHIP_RV515) ||
               (rdev->family == CHIP_R520) ||
               (rdev->family == CHIP_RV530) ||
               (rdev->family == CHIP_R580) ||
               (rdev->family == CHIP_RV560) ||
               (rdev->family == CHIP_RV570)) {
        DRM_INFO("Loading R500 Microcode\n");
        fw_name = FIRMWARE_R520;
    }

    err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
    platform_device_unregister(pdev);
    if (err) {
        printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
               fw_name);
    } else if (rdev->me_fw->size % 8) {
        printk(KERN_ERR
               "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
               rdev->me_fw->size, fw_name);
        err = -EINVAL;
        release_firmware(rdev->me_fw);
        rdev->me_fw = NULL;
    }
    return err;
}
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
    const __be32 *fw_data;
    int i, size;

    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }

    if (rdev->me_fw) {
        size = rdev->me_fw->size / 4;
        fw_data = (const __be32 *)&rdev->me_fw->data[0];
        WREG32(RADEON_CP_ME_RAM_ADDR, 0);
        for (i = 0; i < size; i += 2) {
            WREG32(RADEON_CP_ME_RAM_DATAH,
                   be32_to_cpup(&fw_data[i]));
            WREG32(RADEON_CP_ME_RAM_DATAL,
                   be32_to_cpup(&fw_data[i + 1]));
        }
    }
}
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
    struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
    unsigned rb_bufsz;
    unsigned rb_blksz;
    unsigned max_fetch;
    unsigned pre_write_timer;
    unsigned pre_write_limit;
    unsigned indirect2_start;
    unsigned indirect1_start;
    unsigned tmp;
    int r;

    if (r100_debugfs_cp_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for CP !\n");
    }
    if (!rdev->me_fw) {
        r = r100_cp_init_microcode(rdev);
        if (r) {
            DRM_ERROR("Failed to load firmware!\n");
            return r;
        }
    }

    /* Align ring size */
    rb_bufsz = drm_order(ring_size / 8);
    ring_size = (1 << (rb_bufsz + 1)) * 4;
    r100_cp_load_microcode(rdev);
    r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
                         RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
                         0, 0x7fffff, RADEON_CP_PACKET2);
    if (r) {
        return r;
    }
    /* Each time the cp reads 1024 bytes (16 dwords/quadwords), update
     * the rptr copy in system ram */
    rb_blksz = 9;
    /* cp will read 128 bytes at a time (4 dwords) */
    max_fetch = 1;
    ring->align_mask = 16 - 1;
    /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
    pre_write_timer = 64;
    /* Force CP_RB_WPTR write if written more than one time before the
     * delay expires
     */
    pre_write_limit = 0;
    /* Setup the cp cache like this (cache size is 96 dwords) :
     *    RING      0  to 15
     *    INDIRECT1 16 to 79
     *    INDIRECT2 80 to 95
     * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
     * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
     * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
     * Idea being that most of the gpu cmd will be through indirect1 buffer
     * so it gets the bigger cache.
     */
    indirect2_start = 80;
    indirect1_start = 16;
    /* cp setup */
    WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
    tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
           REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
           REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
    tmp |= RADEON_BUF_SWAP_32BIT;
#endif
    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

    /* Set ring address */
    DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
    WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
    /* Force read & write ptr to 0 */
    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
    WREG32(RADEON_CP_RB_RPTR_WR, 0);
    ring->wptr = 0;
    WREG32(RADEON_CP_RB_WPTR, ring->wptr);

    /* set the wb address whether it's enabled or not */
    WREG32(R_00070C_CP_RB_RPTR_ADDR,
           S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
    WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

    if (rdev->wb.enabled)
        WREG32(R_000770_SCRATCH_UMSK, 0xff);
    else {
        tmp |= RADEON_RB_NO_UPDATE;
        WREG32(R_000770_SCRATCH_UMSK, 0);
    }

    WREG32(RADEON_CP_RB_CNTL, tmp);
    udelay(10);
    ring->rptr = RREG32(RADEON_CP_RB_RPTR);
    /* Set cp mode to bus mastering & enable cp */
    WREG32(RADEON_CP_CSQ_MODE,
           REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
           REG_SET(RADEON_INDIRECT1_START, indirect1_start));
    WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
    WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
    WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

    /* at this point everything should be setup correctly to enable master */
    pci_set_master(rdev->pdev);

    radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
    r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
    if (r) {
        DRM_ERROR("radeon: cp isn't working (%d).\n", r);
        return r;
    }
    ring->ready = true;
    radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

    if (!ring->rptr_save_reg /* not resuming from suspend */
        && radeon_ring_supports_scratch_reg(rdev, ring)) {
        r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
        if (r) {
            DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
            ring->rptr_save_reg = 0;
        }
    }
    return 0;
}
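/*
 * Worked example (editorial): for ring_size = 1 MB,
 * rb_bufsz = drm_order(1048576 / 8) = 17 and the size is recomputed as
 * (1 << 18) * 4 = 1 MB, so a power-of-two request is preserved while odd
 * sizes get rounded up to something CP_RB_CNTL can encode.
 */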
void r100_cp_fini(struct radeon_device *rdev)
{
    if (r100_cp_wait_for_idle(rdev)) {
        DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
    }
    /* Disable ring */
    r100_cp_disable(rdev);
    radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
    radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
    DRM_INFO("radeon: cp finalized\n");
}
void r100_cp_disable(struct radeon_device *rdev)
{
    /* Disable ring */
    radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
    rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
    WREG32(RADEON_CP_CSQ_MODE, 0);
    WREG32(RADEON_CP_CSQ_CNTL, 0);
    WREG32(R_000770_SCRATCH_UMSK, 0);
    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }
}
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
                            struct radeon_cs_packet *pkt,
                            unsigned idx,
                            unsigned reg)
{
    int r;
    u32 tile_flags = 0;
    u32 tmp;
    struct radeon_cs_reloc *reloc;
    u32 value;

    r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    if (r) {
        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                  idx, reg);
        radeon_cs_dump_packet(p, pkt);
        return r;
    }

    value = radeon_get_ib_value(p, idx);
    tmp = value & 0x003fffff;
    tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

    if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
            tile_flags |= RADEON_DST_TILE_MACRO;
        if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
            if (reg == RADEON_SRC_PITCH_OFFSET) {
                DRM_ERROR("Cannot src blit from microtiled surface\n");
                radeon_cs_dump_packet(p, pkt);
                return -EINVAL;
            }
            tile_flags |= RADEON_DST_TILE_MICRO;
        }

        tmp |= tile_flags;
        p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
    } else
        p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
    return 0;
}
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
                             int idx)
{
    unsigned c, i;
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    int r = 0;
    volatile uint32_t *ib;
    u32 idx_value;

    ib = p->ib.ptr;
    track = (struct r100_cs_track *)p->track;
    c = radeon_get_ib_value(p, idx++) & 0x1F;
    if (c > 16) {
        DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
                  pkt->opcode);
        radeon_cs_dump_packet(p, pkt);
        return -EINVAL;
    }
    track->num_arrays = c;
    for (i = 0; i < (c - 1); i += 2, idx += 3) {
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        idx_value = radeon_get_ib_value(p, idx);
        ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

        track->arrays[i + 0].esize = idx_value >> 8;
        track->arrays[i + 0].robj = reloc->robj;
        track->arrays[i + 0].esize &= 0x7F;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx + 2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
        track->arrays[i + 1].robj = reloc->robj;
        track->arrays[i + 1].esize = idx_value >> 24;
        track->arrays[i + 1].esize &= 0x7F;
    }
    if (c & 1) {
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        idx_value = radeon_get_ib_value(p, idx);
        ib[idx + 1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
        track->arrays[i + 0].robj = reloc->robj;
        track->arrays[i + 0].esize = idx_value >> 8;
        track->arrays[i + 0].esize &= 0x7F;
    }
    return r;
}
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check)
{
    unsigned reg;
    unsigned i, j, m;
    unsigned idx;
    int r;

    idx = pkt->idx + 1;
    reg = pkt->reg;
    /* Check that the register falls into the register range
     * determined by the number of entries (n) in the
     * safe register bitmap.
     */
    if (pkt->one_reg_wr) {
        if ((reg >> 7) > n) {
            return -EINVAL;
        }
    } else {
        if (((reg + (pkt->count << 2)) >> 7) > n) {
            return -EINVAL;
        }
    }
    for (i = 0; i <= pkt->count; i++, idx++) {
        j = (reg >> 7);
        m = 1 << ((reg >> 2) & 31);
        if (auth[j] & m) {
            r = check(p, pkt, idx, reg);
            if (r) {
                return r;
            }
        }
        if (pkt->one_reg_wr) {
            if (!(auth[j] & m)) {
                break;
            }
        } else {
            reg += 4;
        }
    }
    return 0;
}
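/*
 * Example (editorial): for reg = 0x15c0 the bitmap lookup above uses auth
 * dword j = 0x15c0 >> 7 = 43 and bit m = 1 << ((0x15c0 >> 2) & 31) =
 * 1 << 16, i.e. each auth dword covers a 128-byte (32 register) window of
 * the register file, and a set bit routes the write through the
 * per-register check callback instead of passing it blindly.
 */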
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
    struct drm_mode_object *obj;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    struct radeon_cs_packet p3reloc, waitreloc;
    int crtc_id;
    int r;
    uint32_t header, h_idx, reg;
    volatile uint32_t *ib;

    ib = p->ib.ptr;

    /* parse the wait until */
    r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
    if (r)
        return r;

    /* check it's a wait until and only 1 count */
    if (waitreloc.reg != RADEON_WAIT_UNTIL ||
        waitreloc.count != 0) {
        DRM_ERROR("vline wait had illegal wait until segment\n");
        return -EINVAL;
    }

    if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
        DRM_ERROR("vline wait had illegal wait until\n");
        return -EINVAL;
    }

    /* jump over the NOP */
    r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
    if (r)
        return r;

    h_idx = p->idx - 2;
    p->idx += waitreloc.count + 2;
    p->idx += p3reloc.count + 2;

    header = radeon_get_ib_value(p, h_idx);
    crtc_id = radeon_get_ib_value(p, h_idx + 5);
    reg = R100_CP_PACKET0_GET_REG(header);
    obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
    if (!obj) {
        DRM_ERROR("cannot find crtc %d\n", crtc_id);
        return -ENOENT;
    }
    crtc = obj_to_crtc(obj);
    radeon_crtc = to_radeon_crtc(crtc);
    crtc_id = radeon_crtc->crtc_id;

    if (!crtc->enabled) {
        /* if the CRTC isn't enabled - we need to nop out the wait until */
        ib[h_idx + 2] = PACKET2(0);
        ib[h_idx + 3] = PACKET2(0);
    } else if (crtc_id == 1) {
        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
            header &= ~R300_CP_PACKET0_REG_MASK;
            header |= AVIVO_D2MODE_VLINE_START_END >> 2;
            break;
        case RADEON_CRTC_GUI_TRIG_VLINE:
            header &= ~R300_CP_PACKET0_REG_MASK;
            header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
            break;
        default:
            DRM_ERROR("unknown crtc reloc\n");
            return -EINVAL;
        }
        ib[h_idx] = header;
        ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
    }

    return 0;
}
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
    int vtx_size;

    vtx_size = 2;
    /* ordered according to bits in spec */
    if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
        vtx_size += 3;
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
        vtx_size += 3;
    if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
        vtx_size += 2;
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
        vtx_size += 2;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
        vtx_size += 2;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
        vtx_size += 2;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
        vtx_size++;
    /* blend weight */
    if (vtx_fmt & (0x7 << 15))
        vtx_size += (vtx_fmt >> 15) & 0x7;
    if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
        vtx_size += 3;
    if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
        vtx_size += 2;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
        vtx_size++;
    if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
        vtx_size += 3;
    if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
        vtx_size++;
    return vtx_size;
}
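/*
 * Example (editorial): a format with a packed color and one 2D texcoord
 * set, RADEON_SE_VTX_FMT_PKCOLOR | RADEON_SE_VTX_FMT_ST0, yields
 * 2 (XY base) + 1 + 2 = 5 dwords per vertex; the CS tracker multiplies
 * this by the vertex count when validating immediate-mode draws.
 */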
static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    volatile uint32_t *ib;
    uint32_t tmp;
    int r;
    int i, face;
    u32 tile_flags = 0;
    u32 idx_value;

    ib = p->ib.ptr;
    track = (struct r100_cs_track *)p->track;

    idx_value = radeon_get_ib_value(p, idx);

    switch (reg) {
    case RADEON_CRTC_GUI_TRIG_VLINE:
        r = r100_cs_packet_parse_vline(p);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        break;
        /* FIXME: only allow PACKET3 blit? easier to check for out of
         * range access */
    case RADEON_DST_PITCH_OFFSET:
    case RADEON_SRC_PITCH_OFFSET:
        r = r100_reloc_pitch_offset(p, pkt, idx, reg);
        if (r)
            return r;
        break;
    case RADEON_RB3D_DEPTHOFFSET:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->zb.robj = reloc->robj;
        track->zb.offset = idx_value;
        track->zb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case RADEON_RB3D_COLOROFFSET:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->cb[0].robj = reloc->robj;
        track->cb[0].offset = idx_value;
        track->cb_dirty = true;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case RADEON_PP_TXOFFSET_0:
    case RADEON_PP_TXOFFSET_1:
    case RADEON_PP_TXOFFSET_2:
        i = (reg - RADEON_PP_TXOFFSET_0) / 24;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= RADEON_TXO_MACRO_TILE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= RADEON_TXO_MICRO_TILE_X2;

            tmp = idx_value & ~(0x7 << 2);
            tmp |= tile_flags;
            ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
        } else
            ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[i].robj = reloc->robj;
        track->tex_dirty = true;
        break;
    case RADEON_PP_CUBIC_OFFSET_T0_0:
    case RADEON_PP_CUBIC_OFFSET_T0_1:
    case RADEON_PP_CUBIC_OFFSET_T0_2:
    case RADEON_PP_CUBIC_OFFSET_T0_3:
    case RADEON_PP_CUBIC_OFFSET_T0_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[0].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[0].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
        break;
    case RADEON_PP_CUBIC_OFFSET_T1_0:
    case RADEON_PP_CUBIC_OFFSET_T1_1:
    case RADEON_PP_CUBIC_OFFSET_T1_2:
    case RADEON_PP_CUBIC_OFFSET_T1_3:
    case RADEON_PP_CUBIC_OFFSET_T1_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[1].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[1].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
        break;
    case RADEON_PP_CUBIC_OFFSET_T2_0:
    case RADEON_PP_CUBIC_OFFSET_T2_1:
    case RADEON_PP_CUBIC_OFFSET_T2_2:
    case RADEON_PP_CUBIC_OFFSET_T2_3:
    case RADEON_PP_CUBIC_OFFSET_T2_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[2].cube_info[i].offset = idx_value;
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        track->textures[2].cube_info[i].robj = reloc->robj;
        track->tex_dirty = true;
        break;
    case RADEON_RE_WIDTH_HEIGHT:
        track->maxy = ((idx_value >> 16) & 0x7FF);
        track->cb_dirty = true;
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_COLORPITCH:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                tile_flags |= RADEON_COLOR_TILE_ENABLE;
            if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

            tmp = idx_value & ~(0x7 << 16);
            tmp |= tile_flags;
            ib[idx] = tmp;
        } else
            ib[idx] = idx_value;

        track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
        track->cb_dirty = true;
        break;
    case RADEON_RB3D_DEPTHPITCH:
        track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_CNTL:
        switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
        case 7:
        case 8:
        case 9:
        case 11:
        case 12:
            track->cb[0].cpp = 1;
            break;
        case 3:
        case 4:
        case 15:
            track->cb[0].cpp = 2;
            break;
        case 6:
            track->cb[0].cpp = 4;
            break;
        default:
            DRM_ERROR("Invalid color buffer format (%d) !\n",
                      ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
            return -EINVAL;
        }
        track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
        track->cb_dirty = true;
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_ZSTENCILCNTL:
        switch (idx_value & 0xf) {
        case 0:
            track->zb.cpp = 2;
            break;
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
            track->zb.cpp = 4;
            break;
        default:
            break;
        }
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_ZPASS_ADDR:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case RADEON_PP_CNTL:
        {
            uint32_t temp = idx_value >> 4;
            for (i = 0; i < track->num_texture; i++)
                track->textures[i].enabled = !!(temp & (1 << i));
            track->tex_dirty = true;
        }
        break;
    case RADEON_SE_VF_CNTL:
        track->vap_vf_cntl = idx_value;
        break;
    case RADEON_SE_VTX_FMT:
        track->vtx_size = r100_get_vtx_size(idx_value);
        break;
    case RADEON_PP_TEX_SIZE_0:
    case RADEON_PP_TEX_SIZE_1:
    case RADEON_PP_TEX_SIZE_2:
        i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
        track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
        track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
        track->tex_dirty = true;
        break;
    case RADEON_PP_TEX_PITCH_0:
    case RADEON_PP_TEX_PITCH_1:
    case RADEON_PP_TEX_PITCH_2:
        i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
        track->textures[i].pitch = idx_value + 32;
        track->tex_dirty = true;
        break;
    case RADEON_PP_TXFILTER_0:
    case RADEON_PP_TXFILTER_1:
    case RADEON_PP_TXFILTER_2:
        i = (reg - RADEON_PP_TXFILTER_0) / 24;
        track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
                                         >> RADEON_MAX_MIP_LEVEL_SHIFT);
        tmp = (idx_value >> 23) & 0x7;
        if (tmp == 2 || tmp == 6)
            track->textures[i].roundup_w = false;
        tmp = (idx_value >> 27) & 0x7;
        if (tmp == 2 || tmp == 6)
            track->textures[i].roundup_h = false;
        track->tex_dirty = true;
        break;
    case RADEON_PP_TXFORMAT_0:
    case RADEON_PP_TXFORMAT_1:
    case RADEON_PP_TXFORMAT_2:
        i = (reg - RADEON_PP_TXFORMAT_0) / 24;
        if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
            track->textures[i].use_pitch = 1;
        } else {
            track->textures[i].use_pitch = 0;
            track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
            track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
        }
        if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
            track->textures[i].tex_coord_type = 2;
        switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
        case RADEON_TXFORMAT_I8:
        case RADEON_TXFORMAT_RGB332:
        case RADEON_TXFORMAT_Y8:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case RADEON_TXFORMAT_AI88:
        case RADEON_TXFORMAT_ARGB1555:
        case RADEON_TXFORMAT_RGB565:
        case RADEON_TXFORMAT_ARGB4444:
        case RADEON_TXFORMAT_VYUY422:
        case RADEON_TXFORMAT_YVYU422:
        case RADEON_TXFORMAT_SHADOW16:
        case RADEON_TXFORMAT_LDUDV655:
        case RADEON_TXFORMAT_DUDV88:
            track->textures[i].cpp = 2;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case RADEON_TXFORMAT_ARGB8888:
        case RADEON_TXFORMAT_RGBA8888:
        case RADEON_TXFORMAT_SHADOW32:
        case RADEON_TXFORMAT_LDUDUV8888:
            track->textures[i].cpp = 4;
            track->textures[i].compress_format = R100_TRACK_COMP_NONE;
            break;
        case RADEON_TXFORMAT_DXT1:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
            break;
        case RADEON_TXFORMAT_DXT23:
        case RADEON_TXFORMAT_DXT45:
            track->textures[i].cpp = 1;
            track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
            break;
        }
        track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
        track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
        track->tex_dirty = true;
        break;
    case RADEON_PP_CUBIC_FACES_0:
    case RADEON_PP_CUBIC_FACES_1:
    case RADEON_PP_CUBIC_FACES_2:
        tmp = idx_value;
        i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
        for (face = 0; face < 4; face++) {
            track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
            track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
        }
        track->tex_dirty = true;
        break;
    default:
        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
               reg, idx);
        return -EINVAL;
    }
    return 0;
}
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_bo *robj)
{
    unsigned idx;
    u32 value;

    idx = pkt->idx + 1;
    value = radeon_get_ib_value(p, idx + 2);
    if ((value + 1) > radeon_bo_size(robj)) {
        DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                  "(need %u have %lu) !\n",
                  value + 1,
                  radeon_bo_size(robj));
        return -EINVAL;
    }
    return 0;
}
static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct r100_cs_track *track;
    unsigned idx;
    volatile uint32_t *ib;
    int r;

    ib = p->ib.ptr;
    idx = pkt->idx + 1;
    track = (struct r100_cs_track *)p->track;
    switch (pkt->opcode) {
    case PACKET3_3D_LOAD_VBPNTR:
        r = r100_packet3_load_vbpntr(p, pkt, idx);
        if (r)
            return r;
        break;
    case PACKET3_INDX_BUFFER:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
        r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
        if (r) {
            return r;
        }
        break;
    case 0x23:
        /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
        track->num_arrays = 1;
        track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

        track->arrays[0].robj = reloc->robj;
        track->arrays[0].esize = track->vtx_size;

        track->max_indx = radeon_get_ib_value(p, idx+1);

        track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
    case PACKET3_3D_DRAW_IMMD:
        if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        track->immd_dwords = pkt->count - 1;
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing using in-packet vertex data */
    case PACKET3_3D_DRAW_IMMD_2:
        if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
            DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
            return -EINVAL;
        }
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        track->immd_dwords = pkt->count;
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing using in-packet vertex data */
    case PACKET3_3D_DRAW_VBUF_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing of vertex buffers setup elsewhere */
    case PACKET3_3D_DRAW_INDX_2:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx);
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing using indices to vertex buffer */
    case PACKET3_3D_DRAW_VBUF:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing of vertex buffers setup elsewhere */
    case PACKET3_3D_DRAW_INDX:
        track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
        r = r100_cs_track_check(p->rdev, track);
        if (r)
            return r;
        break;
        /* triggers drawing using indices to vertex buffer */
    case PACKET3_3D_CLEAR_HIZ:
    case PACKET3_3D_CLEAR_ZMASK:
        if (p->rdev->hyperz_filp != p->filp)
            return -EINVAL;
        break;
    case PACKET3_NOP:
        break;
    default:
        DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
        return -EINVAL;
    }
    return 0;
}
int r100_cs_parse(struct radeon_cs_parser *p)
{
    struct radeon_cs_packet pkt;
    struct r100_cs_track *track;
    int r;

    track = kzalloc(sizeof(*track), GFP_KERNEL);
    if (!track)
        return -ENOMEM;
    r100_cs_track_clear(p->rdev, track);
    p->track = track;
    do {
        r = radeon_cs_packet_parse(p, &pkt, p->idx);
        if (r) {
            return r;
        }
        p->idx += pkt.count + 2;
        switch (pkt.type) {
        case RADEON_PACKET_TYPE0:
            if (p->rdev->family >= CHIP_R200)
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r200_packet0_check);
            else
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r100_packet0_check);
            break;
        case RADEON_PACKET_TYPE2:
            break;
        case RADEON_PACKET_TYPE3:
            r = r100_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n",
                      pkt.type);
            return -EINVAL;
        }
        if (r)
            return r;
    } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
    return 0;
}
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
    DRM_ERROR("pitch %d\n", t->pitch);
    DRM_ERROR("use_pitch %d\n", t->use_pitch);
    DRM_ERROR("width %d\n", t->width);
    DRM_ERROR("width_11 %d\n", t->width_11);
    DRM_ERROR("height %d\n", t->height);
    DRM_ERROR("height_11 %d\n", t->height_11);
    DRM_ERROR("num levels %d\n", t->num_levels);
    DRM_ERROR("depth %d\n", t->txdepth);
    DRM_ERROR("bpp %d\n", t->cpp);
    DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
    DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
    DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
    DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_track_compress_size(int compress_format, int w, int h)
{
    int block_width, block_height, block_bytes;
    int wblocks, hblocks;
    int min_wblocks;
    int sz;

    block_width = 4;
    block_height = 4;

    switch (compress_format) {
    case R100_TRACK_COMP_DXT1:
        block_bytes = 8;
        min_wblocks = 4;
        break;
    default:
    case R100_TRACK_COMP_DXT35:
        block_bytes = 16;
        min_wblocks = 2;
        break;
    }

    hblocks = (h + block_height - 1) / block_height;
    wblocks = (w + block_width - 1) / block_width;
    if (wblocks < min_wblocks)
        wblocks = min_wblocks;
    sz = wblocks * hblocks * block_bytes;
    return sz;
}
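/*
 * Worked example (editorial): a 64x64 DXT1 texture is (64/4) * (64/4) =
 * 256 blocks of 8 bytes, so
 * r100_track_compress_size(R100_TRACK_COMP_DXT1, 64, 64) returns 2048.
 */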
static int r100_cs_track_cube(struct radeon_device *rdev,
                              struct r100_cs_track *track, unsigned idx)
{
    unsigned face, w, h;
    struct radeon_bo *cube_robj;
    unsigned long size;
    unsigned compress_format = track->textures[idx].compress_format;

    for (face = 0; face < 5; face++) {
        cube_robj = track->textures[idx].cube_info[face].robj;
        w = track->textures[idx].cube_info[face].width;
        h = track->textures[idx].cube_info[face].height;

        if (compress_format) {
            size = r100_track_compress_size(compress_format, w, h);
        } else
            size = w * h;
        size *= track->textures[idx].cpp;

        size += track->textures[idx].cube_info[face].offset;

        if (size > radeon_bo_size(cube_robj)) {
            DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
                      size, radeon_bo_size(cube_robj));
            r100_cs_track_texture_print(&track->textures[idx]);
            return -1;
        }
    }
    return 0;
}
2135 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2136 struct r100_cs_track *track)
2138 struct radeon_bo *robj;
2140 unsigned u, i, w, h, d;
2143 for (u = 0; u < track->num_texture; u++) {
2144 if (!track->textures[u].enabled)
2146 if (track->textures[u].lookup_disable)
2148 robj = track->textures[u].robj;
2150 DRM_ERROR("No texture bound to unit %u\n", u);
2154 for (i = 0; i <= track->textures[u].num_levels; i++) {
2155 if (track->textures[u].use_pitch) {
2156 if (rdev->family < CHIP_R300)
2157 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2159 w = track->textures[u].pitch / (1 << i);
2161 w = track->textures[u].width;
2162 if (rdev->family >= CHIP_RV515)
2163 w |= track->textures[u].width_11;
2165 if (track->textures[u].roundup_w)
2166 w = roundup_pow_of_two(w);
2168 h = track->textures[u].height;
2169 if (rdev->family >= CHIP_RV515)
2170 h |= track->textures[u].height_11;
2172 if (track->textures[u].roundup_h)
2173 h = roundup_pow_of_two(h);
2174 if (track->textures[u].tex_coord_type == 1) {
2175 d = (1 << track->textures[u].txdepth) / (1 << i);
2181 if (track->textures[u].compress_format) {
2183 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2184 /* compressed textures are block based */
2188 size *= track->textures[u].cpp;
2190 switch (track->textures[u].tex_coord_type) {
2195 if (track->separate_cube) {
2196 ret = r100_cs_track_cube(rdev, track, u);
2203 DRM_ERROR("Invalid texture coordinate type %u for unit "
2204 "%u\n", track->textures[u].tex_coord_type, u);
2207 if (size > radeon_bo_size(robj)) {
2208 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2209 "%lu\n", u, size, radeon_bo_size(robj));
2210 r100_cs_track_texture_print(&track->textures[u]);
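/**
 * r100_cs_track_check - validate the state gathered by the CS tracker.
 *
 * @rdev: radeon_device pointer
 * @track: tracked CS state
 *
 * Verify that the bound color buffers, z buffer, AA resolve buffer and
 * vertex arrays are large enough for the programmed draw, then check
 * the textures if they are dirty.
 */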
2217 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2223 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2225 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2226 !track->blend_read_enable)
2229 for (i = 0; i < num_cb; i++) {
2230 if (track->cb[i].robj == NULL) {
2231 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2234 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2235 size += track->cb[i].offset;
2236 if (size > radeon_bo_size(track->cb[i].robj)) {
2237 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2238 "(need %lu have %lu) !\n", i, size,
2239 radeon_bo_size(track->cb[i].robj));
2240 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2241 i, track->cb[i].pitch, track->cb[i].cpp,
2242 track->cb[i].offset, track->maxy);
2246 track->cb_dirty = false;
2248 if (track->zb_dirty && track->z_enabled) {
2249 if (track->zb.robj == NULL) {
2250 DRM_ERROR("[drm] No buffer for z buffer !\n");
2253 size = track->zb.pitch * track->zb.cpp * track->maxy;
2254 size += track->zb.offset;
2255 if (size > radeon_bo_size(track->zb.robj)) {
2256 DRM_ERROR("[drm] Buffer too small for z buffer "
2257 "(need %lu have %lu) !\n", size,
2258 radeon_bo_size(track->zb.robj));
2259 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2260 track->zb.pitch, track->zb.cpp,
2261 track->zb.offset, track->maxy);
2265 track->zb_dirty = false;
2267 if (track->aa_dirty && track->aaresolve) {
2268 if (track->aa.robj == NULL) {
2269 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
2272 /* I believe the format comes from colorbuffer0. */
2273 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2274 size += track->aa.offset;
2275 if (size > radeon_bo_size(track->aa.robj)) {
2276 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
2277 "(need %lu have %lu) !\n", i, size,
2278 radeon_bo_size(track->aa.robj));
2279 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
2280 i, track->aa.pitch, track->cb[0].cpp,
2281 track->aa.offset, track->maxy);
2285 track->aa_dirty = false;
2287 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2288 if (track->vap_vf_cntl & (1 << 14)) {
2289 nverts = track->vap_alt_nverts;
2291 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2293 switch (prim_walk) {
2295 for (i = 0; i < track->num_arrays; i++) {
2296 size = track->arrays[i].esize * track->max_indx * 4;
2297 if (track->arrays[i].robj == NULL) {
2298 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2299 "bound\n", prim_walk, i);
2302 if (size > radeon_bo_size(track->arrays[i].robj)) {
2303 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2304 "need %lu dwords have %lu dwords\n",
2305 prim_walk, i, size >> 2,
2306 radeon_bo_size(track->arrays[i].robj)
2308 DRM_ERROR("Max indices %u\n", track->max_indx);
2314 for (i = 0; i < track->num_arrays; i++) {
2315 size = track->arrays[i].esize * (nverts - 1) * 4;
2316 if (track->arrays[i].robj == NULL) {
2317 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2318 "bound\n", prim_walk, i);
2321 if (size > radeon_bo_size(track->arrays[i].robj)) {
2322 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2323 "need %lu dwords have %lu dwords\n",
2324 prim_walk, i, size >> 2,
2325 radeon_bo_size(track->arrays[i].robj)
2332 size = track->vtx_size * nverts;
2333 if (size != track->immd_dwords) {
2334 DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
2335 track->immd_dwords, size);
2336 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2337 nverts, track->vtx_size);
2342 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2347 if (track->tex_dirty) {
2348 track->tex_dirty = false;
2349 return r100_cs_track_texture_check(rdev, track);
2354 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2358 track->cb_dirty = true;
2359 track->zb_dirty = true;
2360 track->tex_dirty = true;
2361 track->aa_dirty = true;
2363 if (rdev->family < CHIP_R300) {
2365 if (rdev->family <= CHIP_RS200)
2366 track->num_texture = 3;
2368 track->num_texture = 6;
2370 track->separate_cube = 1;
2373 track->num_texture = 16;
2375 track->separate_cube = 0;
2376 track->aaresolve = false;
2377 track->aa.robj = NULL;
2380 for (i = 0; i < track->num_cb; i++) {
2381 track->cb[i].robj = NULL;
2382 track->cb[i].pitch = 8192;
2383 track->cb[i].cpp = 16;
2384 track->cb[i].offset = 0;
2386 track->z_enabled = true;
2387 track->zb.robj = NULL;
2388 track->zb.pitch = 8192;
2390 track->zb.offset = 0;
2391 track->vtx_size = 0x7F;
2392 track->immd_dwords = 0xFFFFFFFFUL;
2393 track->num_arrays = 11;
2394 track->max_indx = 0x00FFFFFFUL;
2395 for (i = 0; i < track->num_arrays; i++) {
2396 track->arrays[i].robj = NULL;
2397 track->arrays[i].esize = 0x7F;
2399 for (i = 0; i < track->num_texture; i++) {
2400 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2401 track->textures[i].pitch = 16536;
2402 track->textures[i].width = 16536;
2403 track->textures[i].height = 16536;
2404 track->textures[i].width_11 = 1 << 11;
2405 track->textures[i].height_11 = 1 << 11;
2406 track->textures[i].num_levels = 12;
2407 if (rdev->family <= CHIP_RS200) {
2408 track->textures[i].tex_coord_type = 0;
2409 track->textures[i].txdepth = 0;
2411 track->textures[i].txdepth = 16;
2412 track->textures[i].tex_coord_type = 1;
2414 track->textures[i].cpp = 64;
2415 track->textures[i].robj = NULL;
2416 /* CS IB emission code makes sure texture units are disabled */
2417 track->textures[i].enabled = false;
2418 track->textures[i].lookup_disable = false;
2419 track->textures[i].roundup_w = true;
2420 track->textures[i].roundup_h = true;
2421 if (track->separate_cube)
2422 for (face = 0; face < 5; face++) {
2423 track->textures[i].cube_info[face].robj = NULL;
2424 track->textures[i].cube_info[face].width = 16536;
2425 track->textures[i].cube_info[face].height = 16536;
2426 track->textures[i].cube_info[face].offset = 0;
2432 * Global GPU functions
2434 static void r100_errata(struct radeon_device *rdev)
2436 rdev->pll_errata = 0;
2438 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2439 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2442 if (rdev->family == CHIP_RV100 ||
2443 rdev->family == CHIP_RS100 ||
2444 rdev->family == CHIP_RS200) {
2445 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2449 static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2454 for (i = 0; i < rdev->usec_timeout; i++) {
2455 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2464 int r100_gui_wait_for_idle(struct radeon_device *rdev)
2469 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2470 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
2471 " Bad things might happen.\n");
2473 for (i = 0; i < rdev->usec_timeout; i++) {
2474 tmp = RREG32(RADEON_RBBM_STATUS);
2475 if (!(tmp & RADEON_RBBM_ACTIVE)) {
2483 int r100_mc_wait_for_idle(struct radeon_device *rdev)
2488 for (i = 0; i < rdev->usec_timeout; i++) {
2489 /* read MC_STATUS */
2490 tmp = RREG32(RADEON_MC_STATUS);
2491 if (tmp & RADEON_MC_IDLE) {
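/**
 * r100_gpu_is_lockup - check if the GPU is locked up.
 *
 * @rdev: radeon_device pointer
 * @ring: ring to check
 *
 * If RBBM reports the GUI as idle, refresh the lockup tracking;
 * otherwise force some CP activity and test whether the ring still
 * makes progress.
 */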
2499 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2503 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2504 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2505 radeon_ring_lockup_update(ring);
2508 /* force CP activities */
2509 radeon_ring_force_activity(rdev, ring);
2510 return radeon_ring_test_lockup(rdev, ring);
2513 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2514 void r100_enable_bm(struct radeon_device *rdev)
2517 /* Enable bus mastering */
2518 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2519 WREG32(RADEON_BUS_CNTL, tmp);
2522 void r100_bm_disable(struct radeon_device *rdev)
2526 /* disable bus mastering */
2527 tmp = RREG32(R_000030_BUS_CNTL);
2528 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2530 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2532 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2533 tmp = RREG32(RADEON_BUS_CNTL);
2535 pci_clear_master(rdev->pdev);
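/**
 * r100_asic_reset - soft reset the GPU (r1xx-r4xx).
 *
 * @rdev: radeon_device pointer
 *
 * Stop the MC and CP, soft reset the engine blocks and then the CP,
 * saving/restoring PCI state and bus mastering around the reset.
 * Returns 0 on success, negative error code if the GPU is still busy.
 */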
2539 int r100_asic_reset(struct radeon_device *rdev)
2541 struct r100_mc_save save;
2545 status = RREG32(R_000E40_RBBM_STATUS);
2546 if (!G_000E40_GUI_ACTIVE(status)) {
2549 r100_mc_stop(rdev, &save);
2550 status = RREG32(R_000E40_RBBM_STATUS);
2551 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2553 WREG32(RADEON_CP_CSQ_CNTL, 0);
2554 tmp = RREG32(RADEON_CP_RB_CNTL);
2555 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2556 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2557 WREG32(RADEON_CP_RB_WPTR, 0);
2558 WREG32(RADEON_CP_RB_CNTL, tmp);
2559 /* save PCI state */
2560 pci_save_state(rdev->pdev);
2561 /* disable bus mastering */
2562 r100_bm_disable(rdev);
2563 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2564 S_0000F0_SOFT_RESET_RE(1) |
2565 S_0000F0_SOFT_RESET_PP(1) |
2566 S_0000F0_SOFT_RESET_RB(1));
2567 RREG32(R_0000F0_RBBM_SOFT_RESET);
2569 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2571 status = RREG32(R_000E40_RBBM_STATUS);
2572 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2574 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2575 RREG32(R_0000F0_RBBM_SOFT_RESET);
2577 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2579 status = RREG32(R_000E40_RBBM_STATUS);
2580 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2581 /* restore PCI & busmastering */
2582 pci_restore_state(rdev->pdev);
2583 r100_enable_bm(rdev);
2584 /* Check if GPU is idle */
2585 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2586 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2587 dev_err(rdev->dev, "failed to reset GPU\n");
2590 dev_info(rdev->dev, "GPU reset succeeded\n");
2591 r100_mc_resume(rdev, &save);
2595 void r100_set_common_regs(struct radeon_device *rdev)
2597 struct drm_device *dev = rdev->ddev;
2598 bool force_dac2 = false;
2601 /* set these so they don't interfere with anything */
2602 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2603 WREG32(RADEON_SUBPIC_CNTL, 0);
2604 WREG32(RADEON_VIPH_CONTROL, 0);
2605 WREG32(RADEON_I2C_CNTL_1, 0);
2606 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2607 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2608 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2610 /* always set up dac2 on rn50 and some rv100 as lots
2611 * of servers seem to wire it up to a VGA port but
2612 * don't report it in the bios connector table.
2615 switch (dev->pdev->device) {
2624 /* DELL triple head servers */
2625 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2626 ((dev->pdev->subsystem_device == 0x016c) ||
2627 (dev->pdev->subsystem_device == 0x016d) ||
2628 (dev->pdev->subsystem_device == 0x016e) ||
2629 (dev->pdev->subsystem_device == 0x016f) ||
2630 (dev->pdev->subsystem_device == 0x0170) ||
2631 (dev->pdev->subsystem_device == 0x017d) ||
2632 (dev->pdev->subsystem_device == 0x017e) ||
2633 (dev->pdev->subsystem_device == 0x0183) ||
2634 (dev->pdev->subsystem_device == 0x018a) ||
2635 (dev->pdev->subsystem_device == 0x019a)))
2641 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2642 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2643 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2645 /* For CRT on DAC2, don't turn it on if BIOS didn't
2646 enable it, even if it's detected. */
2649 /* force it to crtc0 */
2650 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2651 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2652 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2654 /* set up the TV DAC */
2655 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2656 RADEON_TV_DAC_STD_MASK |
2657 RADEON_TV_DAC_RDACPD |
2658 RADEON_TV_DAC_GDACPD |
2659 RADEON_TV_DAC_BDACPD |
2660 RADEON_TV_DAC_BGADJ_MASK |
2661 RADEON_TV_DAC_DACADJ_MASK);
2662 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2663 RADEON_TV_DAC_NHOLD |
2664 RADEON_TV_DAC_STD_PS2 |
2667 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2668 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2669 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2672 /* switch PM block to ACPI mode */
2673 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2674 tmp &= ~RADEON_PM_MODE_SEL;
2675 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2682 static void r100_vram_get_type(struct radeon_device *rdev)
2686 rdev->mc.vram_is_ddr = false;
2687 if (rdev->flags & RADEON_IS_IGP)
2688 rdev->mc.vram_is_ddr = true;
2689 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2690 rdev->mc.vram_is_ddr = true;
2691 if ((rdev->family == CHIP_RV100) ||
2692 (rdev->family == CHIP_RS100) ||
2693 (rdev->family == CHIP_RS200)) {
2694 tmp = RREG32(RADEON_MEM_CNTL);
2695 if (tmp & RV100_HALF_MODE) {
2696 rdev->mc.vram_width = 32;
2698 rdev->mc.vram_width = 64;
2700 if (rdev->flags & RADEON_SINGLE_CRTC) {
2701 rdev->mc.vram_width /= 4;
2702 rdev->mc.vram_is_ddr = true;
2704 } else if (rdev->family <= CHIP_RV280) {
2705 tmp = RREG32(RADEON_MEM_CNTL);
2706 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2707 rdev->mc.vram_width = 128;
2709 rdev->mc.vram_width = 64;
2713 rdev->mc.vram_width = 128;
2717 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2722 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2724 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2725 * that is, have the 2nd generation multifunction PCI interface
2727 if (rdev->family == CHIP_RV280 ||
2728 rdev->family >= CHIP_RV350) {
2729 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2730 ~RADEON_HDP_APER_CNTL);
2731 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2732 return aper_size * 2;
2735 /* Older cards have all sorts of funny issues to deal with. First
2736 * check if it's a multifunction card by reading the PCI config
2737 * header type... Limit those to one aperture size
2739 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2741 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2742 DRM_INFO("Limiting VRAM to one aperture\n");
2746 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2747 * has set it up. We don't write this as it's broken on some ASICs but
2748 * we expect the BIOS to have done the right thing (might be too optimistic...)
2750 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2751 return aper_size * 2;
2755 void r100_vram_init_sizes(struct radeon_device *rdev)
2757 u64 config_aper_size;
2759 /* work out accessible VRAM */
2760 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2761 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2762 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2763 /* FIXME we don't use the second aperture yet when we could use it */
2764 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2765 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2766 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2767 if (rdev->flags & RADEON_IS_IGP) {
2769 /* read NB_TOM to get the amount of ram stolen for the GPU */
2770 tom = RREG32(RADEON_NB_TOM);
2771 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2772 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2773 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2775 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2776 /* Some production boards of m6 will report 0 if it's 8 MB */
2779 if (rdev->mc.real_vram_size == 0) {
2780 rdev->mc.real_vram_size = 8192 * 1024;
2781 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2783 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2784 * Novell bug 204882, along with lots of Ubuntu ones
2786 if (rdev->mc.aper_size > config_aper_size)
2787 config_aper_size = rdev->mc.aper_size;
2789 if (config_aper_size > rdev->mc.real_vram_size)
2790 rdev->mc.mc_vram_size = config_aper_size;
2792 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2796 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2800 temp = RREG32(RADEON_CONFIG_CNTL);
2801 if (!state) {
2802 temp &= ~RADEON_CFG_VGA_RAM_EN;
2803 temp |= RADEON_CFG_VGA_IO_DIS;
2805 temp &= ~RADEON_CFG_VGA_IO_DIS;
2807 WREG32(RADEON_CONFIG_CNTL, temp);
2810 static void r100_mc_init(struct radeon_device *rdev)
2814 r100_vram_get_type(rdev);
2815 r100_vram_init_sizes(rdev);
2816 base = rdev->mc.aper_base;
2817 if (rdev->flags & RADEON_IS_IGP)
2818 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2819 radeon_vram_location(rdev, &rdev->mc, base);
2820 rdev->mc.gtt_base_align = 0;
2821 if (!(rdev->flags & RADEON_IS_AGP))
2822 radeon_gtt_location(rdev, &rdev->mc);
2823 radeon_update_bandwidth_info(rdev);
2828 * Indirect registers accessors
2830 void r100_pll_errata_after_index(struct radeon_device *rdev)
2832 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2833 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2834 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2838 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2840 /* This workaround is necessary on RV100, RS100 and RS200 chips
2841 * or the chip could hang on a subsequent access
2843 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2847 /* This function is required to work around a hardware bug in some (all?)
2848 * revisions of the R300. This workaround should be called after every
2849 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2850 * may not be correct.
2852 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2855 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2856 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2857 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2858 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2859 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
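/**
 * r100_pll_rreg - read an indirect PLL register.
 *
 * @rdev: radeon_device pointer
 * @reg: PLL register index
 *
 * The access goes through the CLOCK_CNTL_INDEX/DATA pair, applying the
 * per-family errata workarounds after the index and data accesses.
 */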
2863 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2867 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2868 r100_pll_errata_after_index(rdev);
2869 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2870 r100_pll_errata_after_data(rdev);
2874 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2876 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2877 r100_pll_errata_after_index(rdev);
2878 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2879 r100_pll_errata_after_data(rdev);
2882 static void r100_set_safe_registers(struct radeon_device *rdev)
2884 if (ASIC_IS_RN50(rdev)) {
2885 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2886 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2887 } else if (rdev->family < CHIP_R200) {
2888 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2889 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2891 r200_set_safe_registers(rdev);
2898 #if defined(CONFIG_DEBUG_FS)
2899 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2901 struct drm_info_node *node = (struct drm_info_node *) m->private;
2902 struct drm_device *dev = node->minor->dev;
2903 struct radeon_device *rdev = dev->dev_private;
2904 uint32_t reg, value;
2907 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2908 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2909 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2910 for (i = 0; i < 64; i++) {
2911 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2912 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2913 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2914 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2915 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2920 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2922 struct drm_info_node *node = (struct drm_info_node *) m->private;
2923 struct drm_device *dev = node->minor->dev;
2924 struct radeon_device *rdev = dev->dev_private;
2925 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2927 unsigned count, i, j;
2929 radeon_ring_free_size(rdev, ring);
2930 rdp = RREG32(RADEON_CP_RB_RPTR);
2931 wdp = RREG32(RADEON_CP_RB_WPTR);
2932 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2933 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2934 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2935 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2936 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2937 seq_printf(m, "%u dwords in ring\n", count);
2938 for (j = 0; j <= count; j++) {
2939 i = (rdp + j) & ring->ptr_mask;
2940 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2946 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2948 struct drm_info_node *node = (struct drm_info_node *) m->private;
2949 struct drm_device *dev = node->minor->dev;
2950 struct radeon_device *rdev = dev->dev_private;
2951 uint32_t csq_stat, csq2_stat, tmp;
2952 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2955 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2956 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2957 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2958 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2959 r_rptr = (csq_stat >> 0) & 0x3ff;
2960 r_wptr = (csq_stat >> 10) & 0x3ff;
2961 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2962 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2963 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2964 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2965 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2966 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2967 seq_printf(m, "Ring rptr %u\n", r_rptr);
2968 seq_printf(m, "Ring wptr %u\n", r_wptr);
2969 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2970 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2971 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2972 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2973 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2974 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2975 seq_printf(m, "Ring fifo:\n");
2976 for (i = 0; i < 256; i++) {
2977 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2978 tmp = RREG32(RADEON_CP_CSQ_DATA);
2979 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2981 seq_printf(m, "Indirect1 fifo:\n");
2982 for (i = 256; i <= 512; i++) {
2983 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2984 tmp = RREG32(RADEON_CP_CSQ_DATA);
2985 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2987 seq_printf(m, "Indirect2 fifo:\n");
2988 for (i = 640; i < ib1_wptr; i++) {
2989 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2990 tmp = RREG32(RADEON_CP_CSQ_DATA);
2991 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2996 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2998 struct drm_info_node *node = (struct drm_info_node *) m->private;
2999 struct drm_device *dev = node->minor->dev;
3000 struct radeon_device *rdev = dev->dev_private;
3003 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3004 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3005 tmp = RREG32(RADEON_MC_FB_LOCATION);
3006 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3007 tmp = RREG32(RADEON_BUS_CNTL);
3008 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3009 tmp = RREG32(RADEON_MC_AGP_LOCATION);
3010 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3011 tmp = RREG32(RADEON_AGP_BASE);
3012 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3013 tmp = RREG32(RADEON_HOST_PATH_CNTL);
3014 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3015 tmp = RREG32(0x01D0);
3016 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3017 tmp = RREG32(RADEON_AIC_LO_ADDR);
3018 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3019 tmp = RREG32(RADEON_AIC_HI_ADDR);
3020 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3021 tmp = RREG32(0x01E4);
3022 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3026 static struct drm_info_list r100_debugfs_rbbm_list[] = {
3027 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3030 static struct drm_info_list r100_debugfs_cp_list[] = {
3031 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3032 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3035 static struct drm_info_list r100_debugfs_mc_info_list[] = {
3036 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3040 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3042 #if defined(CONFIG_DEBUG_FS)
3043 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3049 int r100_debugfs_cp_init(struct radeon_device *rdev)
3051 #if defined(CONFIG_DEBUG_FS)
3052 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3058 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3060 #if defined(CONFIG_DEBUG_FS)
3061 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
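/**
 * r100_set_surface_reg - program a surface register.
 *
 * @rdev: radeon_device pointer
 * @reg: surface register index
 * @tiling_flags: RADEON_TILING_* flags
 * @pitch: surface pitch
 * @offset: start offset of the surface
 * @obj_size: size of the surface in bytes
 *
 * Translate the generic tiling flags into the family-specific surface
 * bits and write the surface info and bound registers.
 */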
3067 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3068 uint32_t tiling_flags, uint32_t pitch,
3069 uint32_t offset, uint32_t obj_size)
3071 int surf_index = reg * 16;
3074 if (rdev->family <= CHIP_RS200) {
3075 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3076 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3077 flags |= RADEON_SURF_TILE_COLOR_BOTH;
3078 if (tiling_flags & RADEON_TILING_MACRO)
3079 flags |= RADEON_SURF_TILE_COLOR_MACRO;
3080 /* setting pitch to 0 disables tiling */
3081 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3084 } else if (rdev->family <= CHIP_RV280) {
3085 if (tiling_flags & (RADEON_TILING_MACRO))
3086 flags |= R200_SURF_TILE_COLOR_MACRO;
3087 if (tiling_flags & RADEON_TILING_MICRO)
3088 flags |= R200_SURF_TILE_COLOR_MICRO;
3090 if (tiling_flags & RADEON_TILING_MACRO)
3091 flags |= R300_SURF_TILE_MACRO;
3092 if (tiling_flags & RADEON_TILING_MICRO)
3093 flags |= R300_SURF_TILE_MICRO;
3096 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3097 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3098 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3099 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3101 /* r100/r200 divide by 16 */
3102 if (rdev->family < CHIP_R300)
3103 flags |= pitch / 16;
3108 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3109 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3110 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3111 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3115 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3117 int surf_index = reg * 16;
3118 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
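/**
 * r100_bandwidth_update - program display watermarks.
 *
 * @rdev: radeon_device pointer
 *
 * Derive memory bandwidth and display drain rates from the current
 * clocks, memory type and timings, then program GRPH_BUFFER_CNTL (and
 * the CRTC2/RS400 equivalents) with stop requests and critical points
 * so the display FIFOs do not underflow.
 */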
3121 void r100_bandwidth_update(struct radeon_device *rdev)
3123 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3124 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3125 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3126 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3127 fixed20_12 memtcas_ff[8] = {
3132 dfixed_init_half(1),
3133 dfixed_init_half(2),
3136 fixed20_12 memtcas_rs480_ff[8] = {
3142 dfixed_init_half(1),
3143 dfixed_init_half(2),
3144 dfixed_init_half(3),
3146 fixed20_12 memtcas2_ff[8] = {
3156 fixed20_12 memtrbs[8] = {
3158 dfixed_init_half(1),
3160 dfixed_init_half(2),
3162 dfixed_init_half(3),
3166 fixed20_12 memtrbs_r4xx[8] = {
3176 fixed20_12 min_mem_eff;
3177 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3178 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3179 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3180 disp_drain_rate2, read_return_rate;
3181 fixed20_12 time_disp1_drop_priority;
3183 int cur_size = 16; /* in octawords */
3184 int critical_point = 0, critical_point2;
3185 /* uint32_t read_return_rate, time_disp1_drop_priority; */
3186 int stop_req, max_stop_req;
3187 struct drm_display_mode *mode1 = NULL;
3188 struct drm_display_mode *mode2 = NULL;
3189 uint32_t pixel_bytes1 = 0;
3190 uint32_t pixel_bytes2 = 0;
3192 radeon_update_display_priority(rdev);
3194 if (rdev->mode_info.crtcs[0]->base.enabled) {
3195 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3196 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
3198 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3199 if (rdev->mode_info.crtcs[1]->base.enabled) {
3200 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3201 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
3205 min_mem_eff.full = dfixed_const_8(0);
3207 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3208 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3209 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3210 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3211 /* check crtc enables */
3213 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3215 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3216 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3220 * determine if there is enough bandwidth for the current mode
3222 sclk_ff = rdev->pm.sclk;
3223 mclk_ff = rdev->pm.mclk;
3225 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3226 temp_ff.full = dfixed_const(temp);
3227 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
3231 peak_disp_bw.full = 0;
3233 temp_ff.full = dfixed_const(1000);
3234 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3235 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3236 temp_ff.full = dfixed_const(pixel_bytes1);
3237 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3240 temp_ff.full = dfixed_const(1000);
3241 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3242 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3243 temp_ff.full = dfixed_const(pixel_bytes2);
3244 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3247 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3248 if (peak_disp_bw.full >= mem_bw.full) {
3249 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
3250 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
3253 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3254 temp = RREG32(RADEON_MEM_TIMING_CNTL);
3255 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3256 mem_trcd = ((temp >> 2) & 0x3) + 1;
3257 mem_trp = ((temp & 0x3)) + 1;
3258 mem_tras = ((temp & 0x70) >> 4) + 1;
3259 } else if (rdev->family == CHIP_R300 ||
3260 rdev->family == CHIP_R350) { /* r300, r350 */
3261 mem_trcd = (temp & 0x7) + 1;
3262 mem_trp = ((temp >> 8) & 0x7) + 1;
3263 mem_tras = ((temp >> 11) & 0xf) + 4;
3264 } else if (rdev->family == CHIP_RV350 ||
3265 rdev->family <= CHIP_RV380) {
3267 mem_trcd = (temp & 0x7) + 3;
3268 mem_trp = ((temp >> 8) & 0x7) + 3;
3269 mem_tras = ((temp >> 11) & 0xf) + 6;
3270 } else if (rdev->family == CHIP_R420 ||
3271 rdev->family == CHIP_R423 ||
3272 rdev->family == CHIP_RV410) {
3274 mem_trcd = (temp & 0xf) + 3;
3277 mem_trp = ((temp >> 8) & 0xf) + 3;
3280 mem_tras = ((temp >> 12) & 0x1f) + 6;
3283 } else { /* RV200, R200 */
3284 mem_trcd = (temp & 0x7) + 1;
3285 mem_trp = ((temp >> 8) & 0x7) + 1;
3286 mem_tras = ((temp >> 12) & 0xf) + 4;
3289 trcd_ff.full = dfixed_const(mem_trcd);
3290 trp_ff.full = dfixed_const(mem_trp);
3291 tras_ff.full = dfixed_const(mem_tras);
3293 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3294 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3295 data = (temp & (7 << 20)) >> 20;
3296 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3297 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3298 tcas_ff = memtcas_rs480_ff[data];
3300 tcas_ff = memtcas_ff[data];
3302 tcas_ff = memtcas2_ff[data];
3304 if (rdev->family == CHIP_RS400 ||
3305 rdev->family == CHIP_RS480) {
3306 /* extra cas latency stored in bits 23-25, 0-4 clocks */
3307 data = (temp >> 23) & 0x7;
3309 tcas_ff.full += dfixed_const(data);
3312 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3313 /* on the R300, Tcas is included in Trbs.
3315 temp = RREG32(RADEON_MEM_CNTL);
3316 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3318 if (R300_MEM_USE_CD_CH_ONLY & temp) {
3319 temp = RREG32(R300_MC_IND_INDEX);
3320 temp &= ~R300_MC_IND_ADDR_MASK;
3321 temp |= R300_MC_READ_CNTL_CD_mcind;
3322 WREG32(R300_MC_IND_INDEX, temp);
3323 temp = RREG32(R300_MC_IND_DATA);
3324 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3326 temp = RREG32(R300_MC_READ_CNTL_AB);
3327 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3330 temp = RREG32(R300_MC_READ_CNTL_AB);
3331 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3333 if (rdev->family == CHIP_RV410 ||
3334 rdev->family == CHIP_R420 ||
3335 rdev->family == CHIP_R423)
3336 trbs_ff = memtrbs_r4xx[data];
3338 trbs_ff = memtrbs[data];
3339 tcas_ff.full += trbs_ff.full;
3342 sclk_eff_ff.full = sclk_ff.full;
3344 if (rdev->flags & RADEON_IS_AGP) {
3345 fixed20_12 agpmode_ff;
3346 agpmode_ff.full = dfixed_const(radeon_agpmode);
3347 temp_ff.full = dfixed_const_666(16);
3348 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3350 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3352 if (ASIC_IS_R300(rdev)) {
3353 sclk_delay_ff.full = dfixed_const(250);
3355 if ((rdev->family == CHIP_RV100) ||
3356 rdev->flags & RADEON_IS_IGP) {
3357 if (rdev->mc.vram_is_ddr)
3358 sclk_delay_ff.full = dfixed_const(41);
3360 sclk_delay_ff.full = dfixed_const(33);
3362 if (rdev->mc.vram_width == 128)
3363 sclk_delay_ff.full = dfixed_const(57);
3365 sclk_delay_ff.full = dfixed_const(41);
3369 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3371 if (rdev->mc.vram_is_ddr) {
3372 if (rdev->mc.vram_width == 32) {
3373 k1.full = dfixed_const(40);
3376 k1.full = dfixed_const(20);
3380 k1.full = dfixed_const(40);
3384 temp_ff.full = dfixed_const(2);
3385 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3386 temp_ff.full = dfixed_const(c);
3387 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3388 temp_ff.full = dfixed_const(4);
3389 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3390 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3391 mc_latency_mclk.full += k1.full;
3393 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3394 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3397 HW cursor time assuming worst case of full size colour cursor.
3399 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3400 temp_ff.full += trcd_ff.full;
3401 if (temp_ff.full < tras_ff.full)
3402 temp_ff.full = tras_ff.full;
3403 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3405 temp_ff.full = dfixed_const(cur_size);
3406 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3408 Find the total latency for the display data.
3410 disp_latency_overhead.full = dfixed_const(8);
3411 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3412 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3413 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3415 if (mc_latency_mclk.full > mc_latency_sclk.full)
3416 disp_latency.full = mc_latency_mclk.full;
3418 disp_latency.full = mc_latency_sclk.full;
3420 /* setup Max GRPH_STOP_REQ default value */
3421 if (ASIC_IS_RV100(rdev))
3422 max_stop_req = 0x5c;
3424 max_stop_req = 0x7c;
3428 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3429 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3431 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3433 if (stop_req > max_stop_req)
3434 stop_req = max_stop_req;
3437 Find the drain rate of the display buffer.
3439 temp_ff.full = dfixed_const((16/pixel_bytes1));
3440 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3443 Find the critical point of the display buffer.
3445 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3446 crit_point_ff.full += dfixed_const_half(0);
3448 critical_point = dfixed_trunc(crit_point_ff);
3450 if (rdev->disp_priority == 2) {
3455 The critical point should never be above max_stop_req-4. Setting
3456 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3458 if (max_stop_req - critical_point < 4)
3461 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
3462 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
3463 critical_point = 0x10;
3466 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3467 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3468 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3469 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3470 if ((rdev->family == CHIP_R350) &&
3471 (stop_req > 0x15)) {
3474 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3475 temp |= RADEON_GRPH_BUFFER_SIZE;
3476 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3477 RADEON_GRPH_CRITICAL_AT_SOF |
3478 RADEON_GRPH_STOP_CNTL);
3480 Write the result into the register.
3482 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3483 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3486 if ((rdev->family == CHIP_RS400) ||
3487 (rdev->family == CHIP_RS480)) {
3488 /* attempt to program RS400 disp regs correctly ??? */
3489 temp = RREG32(RS400_DISP1_REG_CNTL);
3490 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3491 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3492 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3493 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3494 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3495 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3496 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3497 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3498 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3499 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3500 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3504 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3505 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3506 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3511 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3513 if (stop_req > max_stop_req)
3514 stop_req = max_stop_req;
3517 Find the drain rate of the display buffer.
3519 temp_ff.full = dfixed_const((16/pixel_bytes2));
3520 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3522 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3523 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3524 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3525 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3526 if ((rdev->family == CHIP_R350) &&
3527 (stop_req > 0x15)) {
3530 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3531 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3532 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3533 RADEON_GRPH_CRITICAL_AT_SOF |
3534 RADEON_GRPH_STOP_CNTL);
3536 if ((rdev->family == CHIP_RS100) ||
3537 (rdev->family == CHIP_RS200))
3538 critical_point2 = 0;
3540 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3541 temp_ff.full = dfixed_const(temp);
3542 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3543 if (sclk_ff.full < temp_ff.full)
3544 temp_ff.full = sclk_ff.full;
3546 read_return_rate.full = temp_ff.full;
3549 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3550 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3552 time_disp1_drop_priority.full = 0;
3554 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3555 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3556 crit_point_ff.full += dfixed_const_half(0);
3558 critical_point2 = dfixed_trunc(crit_point_ff);
3560 if (rdev->disp_priority == 2) {
3561 critical_point2 = 0;
3564 if (max_stop_req - critical_point2 < 4)
3565 critical_point2 = 0;
3569 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3570 /* some R300 cards have problem with this set to 0 */
3571 critical_point2 = 0x10;
3574 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3575 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3577 if ((rdev->family == CHIP_RS400) ||
3578 (rdev->family == CHIP_RS480)) {
3580 /* attempt to program RS400 disp2 regs correctly ??? */
3581 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3582 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3583 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3584 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3585 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3586 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3587 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3588 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3589 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3590 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3591 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3592 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3594 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3595 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3596 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3597 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3600 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3601 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
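/**
 * r100_ring_test - basic sanity check of the CP ring.
 *
 * @rdev: radeon_device pointer
 * @ring: ring to test
 *
 * Emit a write of 0xDEADBEEF to a scratch register through the ring
 * and poll until the value lands (or the usec timeout expires).
 */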
3605 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3612 r = radeon_scratch_get(rdev, &scratch);
3614 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3617 WREG32(scratch, 0xCAFEDEAD);
3618 r = radeon_ring_lock(rdev, ring, 2);
3620 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3621 radeon_scratch_free(rdev, scratch);
3624 radeon_ring_write(ring, PACKET0(scratch, 0));
3625 radeon_ring_write(ring, 0xDEADBEEF);
3626 radeon_ring_unlock_commit(rdev, ring);
3627 for (i = 0; i < rdev->usec_timeout; i++) {
3628 tmp = RREG32(scratch);
3629 if (tmp == 0xDEADBEEF) {
3634 if (i < rdev->usec_timeout) {
3635 DRM_INFO("ring test succeeded in %d usecs\n", i);
3637 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3641 radeon_scratch_free(rdev, scratch);
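/**
 * r100_ring_ib_execute - schedule an indirect buffer on the CP ring.
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Optionally record the next read pointer, then emit the IB base
 * address and length so the CP fetches and executes it.
 */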
3645 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3647 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3649 if (ring->rptr_save_reg) {
3650 u32 next_rptr = ring->wptr + 2 + 3;
3651 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3652 radeon_ring_write(ring, next_rptr);
3655 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3656 radeon_ring_write(ring, ib->gpu_addr);
3657 radeon_ring_write(ring, ib->length_dw);
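/**
 * r100_ib_test - sanity check IB submission.
 *
 * @rdev: radeon_device pointer
 * @ring: ring the IB is submitted on
 *
 * Build a small IB that writes 0xDEADBEEF to a scratch register, wait
 * for its fence and poll the scratch register for the value.
 */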
3660 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3662 struct radeon_ib ib;
3668 r = radeon_scratch_get(rdev, &scratch);
3670 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3673 WREG32(scratch, 0xCAFEDEAD);
3674 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3676 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3679 ib.ptr[0] = PACKET0(scratch, 0);
3680 ib.ptr[1] = 0xDEADBEEF;
3681 ib.ptr[2] = PACKET2(0);
3682 ib.ptr[3] = PACKET2(0);
3683 ib.ptr[4] = PACKET2(0);
3684 ib.ptr[5] = PACKET2(0);
3685 ib.ptr[6] = PACKET2(0);
3686 ib.ptr[7] = PACKET2(0);
3688 r = radeon_ib_schedule(rdev, &ib, NULL);
3690 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3693 r = radeon_fence_wait(ib.fence, false);
3695 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3698 for (i = 0; i < rdev->usec_timeout; i++) {
3699 tmp = RREG32(scratch);
3700 if (tmp == 0xDEADBEEF) {
3705 if (i < rdev->usec_timeout) {
3706 DRM_INFO("ib test succeeded in %u usecs\n", i);
3708 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3713 radeon_ib_free(rdev, &ib);
3715 radeon_scratch_free(rdev, scratch);
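/**
 * r100_mc_stop - stop display and CP accesses to memory.
 *
 * @rdev: radeon_device pointer
 * @save: CRTC state to save for r100_mc_resume()
 *
 * Shut down the CP, save the CRTC and cursor registers, then disable
 * the VGA aperture, cursor, overlay and CRTC display requests so the
 * MC can be reprogrammed safely.
 */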
3719 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3721 /* Shutdown CP; we shouldn't need to do that, but better be safe than sorry */
3724 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3725 WREG32(R_000740_CP_CSQ_CNTL, 0);
3727 /* Save a few CRTC registers */
3728 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3729 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3730 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3731 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3732 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3733 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3734 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3737 /* Disable VGA aperture access */
3738 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3739 /* Disable cursor, overlay, crtc */
3740 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3741 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3742 S_000054_CRTC_DISPLAY_DIS(1));
3743 WREG32(R_000050_CRTC_GEN_CNTL,
3744 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3745 S_000050_CRTC_DISP_REQ_EN_B(1));
3746 WREG32(R_000420_OV0_SCALE_CNTL,
3747 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3748 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3749 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3750 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3751 S_000360_CUR2_LOCK(1));
3752 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3753 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3754 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3755 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3756 WREG32(R_000360_CUR2_OFFSET,
3757 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
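/**
 * r100_mc_resume - undo r100_mc_stop().
 *
 * @rdev: radeon_device pointer
 * @save: CRTC state saved by r100_mc_stop()
 *
 * Point the CRTC base addresses at the new VRAM start and restore the
 * saved CRTC registers.
 */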
3761 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3763 /* Update base address for crtc */
3764 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3765 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3766 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3768 /* Restore CRTC registers */
3769 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3770 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3771 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3772 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3773 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3777 void r100_vga_render_disable(struct radeon_device *rdev)
3781 tmp = RREG8(R_0003C2_GENMO_WT);
3782 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3785 static void r100_debugfs(struct radeon_device *rdev)
3789 r = r100_debugfs_mc_info_init(rdev);
3791 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3794 static void r100_mc_program(struct radeon_device *rdev)
3796 struct r100_mc_save save;
3798 /* Stop all mc clients */
3799 r100_mc_stop(rdev, &save);
3800 if (rdev->flags & RADEON_IS_AGP) {
3801 WREG32(R_00014C_MC_AGP_LOCATION,
3802 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3803 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3804 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3805 if (rdev->family > CHIP_RV200)
3806 WREG32(R_00015C_AGP_BASE_2,
3807 upper_32_bits(rdev->mc.agp_base) & 0xff);
3809 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3810 WREG32(R_000170_AGP_BASE, 0);
3811 if (rdev->family > CHIP_RV200)
3812 WREG32(R_00015C_AGP_BASE_2, 0);
3814 /* Wait for mc idle */
3815 if (r100_mc_wait_for_idle(rdev))
3816 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3817 /* Program MC; should be a 32-bit limited address space */
3818 WREG32(R_000148_MC_FB_LOCATION,
3819 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3820 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3821 r100_mc_resume(rdev, &save);
3824 static void r100_clock_startup(struct radeon_device *rdev)
3828 if (radeon_dynclks != -1 && radeon_dynclks)
3829 radeon_legacy_set_clock_gating(rdev, 1);
3830 /* We need to force on some of the blocks */
3831 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3832 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3833 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3834 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3835 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3838 static int r100_startup(struct radeon_device *rdev)
3842 /* set common regs */
3843 r100_set_common_regs(rdev);
3845 r100_mc_program(rdev);
3847 r100_clock_startup(rdev);
3848 /* Initialize GART (initialize after TTM so we can allocate
3849 * memory through TTM but finalize after TTM) */
3850 r100_enable_bm(rdev);
3851 if (rdev->flags & RADEON_IS_PCI) {
3852 r = r100_pci_gart_enable(rdev);
3857 /* allocate wb buffer */
3858 r = radeon_wb_init(rdev);
3862 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3864 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3869 if (!rdev->irq.installed) {
3870 r = radeon_irq_kms_init(rdev);
3876 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3877 /* 1M ring buffer */
3878 r = r100_cp_init(rdev, 1024 * 1024);
3880 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3884 r = radeon_ib_pool_init(rdev);
3886 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3893 int r100_resume(struct radeon_device *rdev)
3897 /* Make sure GART is not working */
3898 if (rdev->flags & RADEON_IS_PCI)
3899 r100_pci_gart_disable(rdev);
3900 /* Resume clock before doing reset */
3901 r100_clock_startup(rdev);
3902 /* Reset gpu before posting, otherwise ATOM will enter infinite loop */
3903 if (radeon_asic_reset(rdev)) {
3904 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3905 RREG32(R_000E40_RBBM_STATUS),
3906 RREG32(R_0007C0_CP_STAT));
3909 radeon_combios_asic_init(rdev->ddev);
3910 /* Resume clock after posting */
3911 r100_clock_startup(rdev);
3912 /* Initialize surface registers */
3913 radeon_surface_init(rdev);
3915 rdev->accel_working = true;
3916 r = r100_startup(rdev);
3918 rdev->accel_working = false;
3923 int r100_suspend(struct radeon_device *rdev)
3925 r100_cp_disable(rdev);
3926 radeon_wb_disable(rdev);
3927 r100_irq_disable(rdev);
3928 if (rdev->flags & RADEON_IS_PCI)
3929 r100_pci_gart_disable(rdev);
3933 void r100_fini(struct radeon_device *rdev)
3936 radeon_wb_fini(rdev);
3937 radeon_ib_pool_fini(rdev);
3938 radeon_gem_fini(rdev);
3939 if (rdev->flags & RADEON_IS_PCI)
3940 r100_pci_gart_fini(rdev);
3941 radeon_agp_fini(rdev);
3942 radeon_irq_kms_fini(rdev);
3943 radeon_fence_driver_fini(rdev);
3944 radeon_bo_fini(rdev);
3945 radeon_atombios_fini(rdev);
3951 * Due to how kexec works, it can leave the hw fully initialised when it
3952 * boots the new kernel. However doing our init sequence with the CP and
3953 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
3954 * do some quick sanity checks and restore sane values to avoid this problem.
3957 void r100_restore_sanity(struct radeon_device *rdev)
3961 tmp = RREG32(RADEON_CP_CSQ_CNTL);
3963 WREG32(RADEON_CP_CSQ_CNTL, 0);
3965 tmp = RREG32(RADEON_CP_RB_CNTL);
3967 WREG32(RADEON_CP_RB_CNTL, 0);
3969 tmp = RREG32(RADEON_SCRATCH_UMSK);
3971 WREG32(RADEON_SCRATCH_UMSK, 0);
3975 int r100_init(struct radeon_device *rdev)
3979 /* Register debugfs file specific to this group of asics */
3982 r100_vga_render_disable(rdev);
3983 /* Initialize scratch registers */
3984 radeon_scratch_init(rdev);
3985 /* Initialize surface registers */
3986 radeon_surface_init(rdev);
3987 /* sanity check some registers to avoid hangs like after kexec */
3988 r100_restore_sanity(rdev);
3989 /* TODO: disable VGA; need to use VGA request */
3991 if (!radeon_get_bios(rdev)) {
3992 if (ASIC_IS_AVIVO(rdev))
3995 if (rdev->is_atom_bios) {
3996 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
3999 r = radeon_combios_init(rdev);
4003 /* Reset gpu before posting, otherwise ATOM will enter infinite loop */
4004 if (radeon_asic_reset(rdev)) {
4006 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4007 RREG32(R_000E40_RBBM_STATUS),
4008 RREG32(R_0007C0_CP_STAT));
4010 /* check if cards are posted or not */
4011 if (!radeon_boot_test_post_card(rdev))
4013 /* Set asic errata */
4015 /* Initialize clocks */
4016 radeon_get_clock_info(rdev->ddev);
4017 /* initialize AGP */
4018 if (rdev->flags & RADEON_IS_AGP) {
4019 r = radeon_agp_init(rdev);
4021 radeon_agp_disable(rdev);
4024 /* initialize VRAM */
4027 r = radeon_fence_driver_init(rdev);
4030 /* Memory manager */
4031 r = radeon_bo_init(rdev);
4034 if (rdev->flags & RADEON_IS_PCI) {
4035 r = r100_pci_gart_init(rdev);
4039 r100_set_safe_registers(rdev);
4041 rdev->accel_working = true;
4042 r = r100_startup(rdev);
4044 /* Something went wrong with the accel init; stop accel */
4045 dev_err(rdev->dev, "Disabling GPU acceleration\n");
4047 radeon_wb_fini(rdev);
4048 radeon_ib_pool_fini(rdev);
4049 radeon_irq_kms_fini(rdev);
4050 if (rdev->flags & RADEON_IS_PCI)
4051 r100_pci_gart_fini(rdev);
4052 rdev->accel_working = false;
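/**
 * r100_mm_rreg - read an MMIO register.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @always_indirect: force the MM_INDEX/MM_DATA path
 *
 * Read through the mapped aperture when the register is within it,
 * otherwise go through the indirect index/data pair under the
 * mmio_idx lock.
 */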
4057 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4058 bool always_indirect)
4060 if (reg < rdev->rmmio_size && !always_indirect)
4061 return readl(((void __iomem *)rdev->rmmio) + reg);
4063 unsigned long flags;
4066 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4067 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4068 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4069 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4075 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4076 bool always_indirect)
4078 if (reg < rdev->rmmio_size && !always_indirect)
4079 writel(v, ((void __iomem *)rdev->rmmio) + reg);
4081 unsigned long flags;
4083 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4084 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4085 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4086 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4090 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4092 if (reg < rdev->rio_mem_size)
4093 return ioread32(rdev->rio_mem + reg);
4095 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4096 return ioread32(rdev->rio_mem + RADEON_MM_DATA);
4100 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4102 if (reg < rdev->rio_mem_size)
4103 iowrite32(v, rdev->rio_mem + reg);
4105 iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4106 iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);