Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 30 Sep 2009 15:03:00 +0000 (08:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 30 Sep 2009 15:03:00 +0000 (08:03 -0700)
* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (25 commits)
  drm/radeon/kms: Convert R520 to new init path and associated cleanup
  drm/radeon/kms: Convert RV515 to new init path and associated cleanup
  drm: fix radeon DRM warnings when !CONFIG_DEBUG_FS
  drm: fix drm_fb_helper warning when !CONFIG_MAGIC_SYSRQ
  drm/r600: fix memory leak introduced with 64k malloc avoidance fix.
  drm/kms: make fb helper work for all drivers.
  drm/radeon/r600: fix offset handling in CS parser
  drm/radeon/kms/r600: fix forcing pci mode on agp cards
  drm/radeon/kms: fix for the extra pages copying.
  drm/radeon/kms/r600: add support for vline relocs
  drm/radeon/kms: fix some bugs in vline reloc
  drm/radeon/kms/r600: clamp vram to aperture size
  drm/kms: protect against fb helper not being created.
  drm/r600: get values from the passed in IB not the copy.
  drm: create gitignore file for radeon
  drm/radeon/kms: remove unneeded master create/destroy functions.
  drm/kms: start adding command line interface using fb.
  fb: change rules for global rules match.
  drm/radeon/kms: don't require up to 64k allocations. (v2)
  drm/radeon/kms: enable dac load detection by default.
  ...

Trivial conflicts in drivers/gpu/drm/radeon/radeon_asic.h due to adding
'->vga_set_state' function pointers.
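
For reference, the hook behind those conflicts looks like this after the merge
(abridged from the radeon.h and radeon_asic.h hunks below; the per-chip wiring
lands in the same asic tables that the rv515/r520 init-path rework rewrites):

    /* radeon.h: new per-ASIC callback plus its dispatch macro */
    void (*vga_set_state)(struct radeon_device *rdev, bool state);
    #define radeon_vga_set_state(rdev, state) \
            (rdev)->asic->vga_set_state((rdev), (state))

    /* radeon_asic.h tables: pre-AVIVO parts share the r100 helper,
     * r600/rv770 use their own implementation */
    .vga_set_state = &r100_vga_set_state,
    .vga_set_state = &r600_vga_set_state,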

1  2 
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/video/fbmem.c

index be51c5f7d0f659f9baa1efaae4dfe662c498d6e5,d2099146fc40e078dacc9eecd9104ac6e3002956..e6cce24de8020a2830a5d675e5ffc2d99e1b2dc8
@@@ -863,13 -863,11 +863,11 @@@ int r100_cs_parse_packet0(struct radeon
  void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
  {
-       struct radeon_cs_chunk *ib_chunk;
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;
  
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++) {
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@@ -896,7 -894,7 +894,7 @@@ int r100_cs_packet_parse(struct radeon_
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
-       header = ib_chunk->kdata[idx];
+       header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
   */
  int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
  {
-       struct radeon_cs_chunk *ib_chunk;
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        int crtc_id;
        int r;
        uint32_t header, h_idx, reg;
+       volatile uint32_t *ib;
  
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       ib = p->ib->ptr;
  
        /* parse the wait until */
        r = r100_cs_packet_parse(p, &waitreloc, p->idx);
                return r;
        }
  
-       if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+       if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
                DRM_ERROR("vline wait had illegal wait until\n");
                r = -EINVAL;
                return r;
        }
  
        /* jump over the NOP */
-       r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+       r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
        if (r)
                return r;
  
        h_idx = p->idx - 2;
-       p->idx += waitreloc.count;
-       p->idx += p3reloc.count;
+       p->idx += waitreloc.count + 2;
+       p->idx += p3reloc.count + 2;
  
-       header = ib_chunk->kdata[h_idx];
-       crtc_id = ib_chunk->kdata[h_idx + 5];
-       reg = ib_chunk->kdata[h_idx] >> 2;
+       header = radeon_get_ib_value(p, h_idx);
+       crtc_id = radeon_get_ib_value(p, h_idx + 5);
+       reg = header >> 2;
        mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
  
        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the wait until */
-               ib_chunk->kdata[h_idx + 2] = PACKET2(0);
-               ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+               ib[h_idx + 2] = PACKET2(0);
+               ib[h_idx + 3] = PACKET2(0);
        } else if (crtc_id == 1) {
                switch (reg) {
                case AVIVO_D1MODE_VLINE_START_END:
-                       header &= R300_CP_PACKET0_REG_MASK;
+                       header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                        break;
                case RADEON_CRTC_GUI_TRIG_VLINE:
-                       header &= R300_CP_PACKET0_REG_MASK;
+                       header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
                        break;
                default:
                        r = -EINVAL;
                        goto out;
                }
-               ib_chunk->kdata[h_idx] = header;
-               ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+               ib[h_idx] = header;
+               ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
        }
  out:
        mutex_unlock(&p->rdev->ddev->mode_config.mutex);
  int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
  {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r100_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                r100_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
-       idx = ib_chunk->kdata[p3reloc.idx + 1];
+       idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
@@@ -1126,7 -1122,6 +1122,6 @@@ static int r100_packet0_check(struct ra
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
  {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        int r;
        int i, face;
        u32 tile_flags = 0;
+       u32 idx_value;
  
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;
  
+       idx_value = radeon_get_ib_value(p, idx);
        switch (reg) {
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                        return r;
                }
                track->zb.robj = reloc->robj;
-               track->zb.offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->zb.offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        return r;
                }
                track->cb[0].robj = reloc->robj;
-               track->cb[0].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->cb[0].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_TXOFFSET_0:
        case RADEON_PP_TXOFFSET_1:
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T0_0:
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[0].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T1_0:
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[1].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T2_0:
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[2].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
-               track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+               track->maxy = ((idx_value >> 16) & 0x7FF);
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
  
-               tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+               tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
  
-               track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+               track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
                break;
        case RADEON_RB3D_DEPTHPITCH:
-               track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+               track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
                break;
        case RADEON_RB3D_CNTL:
-               switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+               switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
                case 7:
                case 8:
                case 9:
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
-                                 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+                                 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
                        return -EINVAL;
                }
-               track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+               track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
-               switch (ib_chunk->kdata[idx] & 0xf) {
+               switch (idx_value & 0xf) {
                case 0:
                        track->zb.cpp = 2;
                        break;
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
-                       uint32_t temp = ib_chunk->kdata[idx] >> 4;
+                       uint32_t temp = idx_value >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
                }
                break;
        case RADEON_SE_VF_CNTL:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = idx_value;
                break;
        case RADEON_SE_VTX_FMT:
-               track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
+               track->vtx_size = r100_get_vtx_size(idx_value);
                break;
        case RADEON_PP_TEX_SIZE_0:
        case RADEON_PP_TEX_SIZE_1:
        case RADEON_PP_TEX_SIZE_2:
                i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
-               track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-               track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+               track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+               track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
                break;
        case RADEON_PP_TEX_PITCH_0:
        case RADEON_PP_TEX_PITCH_1:
        case RADEON_PP_TEX_PITCH_2:
                i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
-               track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+               track->textures[i].pitch = idx_value + 32;
                break;
        case RADEON_PP_TXFILTER_0:
        case RADEON_PP_TXFILTER_1:
        case RADEON_PP_TXFILTER_2:
                i = (reg - RADEON_PP_TXFILTER_0) / 24;
-               track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
+               track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
                                                 >> RADEON_MAX_MIP_LEVEL_SHIFT);
-               tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+               tmp = (idx_value >> 23) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_w = false;
-               tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+               tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
                break;
        case RADEON_PP_TXFORMAT_1:
        case RADEON_PP_TXFORMAT_2:
                i = (reg - RADEON_PP_TXFORMAT_0) / 24;
-               if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
+               if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
                        track->textures[i].use_pitch = 1;
                } else {
                        track->textures[i].use_pitch = 0;
-                       track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-                       track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+                       track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+                       track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
                }
-               if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+               if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
                        track->textures[i].tex_coord_type = 2;
-               switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+               switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
                case RADEON_TXFORMAT_I8:
                case RADEON_TXFORMAT_RGB332:
                case RADEON_TXFORMAT_Y8:
                        track->textures[i].cpp = 4;
                        break;
                }
-               track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-               track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+               track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+               track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
                break;
        case RADEON_PP_CUBIC_FACES_0:
        case RADEON_PP_CUBIC_FACES_1:
        case RADEON_PP_CUBIC_FACES_2:
-               tmp = ib_chunk->kdata[idx];
+               tmp = idx_value;
                i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
                for (face = 0; face < 4; face++) {
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@@ -1427,15 -1424,14 +1424,14 @@@ int r100_cs_track_check_pkt3_indx_buffe
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_object *robj)
  {
-       struct radeon_cs_chunk *ib_chunk;
        unsigned idx;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       u32 value;
        idx = pkt->idx + 1;
-       if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
+       value = radeon_get_ib_value(p, idx + 2);
+       if ((value + 1) > radeon_object_size(robj)) {
                DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                          "(need %u have %lu) !\n",
-                         ib_chunk->kdata[idx+2] + 1,
+                         value + 1,
                          radeon_object_size(robj));
                return -EINVAL;
        }
  static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
  {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        unsigned idx;
-       unsigned i, c;
        volatile uint32_t *ib;
        int r;
  
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
-               c = ib_chunk->kdata[idx++];
-               track->num_arrays = c;
-               for (i = 0; i < (c - 1); i += 2, idx += 3) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 1].robj = reloc->robj;
-                       track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-                       track->arrays[i + 1].esize &= 0x7F;
-               }
-               if (c & 1) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-               }
+               r = r100_packet3_load_vbpntr(p, pkt, idx);
+               if (r)
+                       return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
                track->num_arrays = 1;
-               track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
+               track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
  
                track->arrays[0].robj = reloc->robj;
                track->arrays[0].esize = track->vtx_size;
  
-               track->max_indx = ib_chunk->kdata[idx+1];
+               track->max_indx = radeon_get_ib_value(p, idx+1);
  
-               track->vap_vf_cntl = ib_chunk->kdata[idx+3];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
        case PACKET3_3D_DRAW_IMMD:
-               if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_IMMD_2:
-               if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_VBUF_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using indices to vertex buffer */
        case PACKET3_3D_DRAW_VBUF:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
@@@ -1955,20 -1912,6 +1912,20 @@@ void r100_vram_init_sizes(struct radeon
                rdev->mc.real_vram_size = rdev->mc.aper_size;
  }
  
 +void r100_vga_set_state(struct radeon_device *rdev, bool state)
 +{
 +      uint32_t temp;
 +
 +      temp = RREG32(RADEON_CONFIG_CNTL);
 +      if (state == false) {
 +              temp &= ~(1<<8);
 +              temp |= (1<<9);
 +      } else {
 +              temp &= ~(1<<9);
 +      }
 +      WREG32(RADEON_CONFIG_CNTL, temp);
 +}
 +
  void r100_vram_info(struct radeon_device *rdev)
  {
        r100_vram_get_type(rdev);
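
Note on the r100.c hunks above: the CS parser no longer dereferences
ib_chunk->kdata directly but goes through radeon_get_ib_value() (defined in
the radeon.h hunk further down), and r100_cs_packet_parse_vline() now advances
p->idx past the whole packet. A sketch of the dword accounting behind those
"+ 2" fixes, assuming the usual CP packet layout of one header dword plus
(count + 1) payload dwords (not spelled out in this diff):

    /* a WAIT_UNTIL packet with pkt->count == 0 still occupies two dwords,
     * so the old "p->idx += waitreloc.count" left the parser pointing into
     * the middle of the packet */
    static unsigned cp_packet_size_dw(unsigned count)
    {
            return 1 /* header */ + (count + 1) /* payload */;
    }
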
index eab31c1d6df1612c4b91a80f4eb3de876e803c6d,6b7a40b501c0e9901b540a48c1021c45e80336c3..2e4e60edbff4e3a08fb80b727b617a18164af5e9
@@@ -33,8 -33,8 +33,8 @@@
  #include "radeon.h"
  #include "radeon_mode.h"
  #include "r600d.h"
- #include "avivod.h"
  #include "atom.h"
+ #include "avivod.h"
  
  #define PFP_UCODE_SIZE 576
  #define PM4_UCODE_SIZE 1792
@@@ -342,7 -342,7 +342,7 @@@ static void r600_mc_resume(struct radeo
  
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it overwriting our objects */
-       radeon_avivo_vga_render_disable(rdev);
+       rv515_vga_render_disable(rdev);
  }
  
  int r600_mc_init(struct radeon_device *rdev)
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+       if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+               rdev->mc.mc_vram_size = rdev->mc.aper_size;
+       if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+               rdev->mc.real_vram_size = rdev->mc.aper_size;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r)
@@@ -1499,20 -1506,6 +1506,20 @@@ int r600_startup(struct radeon_device *
        return 0;
  }
  
 +void r600_vga_set_state(struct radeon_device *rdev, bool state)
 +{
 +      uint32_t temp;
 +
 +      temp = RREG32(CONFIG_CNTL);
 +      if (state == false) {
 +              temp &= ~(1<<0);
 +              temp |= (1<<1);
 +      } else {
 +              temp &= ~(1<<1);
 +      }
 +      WREG32(CONFIG_CNTL, temp);
 +}
 +
  int r600_resume(struct radeon_device *rdev)
  {
        int r;
index 6311b1362594bfa55c8d569464253ffb31299fa8,7b0965f5495871b03376a6c9be588d07cdd84949..950b346e343ff5a66dd8ae438e588782409e4bf9
   *    - TESTING, TESTING, TESTING
   */
  
+ /* Initialization path:
+  *  We expect that acceleration initialization might fail for various
+  *  reasons even though we work hard to make it work on most
+  *  configurations. In order to still have a working userspace in such
+  *  a situation, the init path must succeed up to the memory controller
+  *  initialization point. Failures before this point are considered
+  *  fatal errors. Here is the init callchain:
+  *      radeon_device_init  performs common structure and mutex initialization
+  *      asic_init           sets up the GPU memory layout and performs all
+  *                          one-time initialization (failures in this
+  *                          function are considered fatal)
+  *      asic_startup        sets up GPU acceleration; to follow the
+  *                          guideline, the first thing this function
+  *                          should do is set up the GPU memory
+  *                          controller (only MC setup failures are
+  *                          considered fatal)
+  */
  #include <asm/atomic.h>
  #include <linux/wait.h>
  #include <linux/list.h>
@@@ -342,7 -360,7 +360,7 @@@ struct radeon_ib 
        unsigned long           idx;
        uint64_t                gpu_addr;
        struct radeon_fence     *fence;
-       volatile uint32_t       *ptr;
+       uint32_t        *ptr;
        uint32_t                length_dw;
  };
  
@@@ -415,7 -433,12 +433,12 @@@ struct radeon_cs_reloc 
  struct radeon_cs_chunk {
        uint32_t                chunk_id;
        uint32_t                length_dw;
+       int kpage_idx[2];
+       uint32_t                *kpage[2];
        uint32_t                *kdata;
+       void __user *user_ptr;
+       int last_copied_page;
+       int last_page_index;
  };
  
  struct radeon_cs_parser {
        struct radeon_ib        *ib;
        void                    *track;
        unsigned                family;
+       int parser_error;
  };
  
+ extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+ extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+ static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+ {
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       u32 pg_idx, pg_offset;
+       u32 idx_value = 0;
+       int new_page;
+       pg_idx = (idx * 4) / PAGE_SIZE;
+       pg_offset = (idx * 4) % PAGE_SIZE;
+       if (ibc->kpage_idx[0] == pg_idx)
+               return ibc->kpage[0][pg_offset/4];
+       if (ibc->kpage_idx[1] == pg_idx)
+               return ibc->kpage[1][pg_offset/4];
+       new_page = radeon_cs_update_pages(p, pg_idx);
+       if (new_page < 0) {
+               p->parser_error = new_page;
+               return 0;
+       }
+       idx_value = ibc->kpage[new_page][pg_offset/4];
+       return idx_value;
+ }
  struct radeon_cs_packet {
        unsigned        idx;
        unsigned        type;
@@@ -539,7 -592,6 +592,7 @@@ struct radeon_asic 
        int (*suspend)(struct radeon_device *rdev);
        void (*errata)(struct radeon_device *rdev);
        void (*vram_info)(struct radeon_device *rdev);
 +      void (*vga_set_state)(struct radeon_device *rdev, bool state);
        int (*gpu_reset)(struct radeon_device *rdev);
        int (*mc_init)(struct radeon_device *rdev);
        void (*mc_fini)(struct radeon_device *rdev);
@@@ -898,7 -950,6 +951,7 @@@ static inline void radeon_ring_write(st
  #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
  #define radeon_errata(rdev) (rdev)->asic->errata((rdev))
  #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
 +#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
  #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
  #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
  #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
@@@ -943,6 -994,7 +996,7 @@@ extern void radeon_clocks_fini(struct r
  extern void radeon_scratch_init(struct radeon_device *rdev);
  extern void radeon_surface_init(struct radeon_device *rdev);
  extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+ extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
  
  /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
  struct r100_mc_save {
@@@ -974,6 -1026,9 +1028,9 @@@ extern void r100_vram_init_sizes(struc
  extern void r100_wb_disable(struct radeon_device *rdev);
  extern void r100_wb_fini(struct radeon_device *rdev);
  extern int r100_wb_init(struct radeon_device *rdev);
+ extern void r100_hdp_reset(struct radeon_device *rdev);
+ extern int r100_rb2d_reset(struct radeon_device *rdev);
+ extern int r100_cp_reset(struct radeon_device *rdev);
  
  /* r300,r350,rv350,rv370,rv380 */
  extern void r300_set_reg_safe(struct radeon_device *rdev);
@@@ -985,12 -1040,29 +1042,29 @@@ extern int rv370_pcie_gart_enable(struc
  extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
  
  /* r420,r423,rv410 */
+ extern int r420_mc_init(struct radeon_device *rdev);
  extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
  extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
  extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+ extern void r420_pipes_init(struct radeon_device *rdev);
  
  /* rv515 */
+ struct rv515_mc_save {
+       u32 d1vga_control;
+       u32 d2vga_control;
+       u32 vga_render_control;
+       u32 vga_hdp_control;
+       u32 d1crtc_control;
+       u32 d2crtc_control;
+ };
  extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+ extern void rv515_vga_render_disable(struct radeon_device *rdev);
+ extern void rv515_set_safe_registers(struct radeon_device *rdev);
+ extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+ extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+ extern void rv515_clock_startup(struct radeon_device *rdev);
+ extern void rv515_debugfs(struct radeon_device *rdev);
+ extern int rv515_suspend(struct radeon_device *rdev);
  
  /* rs690, rs740 */
  extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
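
Note on the radeon.h hunks above: radeon_get_ib_value() replaces the single
contiguous ib_chunk->kdata copy with a two-slot page cache over the
user-supplied chunk, which is what lets the CS path drop the up-to-64k
allocations mentioned in the shortlog. radeon_cs_update_pages() is not part
of this diff, so the following is only a simplified userspace model of the
lookup; the round-robin refill policy is an assumption:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u
    #define DW_PER_PAGE (PAGE_SIZE / 4)

    struct ib_cache {
            const uint32_t *user;            /* stands in for chunk->user_ptr */
            uint32_t page[2][DW_PER_PAGE];   /* the cached pages (kpage[2]) */
            int page_idx[2];                 /* source page held by each slot */
            int last;                        /* slot refilled most recently */
    };

    /* miss path: refill the older slot from the backing buffer (the kernel
     * version would copy from chunk->user_ptr instead) */
    static int cache_refill(struct ib_cache *c, unsigned pg)
    {
            int slot = c->last ^ 1;

            memcpy(c->page[slot], c->user + (size_t)pg * DW_PER_PAGE, PAGE_SIZE);
            c->page_idx[slot] = (int)pg;
            c->last = slot;
            return slot;
    }

    /* mirrors the lookup logic of radeon_get_ib_value() */
    static uint32_t get_ib_value(struct ib_cache *c, unsigned idx)
    {
            unsigned pg = (idx * 4) / PAGE_SIZE;
            unsigned off = ((idx * 4) % PAGE_SIZE) / 4;

            if (c->page_idx[0] == (int)pg)
                    return c->page[0][off];
            if (c->page_idx[1] == (int)pg)
                    return c->page[1][off];
            return c->page[cache_refill(c, pg)][off];
    }

    int main(void)
    {
            static uint32_t ib[3 * DW_PER_PAGE];
            struct ib_cache c = { .user = ib, .page_idx = { -1, -1 }, .last = 0 };
            unsigned i;

            for (i = 0; i < 3 * DW_PER_PAGE; i++)
                    ib[i] = i;
            /* prints "0 1024 2055": refills slot 1, then slot 0, then slot 1 */
            printf("%u %u %u\n", get_ib_value(&c, 0),
                   get_ib_value(&c, DW_PER_PAGE),
                   get_ib_value(&c, 2 * DW_PER_PAGE + 7));
            return 0;
    }
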
index 8968f78fa1e30e8424b101a8ccf477d4885286d5,bce0cb0638677da017b01748bcd5891d476a5e77..c8a4e7b5663dfcfe38d07730b104fa18d4e57a4d
@@@ -47,7 -47,6 +47,7 @@@ uint32_t r100_mm_rreg(struct radeon_dev
  void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
  void r100_errata(struct radeon_device *rdev);
  void r100_vram_info(struct radeon_device *rdev);
 +void r100_vga_set_state(struct radeon_device *rdev, bool state);
  int r100_gpu_reset(struct radeon_device *rdev);
  int r100_mc_init(struct radeon_device *rdev);
  void r100_mc_fini(struct radeon_device *rdev);
@@@ -90,7 -89,6 +90,7 @@@ static struct radeon_asic r100_asic = 
        .init = &r100_init,
        .errata = &r100_errata,
        .vram_info = &r100_vram_info,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r100_gpu_reset,
        .mc_init = &r100_mc_init,
        .mc_fini = &r100_mc_fini,
@@@ -160,7 -158,6 +160,7 @@@ static struct radeon_asic r300_asic = 
        .init = &r300_init,
        .errata = &r300_errata,
        .vram_info = &r300_vram_info,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &r300_mc_init,
        .mc_fini = &r300_mc_fini,
@@@ -211,7 -208,6 +211,7 @@@ static struct radeon_asic r420_asic = 
        .resume = &r420_resume,
        .errata = NULL,
        .vram_info = NULL,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
@@@ -266,7 -262,6 +266,7 @@@ static struct radeon_asic rs400_asic = 
        .init = &r300_init,
        .errata = &rs400_errata,
        .vram_info = &rs400_vram_info,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs400_mc_init,
        .mc_fini = &rs400_mc_fini,
@@@ -328,7 -323,6 +328,7 @@@ static struct radeon_asic rs600_asic = 
        .init = &rs600_init,
        .errata = &rs600_errata,
        .vram_info = &rs600_vram_info,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs600_mc_init,
        .mc_fini = &rs600_mc_fini,
@@@ -378,7 -372,6 +378,7 @@@ static struct radeon_asic rs690_asic = 
        .init = &rs600_init,
        .errata = &rs690_errata,
        .vram_info = &rs690_vram_info,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs690_mc_init,
        .mc_fini = &rs690_mc_fini,
   * rv515
   */
  int rv515_init(struct radeon_device *rdev);
- void rv515_errata(struct radeon_device *rdev);
- void rv515_vram_info(struct radeon_device *rdev);
+ void rv515_fini(struct radeon_device *rdev);
  int rv515_gpu_reset(struct radeon_device *rdev);
- int rv515_mc_init(struct radeon_device *rdev);
- void rv515_mc_fini(struct radeon_device *rdev);
  uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
  void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
  void rv515_ring_start(struct radeon_device *rdev);
  uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
  void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
  void rv515_bandwidth_update(struct radeon_device *rdev);
+ int rv515_resume(struct radeon_device *rdev);
+ int rv515_suspend(struct radeon_device *rdev);
  static struct radeon_asic rv515_asic = {
        .init = &rv515_init,
-       .errata = &rv515_errata,
-       .vram_info = &rv515_vram_info,
+       .fini = &rv515_fini,
+       .suspend = &rv515_suspend,
+       .resume = &rv515_resume,
+       .errata = NULL,
+       .vram_info = NULL,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &rv515_gpu_reset,
-       .mc_init = &rv515_mc_init,
-       .mc_fini = &rv515_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
+       .mc_init = NULL,
+       .mc_fini = NULL,
+       .wb_init = NULL,
+       .wb_fini = NULL,
        .gart_init = &rv370_pcie_gart_init,
        .gart_fini = &rv370_pcie_gart_fini,
-       .gart_enable = &rv370_pcie_gart_enable,
-       .gart_disable = &rv370_pcie_gart_disable,
+       .gart_enable = NULL,
+       .gart_disable = NULL,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
+       .cp_init = NULL,
+       .cp_fini = NULL,
+       .cp_disable = NULL,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
+       .ib_test = NULL,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
  /*
   * r520,rv530,rv560,rv570,r580
   */
- void r520_errata(struct radeon_device *rdev);
- void r520_vram_info(struct radeon_device *rdev);
- int r520_mc_init(struct radeon_device *rdev);
- void r520_mc_fini(struct radeon_device *rdev);
- void r520_bandwidth_update(struct radeon_device *rdev);
+ int r520_init(struct radeon_device *rdev);
+ int r520_resume(struct radeon_device *rdev);
  static struct radeon_asic r520_asic = {
-       .init = &rv515_init,
-       .errata = &r520_errata,
-       .vram_info = &r520_vram_info,
+       .init = &r520_init,
+       .fini = &rv515_fini,
+       .suspend = &rv515_suspend,
+       .resume = &r520_resume,
+       .errata = NULL,
+       .vram_info = NULL,
 +      .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &rv515_gpu_reset,
-       .mc_init = &r520_mc_init,
-       .mc_fini = &r520_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rv370_pcie_gart_init,
-       .gart_fini = &rv370_pcie_gart_fini,
-       .gart_enable = &rv370_pcie_gart_enable,
-       .gart_disable = &rv370_pcie_gart_disable,
+       .mc_init = NULL,
+       .mc_fini = NULL,
+       .wb_init = NULL,
+       .wb_fini = NULL,
+       .gart_init = NULL,
+       .gart_fini = NULL,
+       .gart_enable = NULL,
+       .gart_disable = NULL,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
+       .cp_init = NULL,
+       .cp_fini = NULL,
+       .cp_disable = NULL,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
+       .ib_test = NULL,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
  };
  
  /*
@@@ -529,7 -522,6 +531,7 @@@ int r600_init(struct radeon_device *rde
  void r600_fini(struct radeon_device *rdev);
  int r600_suspend(struct radeon_device *rdev);
  int r600_resume(struct radeon_device *rdev);
 +void r600_vga_set_state(struct radeon_device *rdev, bool state);
  int r600_wb_init(struct radeon_device *rdev);
  void r600_wb_fini(struct radeon_device *rdev);
  void r600_cp_commit(struct radeon_device *rdev);
@@@ -566,7 -558,6 +568,7 @@@ static struct radeon_asic r600_asic = 
        .resume = &r600_resume,
        .cp_commit = &r600_cp_commit,
        .vram_info = NULL,
 +      .vga_set_state = &r600_vga_set_state,
        .gpu_reset = &r600_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
  };
  
  /*
@@@ -617,7 -608,6 +619,7 @@@ static struct radeon_asic rv770_asic = 
        .cp_commit = &r600_cp_commit,
        .vram_info = NULL,
        .gpu_reset = &rv770_gpu_reset,
 +      .vga_set_state = &r600_vga_set_state,
        .mc_init = NULL,
        .mc_fini = NULL,
        .wb_init = &r600_wb_init,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
  };
  
  #endif
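
Note on the radeon_asic.h hunks above: rv515 and r520 now use the new init
path, so their legacy per-stage hooks (mc_init/mc_fini, cp_init, gart_enable
and friends) are cleared and the sequencing moves into init/suspend/resume.
The MC reprogramming is presumably bracketed by the new rv515_mc_stop()/
rv515_mc_resume() helpers declared in the radeon.h hunk; a sketch of that
pattern (the actual call sites are in rv515.c/r520.c, not in this diff):

    struct rv515_mc_save save;

    rv515_mc_stop(rdev, &save);    /* save and quiesce CRTC/VGA scanout state */
    /* ... reprogram the memory controller while nothing scans out ... */
    rv515_mc_resume(rdev, &save);  /* restore the saved display state */
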
index daf5db780956c2bc7738912a48f5407e1d1182ca,a6733cff1fb83c74ac7b51c597266e0c1708e7d5..ec835d56d30ab97b7b7b00e2a002d4daa396aeaf
@@@ -29,7 -29,6 +29,7 @@@
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/radeon_drm.h>
 +#include <linux/vgaarb.h>
  #include "radeon_reg.h"
  #include "radeon.h"
  #include "radeon_asic.h"
@@@ -481,18 -480,7 +481,18 @@@ void radeon_combios_fini(struct radeon_
  {
  }
  
 +/* if we get transitioned to only one device, take VGA back */
 +static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 +{
 +      struct radeon_device *rdev = cookie;
  
 +      radeon_vga_set_state(rdev, state);
 +      if (state)
 +              return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 +                     VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 +      else
 +              return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 +}
  /*
   * Radeon device.
   */
@@@ -532,10 -520,13 +532,13 @@@ int radeon_device_init(struct radeon_de
  
        if (radeon_agpmode == -1) {
                rdev->flags &= ~RADEON_IS_AGP;
-               if (rdev->family >= CHIP_RV515 ||
-                   rdev->family == CHIP_RV380 ||
-                   rdev->family == CHIP_RV410 ||
-                   rdev->family == CHIP_R423) {
+               if (rdev->family >= CHIP_R600) {
+                       DRM_INFO("Forcing AGP to PCIE mode\n");
+                       rdev->flags |= RADEON_IS_PCIE;
+               } else if (rdev->family >= CHIP_RV515 ||
+                          rdev->family == CHIP_RV380 ||
+                          rdev->family == CHIP_RV410 ||
+                          rdev->family == CHIP_R423) {
                        DRM_INFO("Forcing AGP to PCIE mode\n");
                        rdev->flags |= RADEON_IS_PCIE;
                        rdev->asic->gart_init = &rv370_pcie_gart_init;
        if (r) {
                return r;
        }
 +
 +      /* if we have more than one VGA card, disable the radeon VGA resources */
 +      r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 +      if (r) {
 +              return -EINVAL;
 +      }
 +
        if (!rdev->new_init_path) {
                /* Setup errata flags */
                radeon_errata(rdev);
                /* Initialize surface registers */
                radeon_surface_init(rdev);
  
 -              /* TODO: disable VGA need to use VGA request */
                /* BIOS*/
                if (!radeon_get_bios(rdev)) {
                        if (ASIC_IS_AVIVO(rdev))
@@@ -715,7 -700,6 +718,7 @@@ void radeon_device_fini(struct radeon_d
                radeon_agp_fini(rdev);
  #endif
                radeon_irq_kms_fini(rdev);
 +              vga_client_register(rdev->pdev, NULL, NULL, NULL);
                radeon_fence_driver_fini(rdev);
                radeon_clocks_fini(rdev);
                radeon_object_fini(rdev);
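
Note on the radeon_device.c hunks above: vga_client_register() ties the new
vga_set_state hook into the VGA arbiter (and the fini path unregisters by
passing NULLs). The radeon_vga_set_decode() callback both flips the chip's
VGA decoding and reports which resources the device still decodes, roughly
(flag semantics per <linux/vgaarb.h>):

    /* state == true : still decoding legacy VGA, arbiter must mediate
     * state == false: VGA disabled on this GPU, only normal PCI decode left */
    radeon_vga_set_state(rdev, state);
    return state ? VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                   VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM
                 : VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
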
index 5b1cf04a011adf4fe26e46311f4df6b285e4dd1f,c729cd1a750662c22f77074bcb990dfdb8372eba..765bd184b6fc15382fc10b2dfaadd91414709054
@@@ -530,7 -530,7 +530,7 @@@ void radeon_ttm_fini(struct radeon_devi
  }
  
  static struct vm_operations_struct radeon_ttm_vm_ops;
 -static struct vm_operations_struct *ttm_vm_ops = NULL;
 +static const struct vm_operations_struct *ttm_vm_ops = NULL;
  
  static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
@@@ -689,9 -689,6 +689,6 @@@ struct ttm_backend *radeon_ttm_backend_
  
  #define RADEON_DEBUGFS_MEM_TYPES 2
  
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
  #if defined(CONFIG_DEBUG_FS)
  static int radeon_mm_dump_table(struct seq_file *m, void *data)
  {
  
  static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
  {
+ #if defined(CONFIG_DEBUG_FS)
+       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
+       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
        unsigned i;
  
- #if defined(CONFIG_DEBUG_FS)
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
diff --combined drivers/video/fbmem.c
index a1f2e7ce730bd1881a4c2519617bb549761f4f72,750c71f141f4d4298c59926c4b4330689f9dfb64..99bbd282ce634186e5d28f9a13cd9ba78658ddc2
@@@ -871,8 -871,8 +871,8 @@@ fb_pan_display(struct fb_info *info, st
                err = -EINVAL;
  
        if (err || !info->fbops->fb_pan_display ||
 -          var->yoffset + yres > info->var.yres_virtual ||
 -          var->xoffset + info->var.xres > info->var.xres_virtual)
 +          var->yoffset > info->var.yres_virtual - yres ||
 +          var->xoffset > info->var.xres_virtual - info->var.xres)
                return -EINVAL;
  
        if ((err = info->fbops->fb_pan_display(var, info)))
@@@ -954,7 -954,6 +954,7 @@@ fb_set_var(struct fb_info *info, struc
                        goto done;
  
                if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
 +                      struct fb_var_screeninfo old_var;
                        struct fb_videomode mode;
  
                        if (info->fbops->fb_get_caps) {
                                        goto done;
                        }
  
 +                      old_var = info->var;
                        info->var = *var;
  
 -                      if (info->fbops->fb_set_par)
 -                              info->fbops->fb_set_par(info);
 +                      if (info->fbops->fb_set_par) {
 +                              ret = info->fbops->fb_set_par(info);
 +
 +                              if (ret) {
 +                                      info->var = old_var;
 +                                      printk(KERN_WARNING "detected "
 +                                              "fb_set_par error, "
 +                                              "error code: %d\n", ret);
 +                                      goto done;
 +                              }
 +                      }
  
                        fb_pan_display(info, &info->var);
                        fb_set_cmap(&info->cmap, info);
@@@ -1800,7 -1789,7 +1800,7 @@@ static int __init video_setup(char *opt
                global = 1;
        }
  
-       if (!global && !strstr(options, "fb:")) {
+       if (!global && !strchr(options, ':')) {
                fb_mode_option = options;
                global = 1;
        }
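
Note on the fbmem.c hunks above: the fb_pan_display() bounds check is
rewritten so the untrusted offsets are never added to anything before the
comparison, presumably to stop 32-bit wrap-around from letting an
out-of-range pan through; fb_set_par() failures now restore the old var and
are reported; and the video= parser treats any "name:"-style option (not just
"fb:") as non-global. A small standalone demonstration of the wrap-around
point, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t yres = 768, yres_virtual = 1536;   /* sane mode */
            uint32_t yoffset = 0xFFFFFFF0u;             /* hostile pan request */

            /* old check: 0xFFFFFFF0 + 768 wraps to 752, which is not greater
             * than yres_virtual, so the bogus offset slipped through */
            printf("old form rejects: %d\n", yoffset + yres > yres_virtual);
            /* new check: no addition on the untrusted side, so it is rejected */
            printf("new form rejects: %d\n", yoffset > yres_virtual - yres);
            return 0;
    }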