From: Daniel Vetter
Date: Mon, 7 May 2012 09:30:46 +0000 (+0200)
Subject: Merge tag 'v3.4-rc6' into drm-intel-next
X-Git-Tag: firefly_0821_release~3680^2~2761^2~39^2
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=dc257cf154be708ecc47b8b89c12ad8cd2cc35e4;p=firefly-linux-kernel-4.4.55.git

Merge tag 'v3.4-rc6' into drm-intel-next

Conflicts:
	drivers/gpu/drm/i915/intel_display.c

Ok, this is a fun story of git totally messing things up. There
/shouldn't/ be any conflict in here, because the fixes in -rc6 only
touch functions that have not been changed in -next.

The offending commits in drm-next are 14415745b2..1fa611065, which
simply move a few functions from intel_display.c to intel_pm.c. The
problem seems to be that git diff gets completely confused:

$ git diff 14415745b2..1fa611065

is a nice mess in intel_display.c, and the diff leaks into totally
unrelated functions, whereas

$ git diff --minimal 14415745b2..1fa611065

is exactly what we want. Unfortunately there seems to be no way to
teach similar smarts to the merge diff and conflict generation code,
because with the minimal diff there really shouldn't be any conflicts.

For added hilarity, every time something in that area changes, the +
and - lines in the diff move around like crazy, again resulting in new
conflicts. So I fear this mess will stay with us for a little longer
(and might result in another backmerge down the road).

Signed-off-by: Daniel Vetter
---
dc257cf154be708ecc47b8b89c12ad8cd2cc35e4
diff --cc drivers/gpu/drm/i915/intel_display.c
index 4c844c68ec80,1b1cf3b3ff51..8c239f2d6bcd
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@@ -5357,977 -7627,1257 +5360,985 @@@ static void intel_increase_pllclock(str
if (HAS_PCH_SPLIT(dev)) return; - /* Who knows what state these registers were left in by the BIOS or - * grub? - * - * If we leave the registers in a conflicting state (e.g. with the - * display plane reading from the other pipe than the one we intend - * to use) then when we attempt to teardown the active mode, we will - * not disable the pipes and planes in the correct order -- leaving - * a plane reading from a disabled pipe and possibly leading to - * undefined behaviour. - */ + if (!dev_priv->lvds_downclock_avail) + return; - reg = DSPCNTR(plane); - val = I915_READ(reg); + dpll = I915_READ(dpll_reg); + if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { + DRM_DEBUG_DRIVER("upclocking LVDS\n"); - if ((val & DISPLAY_PLANE_ENABLE) == 0) - return; - if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) - return; + assert_panel_unlocked(dev_priv, pipe); - /* This display plane is active and attached to the other CPU pipe. */ - pipe = !pipe; + dpll &= ~DISPLAY_RATE_SELECT_FPA1; + I915_WRITE(dpll_reg, dpll); + intel_wait_for_vblank(dev, pipe); - /* Disable the plane and wait for it to stop reading from the pipe.
*/ - intel_disable_plane(dev_priv, plane, pipe); - intel_disable_pipe(dev_priv, pipe); + dpll = I915_READ(dpll_reg); + if (dpll & DISPLAY_RATE_SELECT_FPA1) + DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); + } + + /* Schedule downclock */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); } -static void intel_crtc_reset(struct drm_crtc *crtc) +static void intel_decrease_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int dpll_reg = DPLL(pipe); - int dpll = I915_READ(dpll_reg); - /* Reset flags back to the 'unknown' status so that they - * will be correctly set on the initial modeset. - */ - intel_crtc->dpms_mode = -1; + if (HAS_PCH_SPLIT(dev)) + return; - /* We need to fix up any BIOS configuration that conflicts with - * our expectations. + if (!dev_priv->lvds_downclock_avail) + return; + + /* + * Since this is called by a timer, we should never get here in + * the manual case. */ - intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); -} + if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { ++ int pipe = intel_crtc->pipe; ++ int dpll_reg = DPLL(pipe); ++ int dpll; + -static struct drm_crtc_helper_funcs intel_helper_funcs = { - .dpms = intel_crtc_dpms, - .mode_fixup = intel_crtc_mode_fixup, - .mode_set = intel_crtc_mode_set, - .mode_set_base = intel_pipe_set_base, - .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, - .disable = intel_crtc_disable, -}; + DRM_DEBUG_DRIVER("downclocking LVDS\n"); -static const struct drm_crtc_funcs intel_crtc_funcs = { - .reset = intel_crtc_reset, - .cursor_set = intel_crtc_cursor_set, - .cursor_move = intel_crtc_cursor_move, - .gamma_set = intel_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, - .destroy = intel_crtc_destroy, - .page_flip = intel_crtc_page_flip, -}; + assert_panel_unlocked(dev_priv, pipe); -static void intel_crtc_init(struct drm_device *dev, int pipe) ++ dpll = I915_READ(dpll_reg); + dpll |= DISPLAY_RATE_SELECT_FPA1; + I915_WRITE(dpll_reg, dpll); + intel_wait_for_vblank(dev, pipe); + dpll = I915_READ(dpll_reg); + if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) + DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); + } + +} + +/** + * intel_idle_update - adjust clocks for idleness + * @work: work struct + * + * Either the GPU or display (or both) went idle. Check the busy status + * here and adjust the CRTC and GPU clocks as necessary. 
+ */ +static void intel_idle_update(struct work_struct *work) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + idle_work); + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc; struct intel_crtc *intel_crtc; - int i; - intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); - if (intel_crtc == NULL) + if (!i915_powersave) return; - drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); + mutex_lock(&dev->struct_mutex); - drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); - for (i = 0; i < 256; i++) { - intel_crtc->lut_r[i] = i; - intel_crtc->lut_g[i] = i; - intel_crtc->lut_b[i] = i; - } + i915_update_gfx_val(dev_priv); - /* Swap pipes & planes for FBC on pre-965 */ - intel_crtc->pipe = pipe; - intel_crtc->plane = pipe; - if (IS_MOBILE(dev) && IS_GEN3(dev)) { - DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); - intel_crtc->plane = !pipe; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + /* Skip inactive CRTCs */ + if (!crtc->fb) + continue; + + intel_crtc = to_intel_crtc(crtc); + if (!intel_crtc->busy) + intel_decrease_pllclock(crtc); } - BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || - dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); - dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; - dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; - intel_crtc_reset(&intel_crtc->base); - intel_crtc->active = true; /* force the pipe off on setup_init_config */ - intel_crtc->bpp = 24; /* default for pre-Ironlake */ + mutex_unlock(&dev->struct_mutex); +} - if (HAS_PCH_SPLIT(dev)) { - if (pipe == 2 && IS_IVYBRIDGE(dev)) - intel_crtc->no_pll = true; - intel_helper_funcs.prepare = ironlake_crtc_prepare; - intel_helper_funcs.commit = ironlake_crtc_commit; - } else { - intel_helper_funcs.prepare = i9xx_crtc_prepare; - intel_helper_funcs.commit = i9xx_crtc_commit; - } +/** + * intel_mark_busy - mark the GPU and possibly the display busy + * @dev: drm device + * @obj: object we're operating on + * + * Callers can use this function to indicate that the GPU is busy processing + * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout + * buffer), we'll also mark the display as busy, so we know to increase its + * clock frequency. 
+ */ +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = NULL; + struct intel_framebuffer *intel_fb; + struct intel_crtc *intel_crtc; - drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; - intel_crtc->busy = false; + if (!dev_priv->busy) + dev_priv->busy = true; + else + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); - setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, - (unsigned long)intel_crtc); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (!crtc->fb) + continue; + + intel_crtc = to_intel_crtc(crtc); + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb->obj == obj) { + if (!intel_crtc->busy) { + /* Non-busy -> busy, upclock */ + intel_increase_pllclock(crtc); + intel_crtc->busy = true; + } else { + /* Busy -> busy, put off timer */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + } + } + } } -int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, - struct drm_file *file) +static void intel_crtc_destroy(struct drm_crtc *crtc) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; - struct drm_mode_object *drmmode_obj; - struct intel_crtc *crtc; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct intel_unpin_work *work; + unsigned long flags; - drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, - DRM_MODE_OBJECT_CRTC); + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); - if (!drmmode_obj) { - DRM_ERROR("no such CRTC id\n"); - return -EINVAL; + if (work) { + cancel_work_sync(&work->work); + kfree(work); } - crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); - pipe_from_crtc_id->pipe = crtc->pipe; + drm_crtc_cleanup(crtc); - return 0; + kfree(intel_crtc); } -static int intel_encoder_clones(struct drm_device *dev, int type_mask) +static void intel_unpin_work_fn(struct work_struct *__work) { - struct intel_encoder *encoder; - int index_mask = 0; - int entry = 0; + struct intel_unpin_work *work = + container_of(__work, struct intel_unpin_work, work); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { - if (type_mask & encoder->clone_mask) - index_mask |= (1 << entry); - entry++; - } + mutex_lock(&work->dev->struct_mutex); + intel_unpin_fb_obj(work->old_fb_obj); + drm_gem_object_unreference(&work->pending_flip_obj->base); + drm_gem_object_unreference(&work->old_fb_obj->base); - return index_mask; + intel_update_fbc(work->dev); + mutex_unlock(&work->dev->struct_mutex); + kfree(work); } -static bool has_edp_a(struct drm_device *dev) +static void do_intel_finish_page_flip(struct drm_device *dev, + struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!IS_MOBILE(dev)) - return false; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_unpin_work *work; + struct drm_i915_gem_object *obj; + struct drm_pending_vblank_event *e; + struct timeval tnow, tvbl; + unsigned long flags; - if ((I915_READ(DP_A) & DP_DETECTED) == 0) - return false; + /* 
Ignore early vblank irqs */ + if (intel_crtc == NULL) + return; - if (IS_GEN5(dev) && - (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) - return false; + do_gettimeofday(&tnow); - return true; -} + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + if (work == NULL || !work->pending) { + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } -static void intel_setup_outputs(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; - bool dpd_is_edp = false; - bool has_lvds; + intel_crtc->unpin_work = NULL; - has_lvds = intel_lvds_init(dev); - if (!has_lvds && !HAS_PCH_SPLIT(dev)) { - /* disable the panel fitter on everything but LVDS */ - I915_WRITE(PFIT_CONTROL, 0); - } + if (work->event) { + e = work->event; + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); - if (HAS_PCH_SPLIT(dev)) { - dpd_is_edp = intel_dpd_is_edp(dev); + /* Called before vblank count and timestamps have + * been updated for the vblank interval of flip + * completion? Need to increment vblank count and + * add one videorefresh duration to returned timestamp + * to account for this. We assume this happened if we + * get called over 0.9 frame durations after the last + * timestamped vblank. + * + * This calculation can not be used with vrefresh rates + * below 5Hz (10Hz to be on the safe side) without + * promoting to 64 integers. + */ + if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > + 9 * crtc->framedur_ns) { + e->event.sequence++; + tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + + crtc->framedur_ns); + } - if (has_edp_a(dev)) - intel_dp_init(dev, DP_A); + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; - if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D); + list_add_tail(&e->base.link, + &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); } - intel_crt_init(dev); - - if (HAS_PCH_SPLIT(dev)) { - int found; + drm_vblank_put(dev, intel_crtc->pipe); - if (I915_READ(HDMIB) & PORT_DETECTED) { - /* PCH SDVOB multiplex with HDMIB */ - found = intel_sdvo_init(dev, PCH_SDVOB); - if (!found) - intel_hdmi_init(dev, HDMIB); - if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_B); - } + spin_unlock_irqrestore(&dev->event_lock, flags); - if (I915_READ(HDMIC) & PORT_DETECTED) - intel_hdmi_init(dev, HDMIC); + obj = work->old_fb_obj; - if (I915_READ(HDMID) & PORT_DETECTED) - intel_hdmi_init(dev, HDMID); + atomic_clear_mask(1 << intel_crtc->plane, + &obj->pending_flip.counter); + if (atomic_read(&obj->pending_flip) == 0) + wake_up(&dev_priv->pending_flip_queue); - if (I915_READ(PCH_DP_C) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_C); + schedule_work(&work->work); - if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D); + trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); +} - } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { - bool found = false; +void intel_finish_page_flip(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - if (I915_READ(SDVOB) & SDVO_DETECTED) { - DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, SDVOB); - if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { - DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); - intel_hdmi_init(dev, SDVOB); - } + do_intel_finish_page_flip(dev, crtc); +} - if (!found && 
SUPPORTS_INTEGRATED_DP(dev)) { - DRM_DEBUG_KMS("probing DP_B\n"); - intel_dp_init(dev, DP_B); - } - } +void intel_finish_page_flip_plane(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - /* Before G4X SDVOC doesn't have its own detect register */ + do_intel_finish_page_flip(dev, crtc); +} - if (I915_READ(SDVOB) & SDVO_DETECTED) { - DRM_DEBUG_KMS("probing SDVOC\n"); - found = intel_sdvo_init(dev, SDVOC); - } +void intel_prepare_page_flip(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + unsigned long flags; - if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { + spin_lock_irqsave(&dev->event_lock, flags); + if (intel_crtc->unpin_work) { + if ((++intel_crtc->unpin_work->pending) > 1) + DRM_ERROR("Prepared flip multiple times\n"); + } else { + DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} - if (SUPPORTS_INTEGRATED_HDMI(dev)) { - DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); - intel_hdmi_init(dev, SDVOC); - } - if (SUPPORTS_INTEGRATED_DP(dev)) { - DRM_DEBUG_KMS("probing DP_C\n"); - intel_dp_init(dev, DP_C); - } - } +static int intel_gen2_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long offset; + u32 flip_mask; + int ret; - if (SUPPORTS_INTEGRATED_DP(dev) && - (I915_READ(DP_D) & DP_DETECTED)) { - DRM_DEBUG_KMS("probing DP_D\n"); - intel_dp_init(dev, DP_D); - } - } else if (IS_GEN2(dev)) - intel_dvo_init(dev); + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + if (ret) + goto err; - if (SUPPORTS_TV(dev)) - intel_tv_init(dev); + /* Offset into the new buffer for cases of shared fbs between CRTCs */ + offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { - encoder->base.possible_crtcs = encoder->crtc_mask; - encoder->base.possible_clones = - intel_encoder_clones(dev, encoder->clone_mask); - } + ret = BEGIN_LP_RING(6); + if (ret) + goto err_unpin; - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); + /* Can't queue multiple flips, so wait for the previous + * one to finish before executing the next. 
+ */ + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + OUT_RING(MI_NOOP); + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitches[0]); + OUT_RING(obj->gtt_offset + offset); + OUT_RING(0); /* aux display base address, unused */ + ADVANCE_LP_RING(); + return 0; - if (HAS_PCH_SPLIT(dev)) - ironlake_init_pch_refclk(dev); +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; } -static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) +static int intel_gen3_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) { - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long offset; + u32 flip_mask; + int ret; - drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(&intel_fb->obj->base); + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + if (ret) + goto err; - kfree(intel_fb); -} + /* Offset into the new buffer for cases of shared fbs between CRTCs */ + offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; -static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, - struct drm_file *file, - unsigned int *handle) -{ - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + ret = BEGIN_LP_RING(6); + if (ret) + goto err_unpin; - return drm_gem_handle_create(file, &obj->base, handle); -} + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + OUT_RING(MI_NOOP); + OUT_RING(MI_DISPLAY_FLIP_I915 | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitches[0]); + OUT_RING(obj->gtt_offset + offset); + OUT_RING(MI_NOOP); -static const struct drm_framebuffer_funcs intel_fb_funcs = { - .destroy = intel_user_framebuffer_destroy, - .create_handle = intel_user_framebuffer_create_handle, -}; + ADVANCE_LP_RING(); + return 0; -int intel_framebuffer_init(struct drm_device *dev, - struct intel_framebuffer *intel_fb, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_i915_gem_object *obj) +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +static int intel_gen4_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t pf, pipesrc; int ret; - if (obj->tiling_mode == I915_TILING_Y) - return -EINVAL; - - if (mode_cmd->pitches[0] & 63) - return -EINVAL; + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + if (ret) + goto err; - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_RGB332: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - /* RGB formats are common across chipsets */ - break; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_VYUY: - break; - default: - DRM_DEBUG_KMS("unsupported pixel format %u\n", - mode_cmd->pixel_format); - return -EINVAL; - } + ret = BEGIN_LP_RING(4); + if (ret) + goto err_unpin; - ret = drm_framebuffer_init(dev, 
&intel_fb->base, &intel_fb_funcs); - if (ret) { - DRM_ERROR("framebuffer init failed %d\n", ret); - return ret; - } + /* i965+ uses the linear or tiled offsets from the + * Display Registers (which do not change across a page-flip) + * so we need only reprogram the base address. + */ + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitches[0]); + OUT_RING(obj->gtt_offset | obj->tiling_mode); - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); - intel_fb->obj = obj; + /* XXX Enabling the panel-fitter across page-flip is so far + * untested on non-native modes, so ignore it for now. + * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; + */ + pf = 0; + pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; + OUT_RING(pf | pipesrc); + ADVANCE_LP_RING(); return 0; -} - -static struct drm_framebuffer * -intel_user_framebuffer_create(struct drm_device *dev, - struct drm_file *filp, - struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct drm_i915_gem_object *obj; - obj = to_intel_bo(drm_gem_object_lookup(dev, filp, - mode_cmd->handles[0])); - if (&obj->base == NULL) - return ERR_PTR(-ENOENT); - - return intel_framebuffer_create(dev, mode_cmd, obj); +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; } -static const struct drm_mode_config_funcs intel_mode_funcs = { - .fb_create = intel_user_framebuffer_create, - .output_poll_changed = intel_fb_output_poll_changed, -}; - -static struct drm_i915_gem_object * -intel_alloc_context_page(struct drm_device *dev) +static int intel_gen6_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *ctx; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t pf, pipesrc; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - ctx = i915_gem_alloc_object(dev, 4096); - if (!ctx) { - DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); - return NULL; - } - - ret = i915_gem_object_pin(ctx, 4096, true); - if (ret) { - DRM_ERROR("failed to pin power context: %d\n", ret); - goto err_unref; - } + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); + if (ret) + goto err; - ret = i915_gem_object_set_to_gtt_domain(ctx, 1); - if (ret) { - DRM_ERROR("failed to set-domain on power context: %d\n", ret); + ret = BEGIN_LP_RING(4); + if (ret) goto err_unpin; - } - return ctx; + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitches[0] | obj->tiling_mode); + OUT_RING(obj->gtt_offset); + - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; ++ /* Contrary to the suggestions in the documentation, ++ * "Enable Panel Fitter" does not seem to be required when page ++ * flipping with a non-native mode, and worse causes a normal ++ * modeset to fail. 
++ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; ++ */ ++ pf = 0; + pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; + OUT_RING(pf | pipesrc); + ADVANCE_LP_RING(); + return 0; err_unpin: - i915_gem_object_unpin(ctx); -err_unref: - drm_gem_object_unreference(&ctx->base); - mutex_unlock(&dev->struct_mutex); - return NULL; + intel_unpin_fb_obj(obj); +err: + return ret; } -bool ironlake_set_drps(struct drm_device *dev, u8 val) +/* + * On gen7 we currently use the blit ring because (in early silicon at least) + * the render ring doesn't give us interrpts for page flip completion, which + * means clients will hang after the first flip is queued. Fortunately the + * blit ring generates interrupts properly, so use it instead. + */ +static int intel_gen7_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + int ret; - rgvswctl = I915_READ16(MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ - } + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + ret = intel_ring_begin(ring, 4); + if (ret) + goto err_unpin; - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE16(MEMSWCTL, rgvswctl); - POSTING_READ16(MEMSWCTL); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); + intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); + intel_ring_emit(ring, (obj->gtt_offset)); + intel_ring_emit(ring, (MI_NOOP)); + intel_ring_advance(ring); + return 0; - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE16(MEMSWCTL, rgvswctl); +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} - return true; +static int intel_default_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + return -ENODEV; } -void ironlake_enable_drps(struct drm_device *dev) +static int intel_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) { + struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); - u8 fmax, fmin, fstart, vstart; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_unpin_work *work; + unsigned long flags; + int ret; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + work->event = event; + work->dev = crtc->dev; + intel_fb = to_intel_framebuffer(crtc->fb); + work->old_fb_obj = intel_fb->obj; + INIT_WORK(&work->work, intel_unpin_work_fn); + + ret = drm_vblank_get(dev, intel_crtc->pipe); + if (ret) + goto free_work; + + /* We borrow the event spin lock for protecting unpin_work */ + spin_lock_irqsave(&dev->event_lock, flags); + if (intel_crtc->unpin_work) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(work); + drm_vblank_put(dev, intel_crtc->pipe); - /* Enable temp reporting */ - I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + return -EBUSY; + } + intel_crtc->unpin_work = work; + 
spin_unlock_irqrestore(&dev->event_lock, flags); - /* 100ms RC evaluation intervals */ - I915_WRITE(RCUPEI, 100000); - I915_WRITE(RCDNEI, 100000); + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; - /* Set max/min thresholds to 90ms and 80ms respectively */ - I915_WRITE(RCBMAXAVG, 90000); - I915_WRITE(RCBMINAVG, 80000); + mutex_lock(&dev->struct_mutex); - I915_WRITE(MEMIHYST, 1); + /* Reference the objects for the scheduled work. */ + drm_gem_object_reference(&work->old_fb_obj->base); + drm_gem_object_reference(&obj->base); - /* Set up min, max, and cur for interrupt handling */ - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; + crtc->fb = fb; - vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; + work->pending_flip_obj = obj; - dev_priv->fmax = fmax; /* IPS callback will increase this */ - dev_priv->fstart = fstart; + work->enable_stall_check = true; - dev_priv->max_delay = fstart; - dev_priv->min_delay = fmin; - dev_priv->cur_delay = fstart; + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); + if (ret) + goto cleanup_pending; - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + intel_disable_fbc(dev); + mutex_unlock(&dev->struct_mutex); - /* - * Interrupts will be enabled in ironlake_irq_postinstall - */ + trace_i915_flip_request(intel_crtc->plane, obj); - I915_WRITE(VIDSTART, vstart); - POSTING_READ(VIDSTART); + return 0; - rgvmodectl |= MEMMODE_SWMODE_EN; - I915_WRITE(MEMMODECTL, rgvmodectl); +cleanup_pending: + atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); - if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) - DRM_ERROR("stuck trying to change perf mode\n"); - msleep(1); + spin_lock_irqsave(&dev->event_lock, flags); + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); - ironlake_set_drps(dev, fstart); + drm_vblank_put(dev, intel_crtc->pipe); +free_work: + kfree(work); - dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + - I915_READ(0x112e0); - dev_priv->last_time1 = jiffies_to_msecs(jiffies); - dev_priv->last_count2 = I915_READ(0x112f4); - getrawmonotonic(&dev_priv->last_time2); + return ret; } -void ironlake_disable_drps(struct drm_device *dev) +static void intel_sanitize_modesetting(struct drm_device *dev, + int pipe, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); - I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); - I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); - I915_WRITE(DEIIR, DE_PCU_EVENT); - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->fstart); - msleep(1); - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); - msleep(1); + u32 reg, val; -} + /* Clear any frame start delays used for debugging left by the BIOS */ + 
for_each_pipe(pipe) { + reg = PIPECONF(pipe); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + } -void gen6_set_rps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 swreq; + if (HAS_PCH_SPLIT(dev)) + return; - swreq = (val & 0x3ff) << 25; - I915_WRITE(GEN6_RPNSWREQ, swreq); -} + /* Who knows what state these registers were left in by the BIOS or + * grub? + * + * If we leave the registers in a conflicting state (e.g. with the + * display plane reading from the other pipe than the one we intend + * to use) then when we attempt to teardown the active mode, we will + * not disable the pipes and planes in the correct order -- leaving + * a plane reading from a disabled pipe and possibly leading to + * undefined behaviour. + */ -void gen6_disable_rps(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + reg = DSPCNTR(plane); + val = I915_READ(reg); - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) + return; - spin_lock_irq(&dev_priv->rps_lock); - dev_priv->pm_iir = 0; - spin_unlock_irq(&dev_priv->rps_lock); + /* This display plane is active and attached to the other CPU pipe. */ + pipe = !pipe; - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); + /* Disable the plane and wait for it to stop reading from the pipe. */ + intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); } -static unsigned long intel_pxfreq(u32 vidfreq) +static void intel_crtc_reset(struct drm_crtc *crtc) { - unsigned long freq; - int div = (vidfreq & 0x3f0000) >> 16; - int post = (vidfreq & 0x3000) >> 12; - int pre = (vidfreq & 0x7); - - if (!pre) - return 0; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - freq = ((div * 133333) / ((1<<post) * pre)); + /* Reset flags back to the 'unknown' status so that they + * will be correctly set on the initial modeset. + */ + intel_crtc->dpms_mode = -1; - return freq; + /* We need to fix up any BIOS configuration that conflicts with + * our expectations.
+ */ + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } -void intel_init_emon(struct drm_device *dev) +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .dpms = intel_crtc_dpms, + .mode_fixup = intel_crtc_mode_fixup, + .mode_set = intel_crtc_mode_set, + .mode_set_base = intel_pipe_set_base, + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, +}; + +static const struct drm_crtc_funcs intel_crtc_funcs = { + .reset = intel_crtc_reset, + .cursor_set = intel_crtc_cursor_set, + .cursor_move = intel_crtc_cursor_move, + .gamma_set = intel_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = intel_crtc_destroy, + .page_flip = intel_crtc_page_flip, +}; + +static void intel_crtc_init(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - u32 lcfuse; - u8 pxw[16]; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc; int i; - /* Disable to program */ - I915_WRITE(ECR, 0); - POSTING_READ(ECR); - - /* Program energy weights for various events */ - I915_WRITE(SDEW, 0x15040d00); - I915_WRITE(CSIEW0, 0x007f0000); - I915_WRITE(CSIEW1, 0x1e220004); - I915_WRITE(CSIEW2, 0x04000004); - - for (i = 0; i < 5; i++) - I915_WRITE(PEW + (i * 4), 0); - for (i = 0; i < 3; i++) - I915_WRITE(DEW + (i * 4), 0); - - /* Program P-state weights to account for frequency power adjustment */ - for (i = 0; i < 16; i++) { - u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); - unsigned long freq = intel_pxfreq(pxvidfreq); - unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - unsigned long val; - - val = vid * vid; - val *= (freq / 1000); - val *= 255; - val /= (127*127*900); - if (val > 0xff) - DRM_ERROR("bad pxval: %ld\n", val); - pxw[i] = val; - } - /* Render standby states get 0 weight */ - pxw[14] = 0; - pxw[15] = 0; + intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (intel_crtc == NULL) + return; - for (i = 0; i < 4; i++) { - u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | - (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); - I915_WRITE(PXW + (i * 4), val); - } + drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); - /* Adjust magic regs to magic values (more experimental results) */ - I915_WRITE(OGW0, 0); - I915_WRITE(OGW1, 0); - I915_WRITE(EG0, 0x00007f00); - I915_WRITE(EG1, 0x0000000e); - I915_WRITE(EG2, 0x000e0000); - I915_WRITE(EG3, 0x68000300); - I915_WRITE(EG4, 0x42000000); - I915_WRITE(EG5, 0x00140031); - I915_WRITE(EG6, 0); - I915_WRITE(EG7, 0); + drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); + for (i = 0; i < 256; i++) { + intel_crtc->lut_r[i] = i; + intel_crtc->lut_g[i] = i; + intel_crtc->lut_b[i] = i; + } - for (i = 0; i < 8; i++) - I915_WRITE(PXWL + (i * 4), 0); + /* Swap pipes & planes for FBC on pre-965 */ + intel_crtc->pipe = pipe; + intel_crtc->plane = pipe; + if (IS_MOBILE(dev) && IS_GEN3(dev)) { + DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); + intel_crtc->plane = !pipe; + } - /* Enable PMON + select events */ - I915_WRITE(ECR, 0x80000019); + BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; - lcfuse = I915_READ(LCFUSE02); + intel_crtc_reset(&intel_crtc->base); + intel_crtc->active = true; /* 
force the pipe off on setup_init_config */ + intel_crtc->bpp = 24; /* default for pre-Ironlake */ - dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); -} + if (HAS_PCH_SPLIT(dev)) { + if (pipe == 2 && IS_IVYBRIDGE(dev)) + intel_crtc->no_pll = true; + intel_helper_funcs.prepare = ironlake_crtc_prepare; + intel_helper_funcs.commit = ironlake_crtc_commit; + } else { + intel_helper_funcs.prepare = i9xx_crtc_prepare; + intel_helper_funcs.commit = i9xx_crtc_commit; + } -static int intel_enable_rc6(struct drm_device *dev) -{ - /* - * Respect the kernel parameter if it is set - */ - if (i915_enable_rc6 >= 0) - return i915_enable_rc6; + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); - /* - * Disable RC6 on Ironlake - */ - if (INTEL_INFO(dev)->gen == 5) - return 0; + intel_crtc->busy = false; - /* - * Disable rc6 on Sandybridge - */ - if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); - return INTEL_RC6_ENABLE; - } - DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); - return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); + setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, + (unsigned long)intel_crtc); } -void gen6_enable_rps(struct drm_i915_private *dev_priv) +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file) { - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); - u32 pcu_mbox, rc6_mask = 0; - u32 gtfifodbg; - int cur_freq, min_freq, max_freq; - int rc6_mode; - int i; - - /* Here begins a magic sequence of register writes to enable - * auto-downclocking. - * - * Perhaps there might be some value in exposing these to - * userspace... - */ - I915_WRITE(GEN6_RC_STATE, 0); - mutex_lock(&dev_priv->dev->struct_mutex); - - /* Clear the DBG now so we don't confuse earlier errors */ - if ((gtfifodbg = I915_READ(GTFIFODBG))) { - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - gen6_gt_force_wake_get(dev_priv); - - /* disable the counters and set deterministic thresholds */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for (i = 0; i < I915_NUM_RINGS; i++) - I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); - - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - - rc6_mode = intel_enable_rc6(dev_priv->dev); - if (rc6_mode & INTEL_RC6_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; - - if (rc6_mode & INTEL_RC6p_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - - if (rc6_mode & INTEL_RC6pp_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6pp_ENABLE) ? 
"on" : "off"); - - I915_WRITE(GEN6_RC_CONTROL, - rc6_mask | - GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); - - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(10) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); - I915_WRITE(GEN6_RC_VIDEO_FREQ, - GEN6_FREQUENCY(12)); - - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - 18 << 24 | - 6 << 16); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - - I915_WRITE(GEN6_PCODE_DATA, 0); - I915_WRITE(GEN6_PCODE_MAILBOX, - GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - - min_freq = (rp_state_cap & 0xff0000) >> 16; - max_freq = rp_state_cap & 0xff; - cur_freq = (gt_perf_status & 0xff00) >> 8; - - /* Check for overclock support */ - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); - pcu_mbox = I915_READ(GEN6_PCODE_DATA); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) - DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); - if (pcu_mbox & (1<<31)) { /* OC supported */ - max_freq = pcu_mbox & 0xff; - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); - } - - /* In units of 100MHz */ - dev_priv->max_delay = max_freq; - dev_priv->min_delay = min_freq; - dev_priv->cur_delay = cur_freq; - - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, - GEN6_PM_MBOX_EVENT | - GEN6_PM_THERMAL_EVENT | - GEN6_PM_RP_DOWN_TIMEOUT | - GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_UP_EI_EXPIRED | - GEN6_PM_RP_DOWN_EI_EXPIRED); - spin_lock_irq(&dev_priv->rps_lock); - WARN_ON(dev_priv->pm_iir != 0); - I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps_lock); - /* enable all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); - - gen6_gt_force_wake_put(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); -} - -void gen6_update_ring_freq(struct drm_i915_private *dev_priv) -{ - int min_freq = 15; - int gpu_freq, ia_freq, max_ia_freq; - int scaling_factor = 180; - - max_ia_freq = cpufreq_quick_get_max(0); - /* - * Default to measured freq if none found, PCU will ensure we don't go - * over - */ - if (!max_ia_freq) - max_ia_freq = tsc_khz; - - /* Convert from kHz to MHz */ - max_ia_freq /= 1000; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; + struct drm_mode_object *drmmode_obj; + struct intel_crtc *crtc; - mutex_lock(&dev_priv->dev->struct_mutex); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } - /* - * For each potential GPU frequency, load a ring frequency we'd like - * to use for memory access. We do this by specifying the IA frequency - * the PCU should use as a reference to determine the ring frequency. 
- */ - for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; - gpu_freq--) { - int diff = dev_priv->max_delay - gpu_freq; + drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, + DRM_MODE_OBJECT_CRTC); - /* - * For GPU frequencies less than 750MHz, just use the lowest - * ring freq. - */ - if (gpu_freq < min_freq) - ia_freq = 800; - else - ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); - ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); - - I915_WRITE(GEN6_PCODE_DATA, - (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | - gpu_freq); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | - GEN6_PCODE_WRITE_MIN_FREQ_TABLE); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & - GEN6_PCODE_READY) == 0, 10)) { - DRM_ERROR("pcode write of freq table timed out\n"); - continue; - } + if (!drmmode_obj) { + DRM_ERROR("no such CRTC id\n"); + return -EINVAL; } - mutex_unlock(&dev_priv->dev->struct_mutex); + crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + pipe_from_crtc_id->pipe = crtc->pipe; + + return 0; } -static void ironlake_init_clock_gating(struct drm_device *dev) +static int intel_encoder_clones(struct drm_device *dev, int type_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - /* Required for FBC */ - dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | - DPFCRUNIT_CLOCK_GATE_DISABLE | - DPFDUNIT_CLOCK_GATE_DISABLE; - /* Required for CxSR */ - dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_3DCGDIS0, - MARIUNIT_CLOCK_GATE_DISABLE | - SVSMUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(PCH_3DCGDIS1, - VFMUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - /* - * According to the spec the following bits should be set in - * order to enable memory self-refresh - * The bit 22/21 of 0x42004 - * The bit 5 of 0x42020 - * The bit 15 of 0x45000 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN2, - (I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL)); - I915_WRITE(ILK_DSPCLK_GATE, - (I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE)); - I915_WRITE(DISP_ARB_CTL, - (I915_READ(DISP_ARB_CTL) | - DISP_FBC_WM_DIS)); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); + struct intel_encoder *encoder; + int index_mask = 0; + int entry = 0; - /* - * Based on the document from hardware guys the following bits - * should be set unconditionally in order to enable FBC. - * The bit 22 of 0x42000 - * The bit 22 of 0x42004 - * The bit 7,8,9 of 0x42020. 
- */ - if (IS_IRONLAKE_M(dev)) { - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPFC_DIS1 | - ILK_DPFC_DIS2 | - ILK_CLK_FBC); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (type_mask & encoder->clone_mask) + index_mask |= (1 << entry); + entry++; } - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - I915_WRITE(_3D_CHICKEN2, - _3D_CHICKEN2_WM_READ_PIPELINED << 16 | - _3D_CHICKEN2_WM_READ_PIPELINED); + return index_mask; } -static void gen6_init_clock_gating(struct drm_device *dev) +static bool has_edp_a(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_ELPIN_409_SELECT); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - I915_WRITE(GEN6_UCGCTL1, - I915_READ(GEN6_UCGCTL1) | - GEN6_BLBUNIT_CLOCK_GATE_DISABLE); + if (!IS_MOBILE(dev)) + return false; - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock - * gating disable must be set. Failure to set it results in - * flickering pixels due to Z write ordering failures after - * some amount of runtime in the Mesa "fire" demo, and Unigine - * Sanctuary and Tropics, and apparently anything else with - * alpha test or pixel discard. - * - * According to the spec, bit 11 (RCCUNIT) must also be set, - * but we didn't debug actual testcases to find it out. - */ - I915_WRITE(GEN6_UCGCTL2, - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + if ((I915_READ(DP_A) & DP_DETECTED) == 0) + return false; - /* - * According to the spec the following bits should be - * set in order to enable memory self-refresh and fbc: - * The bit21 and bit22 of 0x42000 - * The bit21 and bit22 of 0x42004 - * The bit5 and bit7 of 0x42020 - * The bit14 of 0x70180 - * The bit14 of 0x71180 - */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | - ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE, - I915_READ(ILK_DSPCLK_GATE) | - ILK_DPARB_CLK_GATE | - ILK_DPFD_CLK_GATE); + if (IS_GEN5(dev) && + (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) + return false; - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } + return true; } -static void ivybridge_init_clock_gating(struct drm_device *dev) +static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. 
- */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - - I915_WRITE(IVB_CHICKEN3, - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | - CHICKEN3_DGMG_DONE_FIX_DISABLE); - - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + struct intel_encoder *encoder; + bool dpd_is_edp = false; + bool has_lvds; - /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, - GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); + has_lvds = intel_lvds_init(dev); + if (!has_lvds && !HAS_PCH_SPLIT(dev)) { + /* disable the panel fitter on everything but LVDS */ + I915_WRITE(PFIT_CONTROL, 0); + } - /* This is required by WaCatErrorRejectionIssue */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | - GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + if (HAS_PCH_SPLIT(dev)) { + dpd_is_edp = intel_dpd_is_edp(dev); - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } -} + if (has_edp_a(dev)) + intel_dp_init(dev, DP_A); -static void g4x_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate; + if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D); + } - I915_WRITE(RENCLK_GATE_D1, 0); - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | - GS_UNIT_CLOCK_GATE_DISABLE | - CL_UNIT_CLOCK_GATE_DISABLE); - I915_WRITE(RAMCLK_GATE_D, 0); - dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | - OVRUNIT_CLOCK_GATE_DISABLE | - OVCUNIT_CLOCK_GATE_DISABLE; - if (IS_GM45(dev)) - dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); -} + intel_crt_init(dev); -static void crestline_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + if (HAS_PCH_SPLIT(dev)) { + int found; - I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); - I915_WRITE(DSPCLK_GATE_D, 0); - I915_WRITE(RAMCLK_GATE_D, 0); - I915_WRITE16(DEUC, 0); -} + if (I915_READ(HDMIB) & PORT_DETECTED) { + /* PCH SDVOB multiplex with HDMIB */ + found = intel_sdvo_init(dev, PCH_SDVOB, true); + if (!found) + intel_hdmi_init(dev, HDMIB); + if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_B); + } -static void broadwater_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + if (I915_READ(HDMIC) & PORT_DETECTED) + intel_hdmi_init(dev, HDMIC); - I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | - I965_RCC_CLOCK_GATE_DISABLE | - I965_RCPB_CLOCK_GATE_DISABLE | - I965_ISC_CLOCK_GATE_DISABLE | - I965_FBC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); -} + if (I915_READ(HDMID) & PORT_DETECTED) + intel_hdmi_init(dev, HDMID); -static void gen3_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dstate = I915_READ(D_STATE); + if (I915_READ(PCH_DP_C) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_C); - dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | - DSTATE_DOT_CLOCK_GATING; - I915_WRITE(D_STATE, dstate); -} + if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D); -static void i85x_init_clock_gating(struct drm_device *dev) -{ - 
struct drm_i915_private *dev_priv = dev->dev_private; + } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { + bool found = false; - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); -} + if (I915_READ(SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOB\n"); + found = intel_sdvo_init(dev, SDVOB, true); + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); + intel_hdmi_init(dev, SDVOB); + } -static void i830_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + if (!found && SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_B\n"); + intel_dp_init(dev, DP_B); + } + } - I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); -} + /* Before G4X SDVOC doesn't have its own detect register */ -static void ibx_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + if (I915_READ(SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOC\n"); + found = intel_sdvo_init(dev, SDVOC, false); + } - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); -} + if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { -static void cpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; + if (SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); + intel_hdmi_init(dev, SDVOC); + } + if (SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_C\n"); + intel_dp_init(dev, DP_C); + } + } - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. 
- */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | - DPLS_EDP_PPS_FIX_DIS); - /* Without this, mode sets may fail silently on FDI */ - for_each_pipe(pipe) - I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); -} + if (SUPPORTS_INTEGRATED_DP(dev) && + (I915_READ(DP_D) & DP_DETECTED)) { + DRM_DEBUG_KMS("probing DP_D\n"); + intel_dp_init(dev, DP_D); + } + } else if (IS_GEN2(dev)) + intel_dvo_init(dev); -static void ironlake_teardown_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; + if (SUPPORTS_TV(dev)) + intel_tv_init(dev); - if (dev_priv->renderctx) { - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(&dev_priv->renderctx->base); - dev_priv->renderctx = NULL; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->base.possible_crtcs = encoder->crtc_mask; + encoder->base.possible_clones = + intel_encoder_clones(dev, encoder->clone_mask); } - if (dev_priv->pwrctx) { - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(&dev_priv->pwrctx->base); - dev_priv->pwrctx = NULL; - } + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + if (HAS_PCH_SPLIT(dev)) + ironlake_init_pch_refclk(dev); } -static void ironlake_disable_rc6(struct drm_device *dev) +static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (I915_READ(PWRCTXA)) { - /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); - wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), - 50); - - I915_WRITE(PWRCTXA, 0); - POSTING_READ(PWRCTXA); + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - POSTING_READ(RSTDBYCTL); - } + drm_framebuffer_cleanup(fb); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); - ironlake_teardown_rc6(dev); + kfree(intel_fb); } -static int ironlake_setup_rc6(struct drm_device *dev) +static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int *handle) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->renderctx == NULL) - dev_priv->renderctx = intel_alloc_context_page(dev); - if (!dev_priv->renderctx) - return -ENOMEM; - - if (dev_priv->pwrctx == NULL) - dev_priv->pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->pwrctx) { - ironlake_teardown_rc6(dev); - return -ENOMEM; - } + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; - return 0; + return drm_gem_handle_create(file, &obj->base, handle); } -void ironlake_enable_rc6(struct drm_device *dev) +static const struct drm_framebuffer_funcs intel_fb_funcs = { + .destroy = intel_user_framebuffer_destroy, + .create_handle = intel_user_framebuffer_create_handle, +}; + +int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *intel_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; - /* rc6 disabled by default due to repeated reports of hanging during - * boot and resume. 
- */ - if (!intel_enable_rc6(dev)) - return; + if (obj->tiling_mode == I915_TILING_Y) + return -EINVAL; - mutex_lock(&dev->struct_mutex); - ret = ironlake_setup_rc6(dev); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return; - } + if (mode_cmd->pitches[0] & 63) + return -EINVAL; - /* - * GPU can automatically power down the render unit if given a page - * to save state. - */ - ret = BEGIN_LP_RING(6); - if (ret) { - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); - return; + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_RGB332: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + /* RGB formats are common across chipsets */ + break; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + break; + default: + DRM_DEBUG_KMS("unsupported pixel format %u\n", + mode_cmd->pixel_format); + return -EINVAL; } - OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); - OUT_RING(MI_SET_CONTEXT); - OUT_RING(dev_priv->renderctx->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_SUSPEND_FLUSH); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); - - /* - * Wait for the command parser to advance past MI_SET_CONTEXT. The HW - * does an implicit flush, combined with MI_FLUSH above, it should be - * safe to assume that renderctx is valid - */ - ret = intel_wait_ring_idle(LP_RING(dev_priv)); + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { - DRM_ERROR("failed to enable ironlake power power savings\n"); - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); - return; + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; } - I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - mutex_unlock(&dev->struct_mutex); + drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); + intel_fb->obj = obj; + return 0; } -void intel_init_clock_gating(struct drm_device *dev) +static struct drm_framebuffer * +intel_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; - dev_priv->display.init_clock_gating(dev); + obj = to_intel_bo(drm_gem_object_lookup(dev, filp, + mode_cmd->handles[0])); + if (&obj->base == NULL) + return ERR_PTR(-ENOENT); - if (dev_priv->display.init_pch_clock_gating) - dev_priv->display.init_pch_clock_gating(dev); + return intel_framebuffer_create(dev, mode_cmd, obj); } +static const struct drm_mode_config_funcs intel_mode_funcs = { + .fb_create = intel_user_framebuffer_create, + .output_poll_changed = intel_fb_output_poll_changed, +}; + /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) {