/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 * to go to lower standby states when the system is idle but the display is
 * on, since it completely eliminates display refresh requests to DDR memory
 * as long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
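
/*
 * Illustrative sketch only (not part of this file): how the software
 * frontbuffer tracking described above is expected to bracket a CPU
 * frontbuffer write with the two entry points below. The caller name is
 * hypothetical; the real call sites live in the frontbuffer tracking code.
 *
 *	void example_frontbuffer_cpu_write(struct drm_device *dev,
 *					   unsigned frontbuffer_bits)
 *	{
 *		// rendering is about to dirty the frontbuffer: exit PSR
 *		intel_psr_invalidate(dev, frontbuffer_bits);
 *
 *		// ... CPU writes to the frontbuffer happen here ...
 *
 *		// writes have landed in memory: PSR may be re-enabled
 *		// later from the delayed work scheduled by the flush
 *		intel_psr_flush(dev, frontbuffer_bits);
 *	}
 */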
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per Bspec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	bool only_standby = dev_priv->vbt.psr.full_link;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1, /* AUX header: payload length minus one */
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	/* It was recently identified that, depending on the panel, the idle
	 * frame count calculated by the hardware can be off by 1. So use the
	 * value from the VBT + 1, and at minimum 2, to be on the safe side.
	 */
	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
			       dev_priv->vbt.psr.idle_frames + 1 : 2;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
	bool only_standby = false;

	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	/* Below limitations aren't valid for Broadwell */
	if (IS_BROADWELL(dev))
		goto out;

	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

 out:
	dev_priv->psr.source_ok = true;
	return true;
}
static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	intel_psr_enable_source(intel_dp);

	dev_priv->psr.active = true;
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	/* Enable PSR on the panel */
	intel_psr_enable_sink(intel_dp);

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL(dev),
			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	}

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;

	/* We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
		      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_do_enable(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active) {
		u32 val = I915_READ(EDP_PSR_CTL(dev));

		WARN_ON(!(val & EDP_PSR_ENABLE));

		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);

		dev_priv->psr.active = false;
	}
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	intel_psr_exit(dev);

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * On Haswell, sprite plane updates don't result in a PSR invalidating
	 * signal in the hardware, which means we need to manually fake this
	 * in software for all flushes, not just when we've seen a preceding
	 * invalidation through frontbuffer rendering.
	 */
	if (IS_HASWELL(dev) &&
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
		intel_psr_exit(dev);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR work and mutex.
 */
void intel_psr_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}