2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
49 static const struct dp_link_dpll gen4_dpll[] = {
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
56 static const struct dp_link_dpll pch_dpll[] = {
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
63 static const struct dp_link_dpll vlv_dpll[] = {
65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
74 static const struct dp_link_dpll chv_dpll[] = {
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Skylake supports following rates */
static const int gen9_rates[] = {
	162000,
	216000,
	270000,
	324000,
	432000,
	540000,
};
/* Link rates (kHz) supported by the Cherryview source. */
static const int chv_rates[] = {
	162000,
	202500,
	210000,
	216000,
	243000,
	270000,
	324000,
	405000,
	420000,
	432000,
	540000,
};
93 static const int default_rates[] = { 162000, 270000, 540000 };
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
102 static bool is_edp(struct intel_dp *intel_dp)
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 return intel_dig_port->base.base.dev;
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
129 intel_dp_max_link_bw(struct intel_dp *intel_dp)
131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141 max_link_bw = DP_LINK_BW_1_62;
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160 return min(source_max, sink_max);
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

/* Bandwidth (decakilobits/s) needed for the given pixel clock and bpp. */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * Usable data rate (decakilobits/s) of a link: 8b/10b encoding leaves
 * 8 data bits for every 10 symbol bits on the wire.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
196 struct intel_dp *intel_dp = intel_attached_dp(connector);
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
206 if (mode->vdisplay > fixed_mode->vdisplay)
209 target_clock = fixed_mode->clock;
212 max_link_clock = intel_dp_max_link_rate(intel_dp);
213 max_lanes = intel_dp_max_lane_count(intel_dp);
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
218 if (mode_rate > max_rate)
219 return MODE_CLOCK_HIGH;
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
/* Pack up to 4 bytes, MSB first, into one 32-bit AUX data register value. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/* Unpack a 32-bit AUX data register value into up to 4 bytes, MSB first. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
251 /* hrawclock is 1/4 the FSB frequency */
253 intel_hrawclk(struct drm_device *dev)
255 struct drm_i915_private *dev_priv = dev->dev_private;
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_1067:
274 case CLKCFG_FSB_1333:
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
292 static void pps_lock(struct intel_dp *intel_dp)
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
307 mutex_lock(&dev_priv->pps_mutex);
310 static void pps_unlock(struct intel_dp *intel_dp)
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
318 mutex_unlock(&dev_priv->pps_mutex);
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable temporarily it if it's not already enabled.
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power seqeuencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
381 vlv_force_pll_off(dev, pipe);
385 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
394 lockdep_assert_held(&dev_priv->pps_mutex);
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
403 * We don't have power sequencer currently.
404 * Pick one that's not used by other ports.
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
408 struct intel_dp *tmp;
410 if (encoder->type != INTEL_OUTPUT_EDP)
413 tmp = enc_to_intel_dp(&encoder->base);
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
423 if (WARN_ON(pipes == 0))
426 pipe = ffs(pipes) - 1;
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
435 /* init power sequencer on this pipe and port */
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
443 vlv_power_sequencer_kick(intel_dp);
445 return intel_dp->pps_pipe;
448 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
451 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
457 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
463 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
472 vlv_pipe_check pipe_check)
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
483 if (!pipe_check(dev_priv, pipe))
493 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 enum port port = intel_dig_port->port;
500 lockdep_assert_held(&dev_priv->pps_mutex);
502 /* try to find a pipe with this port selected */
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
529 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
550 if (encoder->type != INTEL_OUTPUT_EDP)
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
578 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
579 This function only applicable when panel PM state is not to be tracked */
580 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
588 u32 pp_ctrl_reg, pp_div_reg;
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
595 if (IS_VALLEYVIEW(dev)) {
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
609 pps_unlock(intel_dp);
614 static bool edp_have_panel_power(struct intel_dp *intel_dp)
616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
617 struct drm_i915_private *dev_priv = dev->dev_private;
619 lockdep_assert_held(&dev_priv->pps_mutex);
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
628 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
631 struct drm_i915_private *dev_priv = dev->dev_private;
633 lockdep_assert_held(&dev_priv->pps_mutex);
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 intel_dp_check_edp(struct intel_dp *intel_dp)
645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
646 struct drm_i915_private *dev_priv = dev->dev_private;
648 if (!is_edp(intel_dp))
651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
660 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
669 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
672 msecs_to_jiffies_timeout(10));
674 done = wait_for_atomic(C, 10) == 0;
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
689 * The clock divider is based off the hrawclk, and would like to run at
690 * 2MHz. So, take the hrawclk value and divide by 2 and use that
692 return index ? 0 : intel_hrawclk(dev) / 2;
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
707 return 225; /* eDP input clock at 450Mhz */
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
719 if (intel_dig_port->port == PORT_A) {
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/* VLV AUX runs from a fixed 200 MHz reference: divider 100 gives 2 MHz. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
750 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
753 uint32_t aux_clock_divider)
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769 return DP_AUX_CH_CTL_SEND_BUSY |
771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
774 DP_AUX_CH_CTL_RECEIVE_ERROR |
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
780 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 return DP_AUX_CH_CTL_SEND_BUSY |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
796 intel_dp_aux_ch(struct intel_dp *intel_dp,
797 const uint8_t *send, int send_bytes,
798 uint8_t *recv, int recv_size)
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
805 uint32_t aux_clock_divider;
806 int i, ret, recv_bytes;
809 bool has_aux_irq = HAS_AUX_IRQ(dev);
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
817 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
820 vdd = edp_panel_vdd_on(intel_dp);
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
828 intel_dp_check_edp(intel_dp);
830 intel_aux_display_runtime_get(dev_priv);
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
834 status = I915_READ_NOTRACE(ch_ctl);
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
864 intel_dp_pack_aux(send + i,
867 /* Send the command and wait for it to complete */
868 I915_WRITE(ch_ctl, send_ctl);
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
872 /* Clear done status and any errors */
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
882 if (status & DP_AUX_CH_CTL_DONE)
887 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
888 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
894 /* Check for timeout or receive error.
895 * Timeouts occur when the sink is not connected
897 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
898 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
903 /* Timeouts occur when the device isn't connected, so they're
904 * "normal" -- don't fill the kernel log with these */
905 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
906 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
911 /* Unload any bytes sent back from the other side */
912 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
913 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
914 if (recv_bytes > recv_size)
915 recv_bytes = recv_size;
917 for (i = 0; i < recv_bytes; i += 4)
918 intel_dp_unpack_aux(I915_READ(ch_data + i),
919 recv + i, recv_bytes - i);
923 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
924 intel_aux_display_runtime_put(dev_priv);
927 edp_panel_vdd_off(intel_dp, false);
929 pps_unlock(intel_dp);
934 #define BARE_ADDRESS_SIZE 3
935 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
937 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
939 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
940 uint8_t txbuf[20], rxbuf[20];
941 size_t txsize, rxsize;
944 txbuf[0] = (msg->request << 4) |
945 ((msg->address >> 16) & 0xf);
946 txbuf[1] = (msg->address >> 8) & 0xff;
947 txbuf[2] = msg->address & 0xff;
948 txbuf[3] = msg->size - 1;
950 switch (msg->request & ~DP_AUX_I2C_MOT) {
951 case DP_AUX_NATIVE_WRITE:
952 case DP_AUX_I2C_WRITE:
953 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
954 rxsize = 2; /* 0 or 1 data bytes */
956 if (WARN_ON(txsize > 20))
959 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
961 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
963 msg->reply = rxbuf[0] >> 4;
966 /* Number of bytes written in a short write. */
967 ret = clamp_t(int, rxbuf[1], 0, msg->size);
969 /* Return payload size. */
975 case DP_AUX_NATIVE_READ:
976 case DP_AUX_I2C_READ:
977 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
978 rxsize = msg->size + 1;
980 if (WARN_ON(rxsize > 20))
983 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
985 msg->reply = rxbuf[0] >> 4;
987 * Assume happy day, and copy the data. The caller is
988 * expected to check msg->reply before touching it.
990 * Return payload size.
993 memcpy(msg->buffer, rxbuf + 1, ret);
1006 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1008 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1009 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1010 enum port port = intel_dig_port->port;
1011 const char *name = NULL;
1016 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1020 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1024 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1028 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1036 * The AUX_CTL register is usually DP_CTL + 0x10.
1038 * On Haswell and Broadwell though:
1039 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1040 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1042 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1044 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1045 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1047 intel_dp->aux.name = name;
1048 intel_dp->aux.dev = dev->dev;
1049 intel_dp->aux.transfer = intel_dp_aux_transfer;
1051 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1052 connector->base.kdev->kobj.name);
1054 ret = drm_dp_aux_register(&intel_dp->aux);
1056 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1061 ret = sysfs_create_link(&connector->base.kdev->kobj,
1062 &intel_dp->aux.ddc.dev.kobj,
1063 intel_dp->aux.ddc.dev.kobj.name);
1065 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1066 drm_dp_aux_unregister(&intel_dp->aux);
1071 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1073 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1075 if (!intel_connector->mst_port)
1076 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1077 intel_dp->aux.ddc.dev.kobj.name);
1078 intel_connector_unregister(intel_connector);
1082 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1086 pipe_config->ddi_pll_sel = SKL_DPLL0;
1087 pipe_config->dpll_hw_state.cfgcr1 = 0;
1088 pipe_config->dpll_hw_state.cfgcr2 = 0;
1090 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1091 switch (link_clock / 2) {
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1101 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1105 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1108 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1109 results in CDCLK change. Need to handle the change of CDCLK by
1110 disabling pipes and re-enabling them */
1112 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1116 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1121 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1125 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1128 case DP_LINK_BW_1_62:
1129 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1131 case DP_LINK_BW_2_7:
1132 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1134 case DP_LINK_BW_5_4:
1135 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1141 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1143 if (intel_dp->num_sink_rates) {
1144 *sink_rates = intel_dp->sink_rates;
1145 return intel_dp->num_sink_rates;
1148 *sink_rates = default_rates;
1150 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1154 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1156 if (INTEL_INFO(dev)->gen >= 9) {
1157 *source_rates = gen9_rates;
1158 return ARRAY_SIZE(gen9_rates);
1159 } else if (IS_CHERRYVIEW(dev)) {
1160 *source_rates = chv_rates;
1161 return ARRAY_SIZE(chv_rates);
1164 *source_rates = default_rates;
1166 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1167 /* WaDisableHBR2:skl */
1168 return (DP_LINK_BW_2_7 >> 3) + 1;
1169 else if (INTEL_INFO(dev)->gen >= 8 ||
1170 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1171 return (DP_LINK_BW_5_4 >> 3) + 1;
1173 return (DP_LINK_BW_2_7 >> 3) + 1;
1177 intel_dp_set_clock(struct intel_encoder *encoder,
1178 struct intel_crtc_state *pipe_config, int link_bw)
1180 struct drm_device *dev = encoder->base.dev;
1181 const struct dp_link_dpll *divisor = NULL;
1185 divisor = gen4_dpll;
1186 count = ARRAY_SIZE(gen4_dpll);
1187 } else if (HAS_PCH_SPLIT(dev)) {
1189 count = ARRAY_SIZE(pch_dpll);
1190 } else if (IS_CHERRYVIEW(dev)) {
1192 count = ARRAY_SIZE(chv_dpll);
1193 } else if (IS_VALLEYVIEW(dev)) {
1195 count = ARRAY_SIZE(vlv_dpll);
1198 if (divisor && count) {
1199 for (i = 0; i < count; i++) {
1200 if (link_bw == divisor[i].link_bw) {
1201 pipe_config->dpll = divisor[i].dpll;
1202 pipe_config->clock_set = true;
1209 static int intersect_rates(const int *source_rates, int source_len,
1210 const int *sink_rates, int sink_len,
1213 int i = 0, j = 0, k = 0;
1215 while (i < source_len && j < sink_len) {
1216 if (source_rates[i] == sink_rates[j]) {
1217 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1219 common_rates[k] = source_rates[i];
1223 } else if (source_rates[i] < sink_rates[j]) {
/* Fill @common_rates with rates supported by both source and sink. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
/*
 * Format @nelem ints from @array into @str as "a,b,c," (trailing comma),
 * stopping early on truncation. @str is always NUL-terminated.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
1263 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1265 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1266 const int *source_rates, *sink_rates;
1267 int source_len, sink_len, common_len;
1268 int common_rates[DP_MAX_SUPPORTED_RATES];
1269 char str[128]; /* FIXME: too big for stack? */
1271 if ((drm_debug & DRM_UT_KMS) == 0)
1274 source_len = intel_dp_source_rates(dev, &source_rates);
1275 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1276 DRM_DEBUG_KMS("source rates: %s\n", str);
1278 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1279 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1280 DRM_DEBUG_KMS("sink rates: %s\n", str);
1282 common_len = intel_dp_common_rates(intel_dp, common_rates);
1283 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1284 DRM_DEBUG_KMS("common rates: %s\n", str);
1287 static int rate_to_index(int find, const int *rates)
1291 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1292 if (find == rates[i])
1299 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1301 int rates[DP_MAX_SUPPORTED_RATES] = {};
1304 len = intel_dp_common_rates(intel_dp, rates);
1305 if (WARN_ON(len <= 0))
1308 return rates[rate_to_index(0, rates) - 1];
1311 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1313 return rate_to_index(rate, intel_dp->sink_rates);
1317 intel_dp_compute_config(struct intel_encoder *encoder,
1318 struct intel_crtc_state *pipe_config)
1320 struct drm_device *dev = encoder->base.dev;
1321 struct drm_i915_private *dev_priv = dev->dev_private;
1322 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1323 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1324 enum port port = dp_to_dig_port(intel_dp)->port;
1325 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1326 struct intel_connector *intel_connector = intel_dp->attached_connector;
1327 int lane_count, clock;
1328 int min_lane_count = 1;
1329 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1330 /* Conveniently, the link BW constants become indices with a shift...*/
1334 int link_avail, link_clock;
1335 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1338 common_len = intel_dp_common_rates(intel_dp, common_rates);
1340 /* No common link rates between source and sink */
1341 WARN_ON(common_len <= 0);
1343 max_clock = common_len - 1;
1345 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1346 pipe_config->has_pch_encoder = true;
1348 pipe_config->has_dp_encoder = true;
1349 pipe_config->has_drrs = false;
1350 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1352 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1353 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1355 if (!HAS_PCH_SPLIT(dev))
1356 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1357 intel_connector->panel.fitting_mode);
1359 intel_pch_panel_fitting(intel_crtc, pipe_config,
1360 intel_connector->panel.fitting_mode);
1363 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1366 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1367 "max bw %d pixel clock %iKHz\n",
1368 max_lane_count, common_rates[max_clock],
1369 adjusted_mode->crtc_clock);
1371 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1372 * bpc in between. */
1373 bpp = pipe_config->pipe_bpp;
1374 if (is_edp(intel_dp)) {
1375 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1376 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1377 dev_priv->vbt.edp_bpp);
1378 bpp = dev_priv->vbt.edp_bpp;
1382 * Use the maximum clock and number of lanes the eDP panel
1383 * advertizes being capable of. The panels are generally
1384 * designed to support only a single clock and lane
1385 * configuration, and typically these values correspond to the
1386 * native resolution of the panel.
1388 min_lane_count = max_lane_count;
1389 min_clock = max_clock;
1392 for (; bpp >= 6*3; bpp -= 2*3) {
1393 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1396 for (clock = min_clock; clock <= max_clock; clock++) {
1397 for (lane_count = min_lane_count;
1398 lane_count <= max_lane_count;
1401 link_clock = common_rates[clock];
1402 link_avail = intel_dp_max_data_rate(link_clock,
1405 if (mode_rate <= link_avail) {
1415 if (intel_dp->color_range_auto) {
1418 * CEA-861-E - 5.1 Default Encoding Parameters
1419 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1421 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1422 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1424 intel_dp->color_range = 0;
1427 if (intel_dp->color_range)
1428 pipe_config->limited_color_range = true;
1430 intel_dp->lane_count = lane_count;
1432 if (intel_dp->num_sink_rates) {
1433 intel_dp->link_bw = 0;
1434 intel_dp->rate_select =
1435 intel_dp_rate_select(intel_dp, common_rates[clock]);
1438 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1439 intel_dp->rate_select = 0;
1442 pipe_config->pipe_bpp = bpp;
1443 pipe_config->port_clock = common_rates[clock];
1445 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1446 intel_dp->link_bw, intel_dp->lane_count,
1447 pipe_config->port_clock, bpp);
1448 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1449 mode_rate, link_avail);
1451 intel_link_compute_m_n(bpp, lane_count,
1452 adjusted_mode->crtc_clock,
1453 pipe_config->port_clock,
1454 &pipe_config->dp_m_n);
1456 if (intel_connector->panel.downclock_mode != NULL &&
1457 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1458 pipe_config->has_drrs = true;
1459 intel_link_compute_m_n(bpp, lane_count,
1460 intel_connector->panel.downclock_mode->clock,
1461 pipe_config->port_clock,
1462 &pipe_config->dp_m2_n2);
1465 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1466 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1467 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1468 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1470 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1475 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1477 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1478 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1479 struct drm_device *dev = crtc->base.dev;
1480 struct drm_i915_private *dev_priv = dev->dev_private;
1483 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1484 crtc->config->port_clock);
1485 dpa_ctl = I915_READ(DP_A);
1486 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1488 if (crtc->config->port_clock == 162000) {
1489 /* For a long time we've carried around a ILK-DevA w/a for the
1490 * 160MHz clock. If we're really unlucky, it's still required.
1492 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1493 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1494 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1496 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1497 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1500 I915_WRITE(DP_A, dpa_ctl);
1506 static void intel_dp_prepare(struct intel_encoder *encoder)
1508 struct drm_device *dev = encoder->base.dev;
1509 struct drm_i915_private *dev_priv = dev->dev_private;
1510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1511 enum port port = dp_to_dig_port(intel_dp)->port;
1512 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1513 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1516 * There are four kinds of DP registers:
1523 * IBX PCH and CPU are the same for almost everything,
1524 * except that the CPU DP PLL is configured in this
1527 * CPT PCH is quite different, having many bits moved
1528 * to the TRANS_DP_CTL register instead. That
1529 * configuration happens (oddly) in ironlake_pch_enable
1532 /* Preserve the BIOS-computed detected bit. This is
1533 * supposed to be read-only.
1535 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1537 /* Handle DP bits in common between all three register formats */
1538 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1539 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1541 if (crtc->config->has_audio)
1542 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1544 /* Split out the IBX/CPU vs CPT settings */
1546 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1547 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1548 intel_dp->DP |= DP_SYNC_HS_HIGH;
1549 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1550 intel_dp->DP |= DP_SYNC_VS_HIGH;
1551 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1553 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1554 intel_dp->DP |= DP_ENHANCED_FRAMING;
1556 intel_dp->DP |= crtc->pipe << 29;
1557 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1558 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1559 intel_dp->DP |= intel_dp->color_range;
1561 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1562 intel_dp->DP |= DP_SYNC_HS_HIGH;
1563 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1564 intel_dp->DP |= DP_SYNC_VS_HIGH;
1565 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1567 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1568 intel_dp->DP |= DP_ENHANCED_FRAMING;
1570 if (!IS_CHERRYVIEW(dev)) {
1571 if (crtc->pipe == 1)
1572 intel_dp->DP |= DP_PIPEB_SELECT;
1574 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1577 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1581 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1582 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1584 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1585 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1587 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1588 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1590 static void wait_panel_status(struct intel_dp *intel_dp,
1594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1595 struct drm_i915_private *dev_priv = dev->dev_private;
1596 u32 pp_stat_reg, pp_ctrl_reg;
1598 lockdep_assert_held(&dev_priv->pps_mutex);
1600 pp_stat_reg = _pp_stat_reg(intel_dp);
1601 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1603 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1605 I915_READ(pp_stat_reg),
1606 I915_READ(pp_ctrl_reg));
1608 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1609 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1610 I915_READ(pp_stat_reg),
1611 I915_READ(pp_ctrl_reg));
1614 DRM_DEBUG_KMS("Wait complete\n");
1617 static void wait_panel_on(struct intel_dp *intel_dp)
1619 DRM_DEBUG_KMS("Wait for panel power on\n");
1620 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1623 static void wait_panel_off(struct intel_dp *intel_dp)
1625 DRM_DEBUG_KMS("Wait for panel power off time\n");
1626 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1629 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1631 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1633 /* When we disable the VDD override bit last we have to do the manual
1635 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1636 intel_dp->panel_power_cycle_delay);
1638 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1641 static void wait_backlight_on(struct intel_dp *intel_dp)
1643 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1644 intel_dp->backlight_on_delay);
1647 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1649 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1650 intel_dp->backlight_off_delay);
1653 /* Read the current pp_control value, unlocking the register if it
1657 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1659 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1660 struct drm_i915_private *dev_priv = dev->dev_private;
1663 lockdep_assert_held(&dev_priv->pps_mutex);
1665 control = I915_READ(_pp_ctrl_reg(intel_dp));
1666 control &= ~PANEL_UNLOCK_MASK;
1667 control |= PANEL_UNLOCK_REGS;
1672 * Must be paired with edp_panel_vdd_off().
1673 * Must hold pps_mutex around the whole on/off sequence.
1674 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1678 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1680 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1681 struct drm_i915_private *dev_priv = dev->dev_private;
1682 enum intel_display_power_domain power_domain;
1684 u32 pp_stat_reg, pp_ctrl_reg;
1685 bool need_to_disable = !intel_dp->want_panel_vdd;
1687 lockdep_assert_held(&dev_priv->pps_mutex);
1689 if (!is_edp(intel_dp))
1692 cancel_delayed_work(&intel_dp->panel_vdd_work);
1693 intel_dp->want_panel_vdd = true;
1695 if (edp_have_panel_vdd(intel_dp))
1696 return need_to_disable;
1698 power_domain = intel_display_port_power_domain(intel_encoder);
1699 intel_display_power_get(dev_priv, power_domain);
1701 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1702 port_name(intel_dig_port->port));
1704 if (!edp_have_panel_power(intel_dp))
1705 wait_panel_power_cycle(intel_dp);
1707 pp = ironlake_get_pp_control(intel_dp);
1708 pp |= EDP_FORCE_VDD;
1710 pp_stat_reg = _pp_stat_reg(intel_dp);
1711 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1713 I915_WRITE(pp_ctrl_reg, pp);
1714 POSTING_READ(pp_ctrl_reg);
1715 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1716 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1718 * If the panel wasn't on, delay before accessing aux channel
1720 if (!edp_have_panel_power(intel_dp)) {
1721 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1722 port_name(intel_dig_port->port));
1723 msleep(intel_dp->panel_power_up_delay);
1726 return need_to_disable;
1730 * Must be paired with intel_edp_panel_vdd_off() or
1731 * intel_edp_panel_off().
1732 * Nested calls to these functions are not allowed since
1733 * we drop the lock. Caller must use some higher level
1734 * locking to prevent nested calls from other threads.
1736 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1740 if (!is_edp(intel_dp))
1744 vdd = edp_panel_vdd_on(intel_dp);
1745 pps_unlock(intel_dp);
1747 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1748 port_name(dp_to_dig_port(intel_dp)->port));
1751 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1753 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1754 struct drm_i915_private *dev_priv = dev->dev_private;
1755 struct intel_digital_port *intel_dig_port =
1756 dp_to_dig_port(intel_dp);
1757 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1758 enum intel_display_power_domain power_domain;
1760 u32 pp_stat_reg, pp_ctrl_reg;
1762 lockdep_assert_held(&dev_priv->pps_mutex);
1764 WARN_ON(intel_dp->want_panel_vdd);
1766 if (!edp_have_panel_vdd(intel_dp))
1769 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1770 port_name(intel_dig_port->port));
1772 pp = ironlake_get_pp_control(intel_dp);
1773 pp &= ~EDP_FORCE_VDD;
1775 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1776 pp_stat_reg = _pp_stat_reg(intel_dp);
1778 I915_WRITE(pp_ctrl_reg, pp);
1779 POSTING_READ(pp_ctrl_reg);
1781 /* Make sure sequencer is idle before allowing subsequent activity */
1782 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1783 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1785 if ((pp & POWER_TARGET_ON) == 0)
1786 intel_dp->last_power_cycle = jiffies;
1788 power_domain = intel_display_port_power_domain(intel_encoder);
1789 intel_display_power_put(dev_priv, power_domain);
1792 static void edp_panel_vdd_work(struct work_struct *__work)
1794 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1795 struct intel_dp, panel_vdd_work);
1798 if (!intel_dp->want_panel_vdd)
1799 edp_panel_vdd_off_sync(intel_dp);
1800 pps_unlock(intel_dp);
1803 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805 unsigned long delay;
1808 * Queue the timer to fire a long time from now (relative to the power
1809 * down delay) to keep the panel power up across a sequence of
1812 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1813 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1817 * Must be paired with edp_panel_vdd_on().
1818 * Must hold pps_mutex around the whole on/off sequence.
1819 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1823 struct drm_i915_private *dev_priv =
1824 intel_dp_to_dev(intel_dp)->dev_private;
1826 lockdep_assert_held(&dev_priv->pps_mutex);
1828 if (!is_edp(intel_dp))
1831 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1832 port_name(dp_to_dig_port(intel_dp)->port));
1834 intel_dp->want_panel_vdd = false;
1837 edp_panel_vdd_off_sync(intel_dp);
1839 edp_panel_vdd_schedule_off(intel_dp);
1842 static void edp_panel_on(struct intel_dp *intel_dp)
1844 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1845 struct drm_i915_private *dev_priv = dev->dev_private;
1849 lockdep_assert_held(&dev_priv->pps_mutex);
1851 if (!is_edp(intel_dp))
1854 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1855 port_name(dp_to_dig_port(intel_dp)->port));
1857 if (WARN(edp_have_panel_power(intel_dp),
1858 "eDP port %c panel power already on\n",
1859 port_name(dp_to_dig_port(intel_dp)->port)))
1862 wait_panel_power_cycle(intel_dp);
1864 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1865 pp = ironlake_get_pp_control(intel_dp);
1867 /* ILK workaround: disable reset around power sequence */
1868 pp &= ~PANEL_POWER_RESET;
1869 I915_WRITE(pp_ctrl_reg, pp);
1870 POSTING_READ(pp_ctrl_reg);
1873 pp |= POWER_TARGET_ON;
1875 pp |= PANEL_POWER_RESET;
1877 I915_WRITE(pp_ctrl_reg, pp);
1878 POSTING_READ(pp_ctrl_reg);
1880 wait_panel_on(intel_dp);
1881 intel_dp->last_power_on = jiffies;
1884 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1885 I915_WRITE(pp_ctrl_reg, pp);
1886 POSTING_READ(pp_ctrl_reg);
/* Locked wrapper around edp_panel_on(). */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1901 static void edp_panel_off(struct intel_dp *intel_dp)
1903 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1904 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1905 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1906 struct drm_i915_private *dev_priv = dev->dev_private;
1907 enum intel_display_power_domain power_domain;
1911 lockdep_assert_held(&dev_priv->pps_mutex);
1913 if (!is_edp(intel_dp))
1916 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1917 port_name(dp_to_dig_port(intel_dp)->port));
1919 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1920 port_name(dp_to_dig_port(intel_dp)->port));
1922 pp = ironlake_get_pp_control(intel_dp);
1923 /* We need to switch off panel power _and_ force vdd, for otherwise some
1924 * panels get very unhappy and cease to work. */
1925 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1928 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1930 intel_dp->want_panel_vdd = false;
1932 I915_WRITE(pp_ctrl_reg, pp);
1933 POSTING_READ(pp_ctrl_reg);
1935 intel_dp->last_power_cycle = jiffies;
1936 wait_panel_off(intel_dp);
1938 /* We got a reference when we enabled the VDD. */
1939 power_domain = intel_display_port_power_domain(intel_encoder);
1940 intel_display_power_put(dev_priv, power_domain);
/* Locked wrapper around edp_panel_off(). */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1953 /* Enable backlight in the panel power control. */
1954 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1956 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1957 struct drm_device *dev = intel_dig_port->base.base.dev;
1958 struct drm_i915_private *dev_priv = dev->dev_private;
1963 * If we enable the backlight right away following a panel power
1964 * on, we may see slight flicker as the panel syncs with the eDP
1965 * link. So delay a bit to make sure the image is solid before
1966 * allowing it to appear.
1968 wait_backlight_on(intel_dp);
1972 pp = ironlake_get_pp_control(intel_dp);
1973 pp |= EDP_BLC_ENABLE;
1975 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
1980 pps_unlock(intel_dp);
1983 /* Enable backlight PWM and backlight PP control. */
1984 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986 if (!is_edp(intel_dp))
1989 DRM_DEBUG_KMS("\n");
1991 intel_panel_enable_backlight(intel_dp->attached_connector);
1992 _intel_edp_backlight_on(intel_dp);
1995 /* Disable backlight in the panel power control. */
1996 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1998 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1999 struct drm_i915_private *dev_priv = dev->dev_private;
2003 if (!is_edp(intel_dp))
2008 pp = ironlake_get_pp_control(intel_dp);
2009 pp &= ~EDP_BLC_ENABLE;
2011 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2013 I915_WRITE(pp_ctrl_reg, pp);
2014 POSTING_READ(pp_ctrl_reg);
2016 pps_unlock(intel_dp);
2018 intel_dp->last_backlight_off = jiffies;
2019 edp_wait_backlight_off(intel_dp);
2022 /* Disable backlight PP control and backlight PWM. */
2023 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025 if (!is_edp(intel_dp))
2028 DRM_DEBUG_KMS("\n");
2030 _intel_edp_backlight_off(intel_dp);
2031 intel_panel_disable_backlight(intel_dp->attached_connector);
2035 * Hook for controlling the panel power control backlight through the bl_power
2036 * sysfs attribute. Take care to handle multiple calls.
2038 static void intel_edp_backlight_power(struct intel_connector *connector,
2041 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2045 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2046 pps_unlock(intel_dp);
2048 if (is_enabled == enable)
2051 DRM_DEBUG_KMS("panel power control backlight %s\n",
2052 enable ? "enable" : "disable");
2055 _intel_edp_backlight_on(intel_dp);
2057 _intel_edp_backlight_off(intel_dp);
2060 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2062 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2063 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2064 struct drm_device *dev = crtc->dev;
2065 struct drm_i915_private *dev_priv = dev->dev_private;
2068 assert_pipe_disabled(dev_priv,
2069 to_intel_crtc(crtc)->pipe);
2071 DRM_DEBUG_KMS("\n");
2072 dpa_ctl = I915_READ(DP_A);
2073 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2074 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076 /* We don't adjust intel_dp->DP while tearing down the link, to
2077 * facilitate link retraining (e.g. after hotplug). Hence clear all
2078 * enable bits here to ensure that we don't enable too much. */
2079 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2080 intel_dp->DP |= DP_PLL_ENABLE;
2081 I915_WRITE(DP_A, intel_dp->DP);
2086 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2090 struct drm_device *dev = crtc->dev;
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2094 assert_pipe_disabled(dev_priv,
2095 to_intel_crtc(crtc)->pipe);
2097 dpa_ctl = I915_READ(DP_A);
2098 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2099 "dp pll off, should be on\n");
2100 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102 /* We can't rely on the value tracked for the DP register in
2103 * intel_dp->DP because link_down must not change that (otherwise link
2104 * re-training will fail. */
2105 dpa_ctl &= ~DP_PLL_ENABLE;
2106 I915_WRITE(DP_A, dpa_ctl);
2111 /* If the sink supports it, try to set the power state appropriately */
2112 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2116 /* Should have a valid DPCD by this point */
2117 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2120 if (mode != DRM_MODE_DPMS_ON) {
2121 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2125 * When turning on, we need to retry for 1ms to give the sink
2128 for (i = 0; i < 3; i++) {
2129 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2138 DRM_DEBUG_KMS("failed to %s sink power state\n",
2139 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2142 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2145 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2146 enum port port = dp_to_dig_port(intel_dp)->port;
2147 struct drm_device *dev = encoder->base.dev;
2148 struct drm_i915_private *dev_priv = dev->dev_private;
2149 enum intel_display_power_domain power_domain;
2152 power_domain = intel_display_port_power_domain(encoder);
2153 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2156 tmp = I915_READ(intel_dp->output_reg);
2158 if (!(tmp & DP_PORT_EN))
2161 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2162 *pipe = PORT_TO_PIPE_CPT(tmp);
2163 } else if (IS_CHERRYVIEW(dev)) {
2164 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2165 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2166 *pipe = PORT_TO_PIPE(tmp);
2172 switch (intel_dp->output_reg) {
2174 trans_sel = TRANS_DP_PORT_SEL_B;
2177 trans_sel = TRANS_DP_PORT_SEL_C;
2180 trans_sel = TRANS_DP_PORT_SEL_D;
2186 for_each_pipe(dev_priv, i) {
2187 trans_dp = I915_READ(TRANS_DP_CTL(i));
2188 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2194 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2195 intel_dp->output_reg);
2201 static void intel_dp_get_config(struct intel_encoder *encoder,
2202 struct intel_crtc_state *pipe_config)
2204 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2206 struct drm_device *dev = encoder->base.dev;
2207 struct drm_i915_private *dev_priv = dev->dev_private;
2208 enum port port = dp_to_dig_port(intel_dp)->port;
2209 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2212 tmp = I915_READ(intel_dp->output_reg);
2214 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2216 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2217 if (tmp & DP_SYNC_HS_HIGH)
2218 flags |= DRM_MODE_FLAG_PHSYNC;
2220 flags |= DRM_MODE_FLAG_NHSYNC;
2222 if (tmp & DP_SYNC_VS_HIGH)
2223 flags |= DRM_MODE_FLAG_PVSYNC;
2225 flags |= DRM_MODE_FLAG_NVSYNC;
2227 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2228 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2229 flags |= DRM_MODE_FLAG_PHSYNC;
2231 flags |= DRM_MODE_FLAG_NHSYNC;
2233 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2234 flags |= DRM_MODE_FLAG_PVSYNC;
2236 flags |= DRM_MODE_FLAG_NVSYNC;
2239 pipe_config->base.adjusted_mode.flags |= flags;
2241 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2242 tmp & DP_COLOR_RANGE_16_235)
2243 pipe_config->limited_color_range = true;
2245 pipe_config->has_dp_encoder = true;
2247 intel_dp_get_m_n(crtc, pipe_config);
2249 if (port == PORT_A) {
2250 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2251 pipe_config->port_clock = 162000;
2253 pipe_config->port_clock = 270000;
2256 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2257 &pipe_config->dp_m_n);
2259 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2260 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2264 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2265 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 * This is a big fat ugly hack.
2269 * Some machines in UEFI boot mode provide us a VBT that has 18
2270 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2271 * unknown we fail to light up. Yet the same BIOS boots up with
2272 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2273 * max, not what it tells us to use.
2275 * Note: This will still be broken if the eDP panel is not lit
2276 * up by the BIOS, and thus we can't get the mode at module
2279 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2280 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2281 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2285 static void intel_disable_dp(struct intel_encoder *encoder)
2287 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2288 struct drm_device *dev = encoder->base.dev;
2289 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291 if (crtc->config->has_audio)
2292 intel_audio_codec_disable(encoder);
2294 if (HAS_PSR(dev) && !HAS_DDI(dev))
2295 intel_psr_disable(intel_dp);
2297 /* Make sure the panel is off before trying to change the mode. But also
2298 * ensure that we have vdd while we switch off the panel. */
2299 intel_edp_panel_vdd_on(intel_dp);
2300 intel_edp_backlight_off(intel_dp);
2301 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2302 intel_edp_panel_off(intel_dp);
2304 /* disable the port before the pipe on g4x */
2305 if (INTEL_INFO(dev)->gen < 5)
2306 intel_dp_link_down(intel_dp);
2309 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2311 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2312 enum port port = dp_to_dig_port(intel_dp)->port;
2314 intel_dp_link_down(intel_dp);
2316 ironlake_edp_pll_off(intel_dp);
2319 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323 intel_dp_link_down(intel_dp);
2326 static void chv_post_disable_dp(struct intel_encoder *encoder)
2328 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2329 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2330 struct drm_device *dev = encoder->base.dev;
2331 struct drm_i915_private *dev_priv = dev->dev_private;
2332 struct intel_crtc *intel_crtc =
2333 to_intel_crtc(encoder->base.crtc);
2334 enum dpio_channel ch = vlv_dport_to_channel(dport);
2335 enum pipe pipe = intel_crtc->pipe;
2338 intel_dp_link_down(intel_dp);
2340 mutex_lock(&dev_priv->dpio_lock);
2342 /* Propagate soft reset to data lane reset */
2343 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2344 val |= CHV_PCS_REQ_SOFTRESET_EN;
2345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2347 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2348 val |= CHV_PCS_REQ_SOFTRESET_EN;
2349 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2352 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2353 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2356 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2357 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2359 mutex_unlock(&dev_priv->dpio_lock);
/*
 * _intel_dp_set_link_train() - program the source-side DP training pattern.
 *
 * Translates a DPCD-style training pattern request (dp_train_pat) into the
 * platform-specific register bits: DDI parts use DP_TP_CTL, CPT PCH ports use
 * the _CPT field layout in the port register, and everything else uses the
 * legacy/CHV layout written through *DP.
 *
 * NOTE(review): this paste has dropped lines — the "uint32_t *DP" parameter
 * line, the HAS_DDI() branch guard before the DP_TP_CTL read, the switch
 * "break;" statements and several closing braces are missing. Compare against
 * upstream intel_dp.c before trusting the control flow as shown.
 */
2363 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint8_t dp_train_pat)
2367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2368 struct drm_device *dev = intel_dig_port->base.base.dev;
2369 struct drm_i915_private *dev_priv = dev->dev_private;
2370 enum port port = intel_dig_port->port;
/* DDI (HSW+) path: the training pattern lives in DP_TP_CTL, not *DP */
2373 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2376 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2381 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2382 case DP_TRAINING_PATTERN_DISABLE:
/* "disable" means resume normal (non-training) transmission */
2383 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2386 case DP_TRAINING_PATTERN_1:
2387 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 case DP_TRAINING_PATTERN_2:
2390 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 case DP_TRAINING_PATTERN_3:
2393 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2396 I915_WRITE(DP_TP_CTL(port), temp);
/* CPT-attached ports (and all gen7 CPT) use the _CPT field layout */
2398 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2399 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2402 case DP_TRAINING_PATTERN_DISABLE:
2403 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 case DP_TRAINING_PATTERN_1:
2406 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 case DP_TRAINING_PATTERN_2:
2409 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 case DP_TRAINING_PATTERN_3:
/* CPT has no TPS3 support; loudly fall back to pattern 2 */
2412 DRM_ERROR("DP training pattern 3 not supported\n");
2413 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
/* legacy path: g4x/ilk/vlv and CHV (which has a wider mask) */
2418 if (IS_CHERRYVIEW(dev))
2419 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 *DP &= ~DP_LINK_TRAIN_MASK;
2423 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2424 case DP_TRAINING_PATTERN_DISABLE:
2425 *DP |= DP_LINK_TRAIN_OFF;
2427 case DP_TRAINING_PATTERN_1:
2428 *DP |= DP_LINK_TRAIN_PAT_1;
2430 case DP_TRAINING_PATTERN_2:
2431 *DP |= DP_LINK_TRAIN_PAT_2;
2433 case DP_TRAINING_PATTERN_3:
/* only CHV supports TPS3 on this path */
2434 if (IS_CHERRYVIEW(dev)) {
2435 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 DRM_ERROR("DP training pattern 3 not supported\n");
2438 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * intel_dp_enable_port() - enable the DP port with training pattern 1.
 *
 * Performs two writes to the port register: first the full configuration
 * WITHOUT DP_PORT_EN, then a second write with DP_PORT_EN set. The two-step
 * sequence is required on VLV/CHV (see comment below); do not merge the
 * writes.
 */
2445 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2448 struct drm_i915_private *dev_priv = dev->dev_private;
2450 /* enable with pattern 1 (as per spec) */
2451 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2452 DP_TRAINING_PATTERN_1);
2454 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2455 POSTING_READ(intel_dp->output_reg);
2458 * Magic for VLV/CHV. We _must_ first set up the register
2459 * without actually enabling the port, and then do another
2460 * write to enable the port. Otherwise link training will
2461 * fail when the power sequencer is freshly used for this port.
2463 intel_dp->DP |= DP_PORT_EN;
2465 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2466 POSTING_READ(intel_dp->output_reg);
/*
 * intel_enable_dp() - common DP enable sequence.
 *
 * Order matters: enable the port, run the eDP panel power-on dance under
 * VDD, wait for VLV PHY readiness, wake the sink (DPMS on), then run link
 * training, and finally enable audio if the crtc config asks for it.
 *
 * NOTE(review): lines are missing from this paste — upstream has a "return;"
 * after the WARN_ON and pps_lock()/... around the panel-power section.
 * Verify against the full file.
 */
2469 static void intel_enable_dp(struct intel_encoder *encoder)
2471 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2472 struct drm_device *dev = encoder->base.dev;
2473 struct drm_i915_private *dev_priv = dev->dev_private;
2474 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2475 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
/* port must not already be enabled when we get here */
2477 if (WARN_ON(dp_reg & DP_PORT_EN))
/* VLV needs a pipe-bound power sequencer before touching the port */
2482 if (IS_VALLEYVIEW(dev))
2483 vlv_init_panel_power_sequencer(intel_dp);
2485 intel_dp_enable_port(intel_dp);
/* eDP panel power-on must happen with VDD force-enabled */
2487 edp_panel_vdd_on(intel_dp);
2488 edp_panel_on(intel_dp);
2489 edp_panel_vdd_off(intel_dp, true);
2491 pps_unlock(intel_dp);
2493 if (IS_VALLEYVIEW(dev))
2494 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2497 intel_dp_start_link_train(intel_dp);
2498 intel_dp_complete_link_train(intel_dp);
2499 intel_dp_stop_link_train(intel_dp);
2501 if (crtc->config->has_audio) {
2502 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2503 pipe_name(crtc->pipe));
2504 intel_audio_codec_enable(encoder);
2508 static void g4x_enable_dp(struct intel_encoder *encoder)
2510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512 intel_enable_dp(encoder);
2513 intel_edp_backlight_on(intel_dp);
2516 static void vlv_enable_dp(struct intel_encoder *encoder)
2518 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520 intel_edp_backlight_on(intel_dp);
2521 intel_psr_enable(intel_dp);
/*
 * g4x_pre_enable_dp() - pre-enable hook for g4x/ilk-class platforms.
 *
 * Prepares the port register contents, and on port A (eDP on ilk+) also
 * configures and turns on the dedicated eDP PLL before the pipe starts.
 */
2524 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2526 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529 intel_dp_prepare(encoder);
2531 /* Only ilk+ has port A */
2532 if (dport->port == PORT_A) {
2533 ironlake_set_pll_cpu_edp(intel_dp);
2534 ironlake_edp_pll_on(intel_dp);
/*
 * vlv_detach_power_sequencer() - logically disconnect this port from its
 * current VLV/CHV panel power sequencer.
 *
 * Syncs off any pending VDD, clears the PPS port-select register for the
 * pipe, and marks intel_dp->pps_pipe invalid. Caller holds pps_mutex
 * (asserted by callers in this file).
 */
2538 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2541 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2542 enum pipe pipe = intel_dp->pps_pipe;
2543 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545 edp_panel_vdd_off_sync(intel_dp);
2548 * VLV seems to get confused when multiple power seqeuencers
2549 * have the same port selected (even if only one has power/vdd
2550 * enabled). The failure manifests as vlv_wait_port_ready() failing
2551 * CHV on the other hand doesn't seem to mind having the same port
2552 * selected in multiple power seqeuencers, but let's clear the
2553 * port select always when logically disconnecting a power sequencer
2556 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2557 pipe_name(pipe), port_name(intel_dig_port->port));
/* zeroing the on-delay register clears the PPS port select */
2558 I915_WRITE(pp_on_reg, 0);
2559 POSTING_READ(pp_on_reg);
2561 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * vlv_steal_power_sequencer() - take the given pipe's power sequencer away
 * from whichever eDP encoder currently owns it.
 *
 * Walks all encoders, finds any eDP port whose pps_pipe matches, warns if
 * that port is still active, and detaches it so the caller can claim the
 * sequencer. Must be called with pps_mutex held (asserted below).
 *
 * NOTE(review): the "enum pipe pipe" parameter line is missing from this
 * paste; the body clearly uses such a parameter.
 */
2564 static void vlv_steal_power_sequencer(struct drm_device *dev,
2567 struct drm_i915_private *dev_priv = dev->dev_private;
2568 struct intel_encoder *encoder;
2570 lockdep_assert_held(&dev_priv->pps_mutex);
/* only pipes A and B have power sequencers on VLV/CHV */
2572 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2575 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 struct intel_dp *intel_dp;
2580 if (encoder->type != INTEL_OUTPUT_EDP)
2583 intel_dp = enc_to_intel_dp(&encoder->base);
2584 port = dp_to_dig_port(intel_dp)->port;
2586 if (intel_dp->pps_pipe != pipe)
2589 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2590 pipe_name(pipe), port_name(port));
/* stealing from a live port indicates a driver sequencing bug */
2592 WARN(encoder->connectors_active,
2593 "stealing pipe %c power sequencer from active eDP port %c\n",
2594 pipe_name(pipe), port_name(port));
2596 /* make sure vdd is off before we steal it */
2597 vlv_detach_power_sequencer(intel_dp);
/*
 * vlv_init_panel_power_sequencer() - bind the crtc's pipe power sequencer
 * to this eDP port and (re)initialize it.
 *
 * No-op for non-eDP and when the port already owns the right sequencer.
 * Otherwise detaches any sequencer this port held, steals the target pipe's
 * sequencer from any other port, claims it, and programs the PPS registers.
 * Must be called with pps_mutex held (asserted below).
 */
2601 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2604 struct intel_encoder *encoder = &intel_dig_port->base;
2605 struct drm_device *dev = encoder->base.dev;
2606 struct drm_i915_private *dev_priv = dev->dev_private;
2607 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2609 lockdep_assert_held(&dev_priv->pps_mutex);
2611 if (!is_edp(intel_dp))
/* already bound to the right pipe -- nothing to do */
2614 if (intel_dp->pps_pipe == crtc->pipe)
2618 * If another power sequencer was being used on this
2619 * port previously make sure to turn off vdd there while
2620 * we still have control of it.
2622 if (intel_dp->pps_pipe != INVALID_PIPE)
2623 vlv_detach_power_sequencer(intel_dp)
2626 * We may be stealing the power
2627 * sequencer from another port.
2629 vlv_steal_power_sequencer(dev, crtc->pipe);
2631 /* now it's all ours */
2632 intel_dp->pps_pipe = crtc->pipe;
2634 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2635 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637 /* init power sequencer on this pipe and port */
2638 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2639 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * vlv_pre_enable_dp() - Valleyview pre-enable hook.
 *
 * Programs the per-pipe DPIO PHY (PCS registers) for DP, then runs the
 * common enable sequence. All DPIO accesses are serialized by dpio_lock.
 *
 * NOTE(review): the lines between the DW8 read and the DW8 write (the val
 * masking/setting) are missing from this paste — verify against upstream.
 */
2642 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2644 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2645 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2646 struct drm_device *dev = encoder->base.dev;
2647 struct drm_i915_private *dev_priv = dev->dev_private;
2648 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2649 enum dpio_channel port = vlv_dport_to_channel(dport);
2650 int pipe = intel_crtc->pipe;
2653 mutex_lock(&dev_priv->dpio_lock);
2655 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
/* magic PHY constants; values come from hardware tuning, not derivable here */
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2666 mutex_unlock(&dev_priv->dpio_lock);
2668 intel_enable_dp(encoder);
/*
 * vlv_dp_pre_pll_enable() - Valleyview pre-PLL-enable hook.
 *
 * Prepares the port, then resets the PHY TX lanes to defaults and applies
 * the inter-pair skew workaround via DPIO, all under dpio_lock.
 */
2671 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2673 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2674 struct drm_device *dev = encoder->base.dev;
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_crtc *intel_crtc =
2677 to_intel_crtc(encoder->base.crtc);
2678 enum dpio_channel port = vlv_dport_to_channel(dport);
2679 int pipe = intel_crtc->pipe;
2681 intel_dp_prepare(encoder);
2683 /* Program Tx lane resets to default */
2684 mutex_lock(&dev_priv->dpio_lock);
2685 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2686 DPIO_PCS_TX_LANE2_RESET |
2687 DPIO_PCS_TX_LANE1_RESET);
2688 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2689 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2690 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2691 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2692 DPIO_PCS_CLK_SOFT_RESET);
2694 /* Fix up inter-pair skew failure */
2695 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2696 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2698 mutex_unlock(&dev_priv->dpio_lock);
/*
 * chv_pre_enable_dp() - Cherryview pre-enable hook.
 *
 * Under dpio_lock: hands TX FIFO reset control to hardware, deasserts the
 * soft data-lane resets on both PCS groups, and programs per-lane latency
 * ("upar") settings. Then runs the common DP enable sequence.
 *
 * NOTE(review): declarations for the loop index and "data", plus the
 * data-lane stagger writes mentioned by the trailing comments, appear to
 * have been dropped from this paste.
 */
2701 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2704 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2705 struct drm_device *dev = encoder->base.dev;
2706 struct drm_i915_private *dev_priv = dev->dev_private;
2707 struct intel_crtc *intel_crtc =
2708 to_intel_crtc(encoder->base.crtc);
2709 enum dpio_channel ch = vlv_dport_to_channel(dport);
2710 int pipe = intel_crtc->pipe;
2714 mutex_lock(&dev_priv->dpio_lock);
2716 /* allow hardware to manage TX FIFO reset source */
2717 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2718 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2719 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2722 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2723 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725 /* Deassert soft data lane reset*/
2726 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2727 val |= CHV_PCS_REQ_SOFTRESET_EN;
2728 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2731 val |= CHV_PCS_REQ_SOFTRESET_EN;
2732 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2735 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2736 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2739 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2742 /* Program Tx lane latency optimal setting*/
2743 for (i = 0; i < 4; i++) {
2744 /* Set the upar bit */
2745 data = (i == 1) ? 0x0 : 0x1;
2746 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2747 data << DPIO_UPAR_SHIFT);
2750 /* Data lane stagger programming */
2751 /* FIXME: Fix up value only after power analysis */
2753 mutex_unlock(&dev_priv->dpio_lock);
2755 intel_enable_dp(encoder);
/*
 * chv_dp_pre_pll_enable() - Cherryview pre-PLL-enable hook.
 *
 * Under dpio_lock: programs left/right clock buffer distribution for the
 * pipe, then the clock-channel usage overrides for both PCS groups and the
 * common lane, selected by the DPIO channel (port), not the pipe.
 *
 * NOTE(review): the pipe-conditional lines choosing BUFLEFT vs BUFRIGHT and
 * the ch-conditional if/else lines around USEDCLKCHANNEL appear to have
 * been dropped from this paste.
 */
2758 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2760 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2761 struct drm_device *dev = encoder->base.dev;
2762 struct drm_i915_private *dev_priv = dev->dev_private;
2763 struct intel_crtc *intel_crtc =
2764 to_intel_crtc(encoder->base.crtc);
2765 enum dpio_channel ch = vlv_dport_to_channel(dport);
2766 enum pipe pipe = intel_crtc->pipe;
2769 intel_dp_prepare(encoder);
2771 mutex_lock(&dev_priv->dpio_lock);
2773 /* program left/right clock distribution */
2774 if (pipe != PIPE_B) {
2775 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2776 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2778 val |= CHV_BUFLEFTENA1_FORCE;
2780 val |= CHV_BUFRIGHTENA1_FORCE;
2781 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2783 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2784 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2786 val |= CHV_BUFLEFTENA2_FORCE;
2788 val |= CHV_BUFRIGHTENA2_FORCE;
2789 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2792 /* program clock channel usage */
2793 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2794 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2796 val &= ~CHV_PCS_USEDCLKCHANNEL;
2798 val |= CHV_PCS_USEDCLKCHANNEL;
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2801 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2802 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2804 val &= ~CHV_PCS_USEDCLKCHANNEL;
2806 val |= CHV_PCS_USEDCLKCHANNEL;
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2810 * This a a bit weird since generally CL
2811 * matches the pipe, but here we need to
2812 * pick the CL based on the port.
2814 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2816 val &= ~CHV_CMN_USEDCLKCHANNEL;
2818 val |= CHV_CMN_USEDCLKCHANNEL;
2819 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2821 mutex_unlock(&dev_priv->dpio_lock);
2825 * Native read with retry for link status and receiver capability reads for
2826 * cases where the sink may still be asleep.
2828 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2829 * supposed to retry 3 times per the spec.
/*
 * NOTE(review): this paste truncates the function — the declarations of the
 * return/loop variables and the retry/return logic after the read are
 * missing. The visible body shows only the throwaway wake-up read and the
 * start of the 3-attempt retry loop.
 */
2832 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2833 void *buffer, size_t size)
2839 * Sometime we just get the same incorrect byte repeated
2840 * over the entire buffer. Doing just one throw away read
2841 * initially seems to "solve" it.
/* intentionally unchecked: this read only exists to wake the sink */
2843 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2845 for (i = 0; i < 3; i++) {
2846 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2856 * Fetch AUX CH registers 0x202 - 0x207 which contain
2857 * link status information
2860 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2862 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2865 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2868 /* These are source-specific values. */
2870 intel_dp_voltage_max(struct intel_dp *intel_dp)
2872 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2873 struct drm_i915_private *dev_priv = dev->dev_private;
2874 enum port port = dp_to_dig_port(intel_dp)->port;
2876 if (INTEL_INFO(dev)->gen >= 9) {
2877 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2878 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2879 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2880 } else if (IS_VALLEYVIEW(dev))
2881 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2882 else if (IS_GEN7(dev) && port == PORT_A)
2883 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2884 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2891 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2893 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2894 enum port port = dp_to_dig_port(intel_dp)->port;
2896 if (INTEL_INFO(dev)->gen >= 9) {
2897 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2898 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2899 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2900 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2901 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2902 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2903 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2907 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2909 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2910 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2912 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2913 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2914 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2916 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2919 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2921 } else if (IS_VALLEYVIEW(dev)) {
2922 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2924 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2926 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2928 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2931 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2933 } else if (IS_GEN7(dev) && port == PORT_A) {
2934 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2936 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2938 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2939 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2941 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2944 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2945 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2946 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2948 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2953 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * intel_vlv_signal_levels() - program VLV PHY swing/pre-emphasis via DPIO.
 *
 * Maps the negotiated train_set (voltage swing + pre-emphasis of lane 0)
 * onto vendor-tuned DPIO register constants, then writes them out under
 * dpio_lock. The hex values are hardware characterization data — do not
 * "simplify" them.
 *
 * NOTE(review): "break;" lines, the invalid-combination default returns,
 * and the final "return 0;" appear to have been dropped from this paste.
 */
2958 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2960 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2961 struct drm_i915_private *dev_priv = dev->dev_private;
2962 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2963 struct intel_crtc *intel_crtc =
2964 to_intel_crtc(dport->base.base.crtc);
2965 unsigned long demph_reg_value, preemph_reg_value,
2966 uniqtranscale_reg_value;
2967 uint8_t train_set = intel_dp->train_set[0];
2968 enum dpio_channel port = vlv_dport_to_channel(dport);
2969 int pipe = intel_crtc->pipe;
2971 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2972 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2973 preemph_reg_value = 0x0004000;
2974 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2976 demph_reg_value = 0x2B405555;
2977 uniqtranscale_reg_value = 0x552AB83A;
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2980 demph_reg_value = 0x2B404040;
2981 uniqtranscale_reg_value = 0x5548B83A;
2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2984 demph_reg_value = 0x2B245555;
2985 uniqtranscale_reg_value = 0x5560B83A;
2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2988 demph_reg_value = 0x2B405555;
2989 uniqtranscale_reg_value = 0x5598DA3A;
2995 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2996 preemph_reg_value = 0x0002000;
2997 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2999 demph_reg_value = 0x2B404040;
3000 uniqtranscale_reg_value = 0x5552B83A;
3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3003 demph_reg_value = 0x2B404848;
3004 uniqtranscale_reg_value = 0x5580B83A;
3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3007 demph_reg_value = 0x2B404040;
3008 uniqtranscale_reg_value = 0x55ADDA3A;
3014 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3015 preemph_reg_value = 0x0000000;
3016 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3017 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3018 demph_reg_value = 0x2B305555;
3019 uniqtranscale_reg_value = 0x5570B83A;
3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3022 demph_reg_value = 0x2B2B4040;
3023 uniqtranscale_reg_value = 0x55ADDA3A;
3029 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3030 preemph_reg_value = 0x0006000;
3031 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3033 demph_reg_value = 0x1B405555;
3034 uniqtranscale_reg_value = 0x55ADDA3A;
/* the write sequence (DW5 off, values, DW5 re-enable) is order-sensitive */
3044 mutex_lock(&dev_priv->dpio_lock);
3045 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3046 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3047 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3048 uniqtranscale_reg_value);
3049 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3050 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3052 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3053 mutex_unlock(&dev_priv->dpio_lock);
/*
 * intel_chv_signal_levels() - program CHV PHY swing/pre-emphasis via DPIO.
 *
 * Maps lane 0's train_set onto deemphasis/margin constants, then runs the
 * CHV calibration sequence under dpio_lock: clear calc-init, zero margins,
 * program per-lane swing deemph and margin, handle the unique transition
 * scale for the PE0/VS3 combination, start swing calculation, and set
 * LRC bypass. The sequence order is mandated by hardware — keep it intact.
 *
 * NOTE(review): "break;" lines, the invalid-combination default returns,
 * the "int i" declaration, and the final "return 0;" appear to have been
 * dropped from this paste.
 */
3058 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3060 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3061 struct drm_i915_private *dev_priv = dev->dev_private;
3062 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3063 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3064 u32 deemph_reg_value, margin_reg_value, val;
3065 uint8_t train_set = intel_dp->train_set[0];
3066 enum dpio_channel ch = vlv_dport_to_channel(dport);
3067 enum pipe pipe = intel_crtc->pipe;
3070 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3071 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3072 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3074 deemph_reg_value = 128;
3075 margin_reg_value = 52;
3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3078 deemph_reg_value = 128;
3079 margin_reg_value = 77;
3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3082 deemph_reg_value = 128;
3083 margin_reg_value = 102;
3085 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3086 deemph_reg_value = 128;
3087 margin_reg_value = 154;
3088 /* FIXME extra to set for 1200 */
3094 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3095 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3097 deemph_reg_value = 85;
3098 margin_reg_value = 78;
3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3101 deemph_reg_value = 85;
3102 margin_reg_value = 116;
3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3105 deemph_reg_value = 85;
3106 margin_reg_value = 154;
3112 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3113 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3115 deemph_reg_value = 64;
3116 margin_reg_value = 104;
3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3119 deemph_reg_value = 64;
3120 margin_reg_value = 154;
3126 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3127 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3129 deemph_reg_value = 43;
3130 margin_reg_value = 154;
3140 mutex_lock(&dev_priv->dpio_lock);
3142 /* Clear calc init */
3143 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3144 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3145 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3146 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3147 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3149 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3150 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3151 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3152 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3153 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* zero the PCS margin fields before programming per-lane margins */
3155 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3156 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3157 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3158 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3160 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3161 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3162 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3165 /* Program swing deemph */
3166 for (i = 0; i < 4; i++) {
3167 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3168 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3169 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3170 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3173 /* Program swing margin */
3174 for (i = 0; i < 4; i++) {
3175 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3176 val &= ~DPIO_SWING_MARGIN000_MASK;
3177 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3178 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3181 /* Disable unique transition scale */
3182 for (i = 0; i < 4; i++) {
3183 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3184 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3185 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
/* pre-emph 0 + max swing needs the unique transition scale enabled */
3188 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3189 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3190 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3191 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3194 * The document said it needs to set bit 27 for ch0 and bit 26
3195 * for ch1. Might be a typo in the doc.
3196 * For now, for this unique transition scale selection, set bit
3197 * 27 for ch0 and ch1.
3199 for (i = 0; i < 4; i++) {
3200 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3201 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3202 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3205 for (i = 0; i < 4; i++) {
3206 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3207 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3208 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3213 /* Start swing calculation */
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3215 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3216 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3218 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3219 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3223 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3224 val |= DPIO_LRC_BYPASS;
3225 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3227 mutex_unlock(&dev_priv->dpio_lock);
3233 intel_get_adjust_train(struct intel_dp *intel_dp,
3234 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3239 uint8_t voltage_max;
3240 uint8_t preemph_max;
3242 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3243 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3244 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3252 voltage_max = intel_dp_voltage_max(intel_dp);
3253 if (v >= voltage_max)
3254 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3256 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3257 if (p >= preemph_max)
3258 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3260 for (lane = 0; lane < 4; lane++)
3261 intel_dp->train_set[lane] = v | p;
3265 intel_gen4_signal_levels(uint8_t train_set)
3267 uint32_t signal_levels = 0;
3269 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3272 signal_levels |= DP_VOLTAGE_0_4;
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3275 signal_levels |= DP_VOLTAGE_0_6;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3278 signal_levels |= DP_VOLTAGE_0_8;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3281 signal_levels |= DP_VOLTAGE_1_2;
3284 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3285 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3287 signal_levels |= DP_PRE_EMPHASIS_0;
3289 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3290 signal_levels |= DP_PRE_EMPHASIS_3_5;
3292 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3293 signal_levels |= DP_PRE_EMPHASIS_6;
3295 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3296 signal_levels |= DP_PRE_EMPHASIS_9_5;
3299 return signal_levels;
3302 /* Gen6's DP voltage swing and pre-emphasis control */
3304 intel_gen6_edp_signal_levels(uint8_t train_set)
3306 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3307 DP_TRAIN_PRE_EMPHASIS_MASK);
3308 switch (signal_levels) {
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3311 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3313 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3322 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3324 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3325 "0x%x\n", signal_levels);
3326 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3330 /* Gen7's DP voltage swing and pre-emphasis control */
3332 intel_gen7_edp_signal_levels(uint8_t train_set)
3334 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3335 DP_TRAIN_PRE_EMPHASIS_MASK);
3336 switch (signal_levels) {
3337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3338 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3340 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3342 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3345 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3347 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3352 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3355 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3356 "0x%x\n", signal_levels);
3357 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3361 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3363 intel_hsw_signal_levels(uint8_t train_set)
3365 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3366 DP_TRAIN_PRE_EMPHASIS_MASK);
3367 switch (signal_levels) {
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3369 return DDI_BUF_TRANS_SELECT(0);
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3371 return DDI_BUF_TRANS_SELECT(1);
3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3373 return DDI_BUF_TRANS_SELECT(2);
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3375 return DDI_BUF_TRANS_SELECT(3);
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3378 return DDI_BUF_TRANS_SELECT(4);
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3380 return DDI_BUF_TRANS_SELECT(5);
3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3382 return DDI_BUF_TRANS_SELECT(6);
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3385 return DDI_BUF_TRANS_SELECT(7);
3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3387 return DDI_BUF_TRANS_SELECT(8);
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3390 return DDI_BUF_TRANS_SELECT(9);
3392 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3393 "0x%x\n", signal_levels);
3394 return DDI_BUF_TRANS_SELECT(0);
3398 /* Properly updates "DP" with the correct signal levels. */
/*
 * intel_dp_set_signal_levels() - fold the platform signal-level bits for
 * train_set[0] into *DP, masking out the old field first. VLV/CHV program
 * their levels directly via DPIO, so nothing is merged into *DP for them.
 *
 * NOTE(review): the "mask = 0;" lines for the CHV/VLV branches appear to
 * have been dropped from this paste — verify before relying on "mask" being
 * initialized on those paths.
 */
3400 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3402 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3403 enum port port = intel_dig_port->port;
3404 struct drm_device *dev = intel_dig_port->base.base.dev;
3405 uint32_t signal_levels, mask;
3406 uint8_t train_set = intel_dp->train_set[0];
3408 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3409 signal_levels = intel_hsw_signal_levels(train_set);
3410 mask = DDI_BUF_EMP_MASK;
3411 } else if (IS_CHERRYVIEW(dev)) {
3412 signal_levels = intel_chv_signal_levels(intel_dp);
3414 } else if (IS_VALLEYVIEW(dev)) {
3415 signal_levels = intel_vlv_signal_levels(intel_dp);
3417 } else if (IS_GEN7(dev) && port == PORT_A) {
3418 signal_levels = intel_gen7_edp_signal_levels(train_set);
3419 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3420 } else if (IS_GEN6(dev) && port == PORT_A) {
3421 signal_levels = intel_gen6_edp_signal_levels(train_set);
3422 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3424 signal_levels = intel_gen4_signal_levels(train_set);
3425 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3428 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3430 *DP = (*DP & ~mask) | signal_levels;
/*
 * intel_dp_set_link_train() - set the training pattern on both ends.
 *
 * Programs the source via _intel_dp_set_link_train()/port register write,
 * then tells the sink over DPCD: TRAINING_PATTERN_SET plus (unless
 * disabling) the per-lane TRAINING_LANEx_SET values in one write.
 *
 * NOTE(review): the "int ret, len" declarations, "len = 1;" in the disable
 * branch, and the trailing "return ret == len;" appear to have been dropped
 * from this paste.
 */
3434 intel_dp_set_link_train(struct intel_dp *intel_dp,
3436 uint8_t dp_train_pat)
3438 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3439 struct drm_device *dev = intel_dig_port->base.base.dev;
3440 struct drm_i915_private *dev_priv = dev->dev_private;
3441 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3444 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3446 I915_WRITE(intel_dp->output_reg, *DP);
3447 POSTING_READ(intel_dp->output_reg);
3449 buf[0] = dp_train_pat;
3450 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3451 DP_TRAINING_PATTERN_DISABLE) {
3452 /* don't write DP_TRAINING_LANEx_SET on disable */
3455 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3456 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3457 len = intel_dp->lane_count + 1;
3460 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * Start training from scratch: zero the cached per-lane drive settings,
 * reprogram signal levels accordingly, then set the requested pattern.
 * (Fragment: braces/return type elided by the extract.)
 */
3467 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3468 uint8_t dp_train_pat)
3470 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3471 intel_dp_set_signal_levels(intel_dp, DP);
3472 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * Mid-training update: derive new per-lane drive settings from the sink's
 * adjust requests in link_status, apply them to the port register, and
 * write them to DP_TRAINING_LANE0_SET..N in the sink.
 * Returns true when all lane bytes were written (ret == lane_count).
 * (Fragment: braces and the 'ret' declaration are elided by the extract.)
 */
3476 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3477 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3480 struct drm_device *dev = intel_dig_port->base.base.dev;
3481 struct drm_i915_private *dev_priv = dev->dev_private;
3484 intel_get_adjust_train(intel_dp, link_status);
3485 intel_dp_set_signal_levels(intel_dp, DP);
3487 I915_WRITE(intel_dp->output_reg, *DP);
3488 POSTING_READ(intel_dp->output_reg);
3490 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3491 intel_dp->train_set, intel_dp->lane_count);
3493 return ret == intel_dp->lane_count;
/*
 * Switch the DDI port to idle-pattern transmission and, except on PORT_A
 * (see comment below), wait for the hardware to report idle done.
 * (Fragment: braces and the 'val' declaration are elided by the extract.)
 */
3496 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3498 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3499 struct drm_device *dev = intel_dig_port->base.base.dev;
3500 struct drm_i915_private *dev_priv = dev->dev_private;
3501 enum port port = intel_dig_port->port;
3507 val = I915_READ(DP_TP_CTL(port));
3508 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3509 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3510 I915_WRITE(DP_TP_CTL(port), val);
3513 * On PORT_A we can have only eDP in SST mode. There the only reason
3514 * we need to set idle transmission mode is to work around a HW issue
3515 * where we enable the pipe while not in idle link-training mode.
3516 * In this case there is requirement to wait for a minimum number of
3517 * idle patterns to be sent.
3522 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3524 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3527 /* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: write link bw/lane count
 * (and, for sinks advertising explicit rates, DP_LINK_RATE_SET) to the
 * sink, start TP1 with scrambling disabled, then loop reading link status
 * and adjusting drive levels until clock recovery is reported OK or the
 * retry budgets (5 full retries / 5 same-voltage tries) are exhausted.
 * (Fragment: loop braces, 'i'/'voltage' declarations, breaks/continues and
 * counter increments are elided by the extract.)
 */
3529 intel_dp_start_link_train(struct intel_dp *intel_dp)
3531 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3532 struct drm_device *dev = encoder->dev;
3535 int voltage_tries, loop_tries;
3536 uint32_t DP = intel_dp->DP;
3537 uint8_t link_config[2];
3540 intel_ddi_prepare_link_retrain(encoder);
3542 /* Write the link configuration data */
3543 link_config[0] = intel_dp->link_bw;
3544 link_config[1] = intel_dp->lane_count;
3545 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3546 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3547 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
/* eDP 1.4 sinks select the rate by index instead of by link_bw */
3548 if (intel_dp->num_sink_rates)
3549 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3550 &intel_dp->rate_select, 1);
3553 link_config[1] = DP_SET_ANSI_8B10B;
3554 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3558 /* clock recovery */
3559 if (!intel_dp_reset_link_train(intel_dp, &DP,
3560 DP_TRAINING_PATTERN_1 |
3561 DP_LINK_SCRAMBLING_DISABLE)) {
3562 DRM_ERROR("failed to enable link training\n");
3570 uint8_t link_status[DP_LINK_STATUS_SIZE];
3572 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3573 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3574 DRM_ERROR("failed to get link status\n");
3578 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3579 DRM_DEBUG_KMS("clock recovery OK\n");
3583 /* Check to see if we've tried the max voltage */
3584 for (i = 0; i < intel_dp->lane_count; i++)
3585 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
/* all lanes at max swing: restart from scratch rather than keep adjusting */
3587 if (i == intel_dp->lane_count) {
3589 if (loop_tries == 5) {
3590 DRM_ERROR("too many full retries, give up\n");
3593 intel_dp_reset_link_train(intel_dp, &DP,
3594 DP_TRAINING_PATTERN_1 |
3595 DP_LINK_SCRAMBLING_DISABLE);
3600 /* Check to see if we've tried the same voltage 5 times */
3601 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3603 if (voltage_tries == 5) {
3604 DRM_ERROR("too many voltage retries, give up\n");
3609 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3611 /* Update training set as requested by target */
3612 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3613 DRM_ERROR("failed to update link training\n");
/*
 * Channel-equalization phase of DP link training (TP2, or TP3 for HBR2 /
 * TPS3-capable sinks). Loops on link status; falls back to re-running
 * clock recovery if it drops, retries EQ a bounded number of times, then
 * puts the port into idle transmission on success.
 * (Fragment: loop braces, counter increments and break/continue statements
 * are elided by the extract.)
 */
3622 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3624 bool channel_eq = false;
3625 int tries, cr_tries;
3626 uint32_t DP = intel_dp->DP;
3627 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3629 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3630 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3631 training_pattern = DP_TRAINING_PATTERN_3;
3633 /* channel equalization */
3634 if (!intel_dp_set_link_train(intel_dp, &DP,
3636 DP_LINK_SCRAMBLING_DISABLE)) {
3637 DRM_ERROR("failed to start channel equalization\n");
3645 uint8_t link_status[DP_LINK_STATUS_SIZE];
3648 DRM_ERROR("failed to train DP, aborting\n");
3652 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3653 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3654 DRM_ERROR("failed to get link status\n");
3658 /* Make sure clock is still ok */
3659 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3660 intel_dp_start_link_train(intel_dp);
3661 intel_dp_set_link_train(intel_dp, &DP,
3663 DP_LINK_SCRAMBLING_DISABLE);
3668 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3673 /* Try 5 times, then try clock recovery if that fails */
3675 intel_dp_start_link_train(intel_dp);
3676 intel_dp_set_link_train(intel_dp, &DP,
3678 DP_LINK_SCRAMBLING_DISABLE);
3684 /* Update training set as requested by target */
3685 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3686 DRM_ERROR("failed to update link training\n");
3692 intel_dp_set_idle_link_train(intel_dp);
3697 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End link training: program the "pattern disable" state on the port and
 * in the sink's DPCD. (Fragment: braces elided by the extract.) */
3701 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3703 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3704 DP_TRAINING_PATTERN_DISABLE)
/*
 * Tear the non-DDI DP link down: idle the training state, apply the IBX
 * transcoder-B deselect workaround, then disable audio and the port, and
 * wait out the panel power-down delay.
 * (Fragment: several braces and a `} else {` are elided by the extract.)
 */
3708 intel_dp_link_down(struct intel_dp *intel_dp)
3710 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3711 enum port port = intel_dig_port->port;
3712 struct drm_device *dev = intel_dig_port->base.base.dev;
3713 struct drm_i915_private *dev_priv = dev->dev_private;
3714 uint32_t DP = intel_dp->DP;
/* DDI platforms use a different teardown path; getting here is a bug */
3716 if (WARN_ON(HAS_DDI(dev)))
3719 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3722 DRM_DEBUG_KMS("\n");
3724 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3725 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3726 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3728 if (IS_CHERRYVIEW(dev))
3729 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3731 DP &= ~DP_LINK_TRAIN_MASK;
3732 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3734 POSTING_READ(intel_dp->output_reg);
3736 if (HAS_PCH_IBX(dev) &&
3737 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3738 /* Hardware workaround: leaving our transcoder select
3739 * set to transcoder B while it's off will prevent the
3740 * corresponding HDMI output on transcoder A.
3742 * Combine this with another hardware workaround:
3743 * transcoder select bit can only be cleared while the
3746 DP &= ~DP_PIPEB_SELECT;
3747 I915_WRITE(intel_dp->output_reg, DP);
3748 POSTING_READ(intel_dp->output_reg);
3751 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3752 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3753 POSTING_READ(intel_dp->output_reg);
3754 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver-capability block, plus derived
 * state: PSR support (eDP), TPS3 capability, eDP 1.4 explicit sink rates,
 * and downstream-port info. Returns false on AUX failure, absent DPCD, or
 * failed downstream-port fetch; true for a usable sink.
 * (Fragment: braces and the 'rev'/'i' declarations are elided.)
 */
3758 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3760 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3761 struct drm_device *dev = dig_port->base.base.dev;
3762 struct drm_i915_private *dev_priv = dev->dev_private;
3765 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3766 sizeof(intel_dp->dpcd)) < 0)
3767 return false; /* aux transfer failed */
3769 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3771 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3772 return false; /* DPCD not present */
3774 /* Check if the panel supports PSR */
3775 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3776 if (is_edp(intel_dp)) {
3777 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3779 sizeof(intel_dp->psr_dpcd));
3780 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3781 dev_priv->psr.sink_support = true;
3782 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3786 /* Training Pattern 3 support, both source and sink */
3787 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3788 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3789 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3790 intel_dp->use_tps3 = true;
3791 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3793 intel_dp->use_tps3 = false;
3795 /* Intermediate frequency support */
3796 if (is_edp(intel_dp) &&
3797 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3798 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3799 (rev >= 0x03)) { /* eDp v1.4 or higher */
3800 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3803 intel_dp_dpcd_read_wake(&intel_dp->aux,
3804 DP_SUPPORTED_LINK_RATES,
3806 sizeof(sink_rates));
3808 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3809 int val = le16_to_cpu(sink_rates[i]);
3814 /* Value read is in kHz while drm clock is saved in deca-kHz */
3815 intel_dp->sink_rates[i] = (val * 200) / 10;
3817 intel_dp->num_sink_rates = i;
3820 intel_dp_print_rates(intel_dp);
3822 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3823 DP_DWN_STRM_PORT_PRESENT))
3824 return true; /* native DP sink */
3826 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3827 return true; /* no per-port downstream info */
3829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3830 intel_dp->downstream_ports,
3831 DP_MAX_DOWNSTREAM_PORTS) < 0)
3832 return false; /* downstream port status fetch failed */
/*
 * Debug helper: if the sink advertises OUI support, read and log the
 * sink and branch IEEE OUIs. (Fragment: braces and the 'buf' declaration
 * are elided by the extract.)
 */
3838 intel_dp_probe_oui(struct intel_dp *intel_dp)
3842 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3845 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3846 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3847 buf[0], buf[1], buf[2]);
3849 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3850 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3851 buf[0], buf[1], buf[2]);
/*
 * Probe whether the sink is MST-capable (DPCD >= 1.2 and DP_MSTM_CAP set),
 * update intel_dp->is_mst, propagate the state to the topology manager,
 * and return it. (Fragment: braces and the 'buf' declaration are elided.)
 */
3855 intel_dp_probe_mst(struct intel_dp *intel_dp)
3859 if (!intel_dp->can_mst)
3862 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3865 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3866 if (buf[0] & DP_MST_CAP) {
3867 DRM_DEBUG_KMS("Sink is MST capable\n");
3868 intel_dp->is_mst = true;
3870 DRM_DEBUG_KMS("Sink is not MST capable\n");
3871 intel_dp->is_mst = false;
3875 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3876 return intel_dp->is_mst;
/*
 * Ask the sink to compute a frame CRC (DP_TEST_SINK_START), poll for the
 * CRC count to change across vblanks, read the 6 CRC bytes into 'crc',
 * then stop the test. Used by the debugfs/IGT CRC infrastructure.
 * (Fragment: error-return statements, 'buf'/'test_crc_count'/'attempts'
 * declarations and several braces are elided by the extract.)
 */
3879 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3881 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882 struct drm_device *dev = intel_dig_port->base.base.dev;
3883 struct intel_crtc *intel_crtc =
3884 to_intel_crtc(intel_dig_port->base.base.crtc);
3889 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3892 if (!(buf & DP_TEST_CRC_SUPPORTED))
3895 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3898 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3899 buf | DP_TEST_SINK_START) < 0)
3902 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3904 test_crc_count = buf & DP_TEST_COUNT_MASK;
3907 if (drm_dp_dpcd_readb(&intel_dp->aux,
3908 DP_TEST_SINK_MISC, &buf) < 0)
3910 intel_wait_for_vblank(dev, intel_crtc->pipe);
3911 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3913 if (attempts == 0) {
3914 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3918 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3921 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
/* clear the start bit so the sink stops computing CRCs */
3923 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3924 buf & ~DP_TEST_SINK_START) < 0)
/* Read the sink's one-byte service IRQ vector; true on a full read.
 * (Fragment: braces/return type elided by the extract.) */
3931 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3933 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_DEVICE_SERVICE_IRQ_VECTOR,
3935 sink_irq_vector, 1) == 1;
/* Read the 14-byte ESI (event status indicator) block used in MST mode.
 * (Fragment: register offset, braces and return elided by the extract.) */
3939 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3943 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3945 sink_irq_vector, 14);
/* Automated-test handler: currently rejects every request with a NAK.
 * (Fragment: braces elided by the extract.) */
3953 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3955 /* NAK by default */
3956 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
/*
 * MST IRQ service loop: read the ESI block, retrain if channel EQ has
 * dropped, hand the ESI to the MST topology manager, and ack the handled
 * bits (retrying the ack write up to 3 times). On ESI read failure, fall
 * out of MST mode and emit a hotplug event.
 * (Fragment: 'esi'/'ret'/'bret' declarations, loop braces and goto/return
 * paths are elided by the extract.)
 */
3960 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3964 if (intel_dp->is_mst) {
3969 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3973 /* check link status - esi[10] = 0x200c */
3974 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3975 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3976 intel_dp_start_link_train(intel_dp);
3977 intel_dp_complete_link_train(intel_dp);
3978 intel_dp_stop_link_train(intel_dp);
3981 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3982 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3985 for (retry = 0; retry < 3; retry++) {
3987 wret = drm_dp_dpcd_write(&intel_dp->aux,
3988 DP_SINK_COUNT_ESI+1,
3995 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3997 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4005 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4006 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4007 intel_dp->is_mst = false;
4008 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4009 /* send a hotplug event */
4010 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4017 * According to DP spec
4020 * 2. Configure link according to Receiver Capabilities
4021 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4022 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-HPD / periodic link check: verify the encoder is active, re-read
 * link status and DPCD, service any sink IRQ vector, and retrain if
 * channel EQ is no longer OK. Caller must hold connection_mutex.
 * (Fragment: early returns and the 'sink_irq_vector' declaration are
 * elided by the extract.)
 */
4025 intel_dp_check_link_status(struct intel_dp *intel_dp)
4027 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4028 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4030 u8 link_status[DP_LINK_STATUS_SIZE];
4032 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4034 if (!intel_encoder->connectors_active)
4037 if (WARN_ON(!intel_encoder->base.crtc))
4040 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4043 /* Try to read receiver status if the link appears to be up */
4044 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4048 /* Now read the DPCD to see if it's actually running */
4049 if (!intel_dp_get_dpcd(intel_dp)) {
4053 /* Try to read the source of the interrupt */
4054 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4055 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4056 /* Clear interrupt source */
4057 drm_dp_dpcd_writeb(&intel_dp->aux,
4058 DP_DEVICE_SERVICE_IRQ_VECTOR,
4061 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4062 intel_dp_handle_test_request(intel_dp);
4063 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4064 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4067 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4068 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4069 intel_encoder->base.name);
4070 intel_dp_start_link_train(intel_dp);
4071 intel_dp_complete_link_train(intel_dp);
4072 intel_dp_stop_link_train(intel_dp);
4076 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from DPCD alone: connected for a native sink;
 * for branch devices use SINK_COUNT (HPD-aware), then a DDC probe, and
 * finally fall back to "unknown" for unreliable downstream port types.
 * (Fragment: 'type'/'reg' declarations and some braces are elided.)
 */
4077 static enum drm_connector_status
4078 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4080 uint8_t *dpcd = intel_dp->dpcd;
4083 if (!intel_dp_get_dpcd(intel_dp))
4084 return connector_status_disconnected;
4086 /* if there's no downstream port, we're done */
4087 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4088 return connector_status_connected;
4090 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4091 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4092 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4095 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4097 return connector_status_unknown;
4099 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4100 : connector_status_disconnected;
4103 /* If no HPD, poke DDC gently */
4104 if (drm_probe_ddc(&intel_dp->aux.ddc))
4105 return connector_status_connected;
4107 /* Well we tried, say unknown for unreliable port types */
4108 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4109 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4110 if (type == DP_DS_PORT_TYPE_VGA ||
4111 type == DP_DS_PORT_TYPE_NON_EDID)
4112 return connector_status_unknown;
4114 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4115 DP_DWN_STRM_PORT_TYPE_MASK;
4116 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4117 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4118 return connector_status_unknown;
4121 /* Anything else is out of spec, warn and ignore */
4122 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4123 return connector_status_disconnected;
/* eDP detect: the panel is fixed, so rely on lid/panel state and treat
 * "unknown" as connected. (Fragment: braces/return elided.) */
4126 static enum drm_connector_status
4127 edp_detect(struct intel_dp *intel_dp)
4129 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4130 enum drm_connector_status status;
4132 status = intel_panel_detect(dev);
4133 if (status == connector_status_unknown)
4134 status = connector_status_connected;
/* PCH-split platforms: check the digital-port live-status bit first, then
 * fall through to DPCD-based detection. (Fragment: braces elided.) */
4139 static enum drm_connector_status
4140 ironlake_dp_detect(struct intel_dp *intel_dp)
4142 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4143 struct drm_i915_private *dev_priv = dev->dev_private;
4144 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4146 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4147 return connector_status_disconnected;
4149 return intel_dp_detect_dpcd(intel_dp);
/*
 * Sample PORT_HOTPLUG_STAT for the given port's live-status bit (VLV vs
 * G4X bit layouts differ). Returns 0 when the bit is clear; the non-zero
 * and default-case returns are elided by the extract.
 */
4152 static int g4x_digital_port_connected(struct drm_device *dev,
4153 struct intel_digital_port *intel_dig_port)
4155 struct drm_i915_private *dev_priv = dev->dev_private;
4158 if (IS_VALLEYVIEW(dev)) {
4159 switch (intel_dig_port->port) {
4161 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4164 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4167 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4173 switch (intel_dig_port->port) {
4175 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4178 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4181 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4188 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
/*
 * G4X/VLV detect: eDP follows panel/lid state; external DP first checks
 * the live-status bit, then falls back to DPCD detection.
 * (Fragment: return statements and some braces are elided.)
 */
4193 static enum drm_connector_status
4194 g4x_dp_detect(struct intel_dp *intel_dp)
4196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4197 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4200 /* Can't disconnect eDP, but you can close the lid... */
4201 if (is_edp(intel_dp)) {
4202 enum drm_connector_status status;
4204 status = intel_panel_detect(dev);
4205 if (status == connector_status_unknown)
4206 status = connector_status_connected;
4210 ret = g4x_digital_port_connected(dev, intel_dig_port);
4212 return connector_status_unknown;
4214 return connector_status_disconnected;
4216 return intel_dp_detect_dpcd(intel_dp);
/* Return the connector's EDID: a duplicate of the cached copy when one
 * exists (NULL for a cached error), otherwise a fresh DDC read.
 * (Fragment: the IS_ERR return and braces are elided.) */
4219 static struct edid *
4220 intel_dp_get_edid(struct intel_dp *intel_dp)
4222 struct intel_connector *intel_connector = intel_dp->attached_connector;
4224 /* use cached edid if we have one */
4225 if (intel_connector->edid) {
4227 if (IS_ERR(intel_connector->edid))
4230 return drm_edid_duplicate(intel_connector->edid);
4232 return drm_get_edid(&intel_connector->base,
4233 &intel_dp->aux.ddc);
/* Fetch and cache the EDID on the connector, then derive has_audio from
 * it unless the force_audio property overrides. (Fragment: braces and the
 * 'edid' declaration line's context are partially elided.) */
4237 intel_dp_set_edid(struct intel_dp *intel_dp)
4239 struct intel_connector *intel_connector = intel_dp->attached_connector;
4242 edid = intel_dp_get_edid(intel_dp);
4243 intel_connector->detect_edid = edid;
4245 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4246 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4248 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached detect-time EDID and clear the derived audio flag.
 * (Fragment: braces elided.) */
4252 intel_dp_unset_edid(struct intel_dp *intel_dp)
4254 struct intel_connector *intel_connector = intel_dp->attached_connector;
4256 kfree(intel_connector->detect_edid);
4257 intel_connector->detect_edid = NULL;
4259 intel_dp->has_audio = false;
/* Grab the port's display power domain reference and return the domain so
 * the caller can drop it via intel_dp_power_put(). (Fragment: braces
 * elided.) */
4262 static enum intel_display_power_domain
4263 intel_dp_power_get(struct intel_dp *dp)
4265 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4266 enum intel_display_power_domain power_domain;
4268 power_domain = intel_display_port_power_domain(encoder);
4269 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4271 return power_domain;
/* Release the power-domain reference taken by intel_dp_power_get().
 * (Fragment: braces elided.) */
4275 intel_dp_power_put(struct intel_dp *dp,
4276 enum intel_display_power_domain power_domain)
4278 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4279 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector_funcs.detect: drop stale EDID, short-circuit MST (MST
 * connectors report disconnected at this level), then under a power-domain
 * reference run the platform-appropriate detect path, probe OUI/MST and
 * cache the EDID on success.
 * (Fragment: goto labels/targets and some braces are elided by the
 * extract, so the error/exit flow is not fully visible here.)
 */
4282 static enum drm_connector_status
4283 intel_dp_detect(struct drm_connector *connector, bool force)
4285 struct intel_dp *intel_dp = intel_attached_dp(connector);
4286 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4287 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4288 struct drm_device *dev = connector->dev;
4289 enum drm_connector_status status;
4290 enum intel_display_power_domain power_domain;
4293 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4294 connector->base.id, connector->name);
4295 intel_dp_unset_edid(intel_dp);
4297 if (intel_dp->is_mst) {
4298 /* MST devices are disconnected from a monitor POV */
4299 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4300 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4301 return connector_status_disconnected;
4304 power_domain = intel_dp_power_get(intel_dp);
4306 /* Can't disconnect eDP, but you can close the lid... */
4307 if (is_edp(intel_dp))
4308 status = edp_detect(intel_dp);
4309 else if (HAS_PCH_SPLIT(dev))
4310 status = ironlake_dp_detect(intel_dp);
4312 status = g4x_dp_detect(intel_dp);
4313 if (status != connector_status_connected)
4316 intel_dp_probe_oui(intel_dp);
4318 ret = intel_dp_probe_mst(intel_dp);
4320 /* if we are in MST mode then this connector
4321 won't appear connected or have anything with EDID on it */
4322 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4323 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4324 status = connector_status_disconnected;
4328 intel_dp_set_edid(intel_dp);
4330 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4331 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4332 status = connector_status_connected;
4335 intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector_funcs.force: re-read the EDID for a connector whose status
 * has been forced to connected, under a power-domain reference.
 * (Fragment: braces and the early-return body are elided.)
 */
4340 intel_dp_force(struct drm_connector *connector)
4342 struct intel_dp *intel_dp = intel_attached_dp(connector);
4343 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4344 enum intel_display_power_domain power_domain;
4346 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4347 connector->base.id, connector->name);
4348 intel_dp_unset_edid(intel_dp);
4350 if (connector->status != connector_status_connected)
4353 power_domain = intel_dp_power_get(intel_dp);
4355 intel_dp_set_edid(intel_dp);
4357 intel_dp_power_put(intel_dp, power_domain);
4359 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4360 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes: report modes from the cached
 * detect-time EDID; for an eDP panel without EDID, fall back to the
 * VBT/panel fixed mode. (Fragment: the 'edid' declaration, braces and
 * final returns are elided by the extract.)
 */
4363 static int intel_dp_get_modes(struct drm_connector *connector)
4365 struct intel_connector *intel_connector = to_intel_connector(connector);
4368 edid = intel_connector->detect_edid;
4370 int ret = intel_connector_update_modes(connector, edid);
4375 /* if eDP has no EDID, fall back to fixed mode */
4376 if (is_edp(intel_attached_dp(connector)) &&
4377 intel_connector->panel.fixed_mode) {
4378 struct drm_display_mode *mode;
4380 mode = drm_mode_duplicate(connector->dev,
4381 intel_connector->panel.fixed_mode);
4383 drm_mode_probed_add(connector, mode);
/* Report whether the cached detect-time EDID advertises audio support.
 * (Fragment: braces, the 'edid' declaration and the return are elided.) */
4392 intel_dp_detect_audio(struct drm_connector *connector)
4394 bool has_audio = false;
4397 edid = to_intel_connector(connector)->detect_edid;
4399 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property: handle the force-audio, broadcast-RGB
 * and (eDP-only) scaling-mode properties; on an effective change, restore
 * the mode on the attached CRTC so the new setting takes effect.
 * (Fragment: 'ret'/'i'/'has_audio' declarations, 'goto done' statements,
 * 'break's and several braces are elided by the extract.)
 */
4405 intel_dp_set_property(struct drm_connector *connector,
4406 struct drm_property *property,
4409 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4410 struct intel_connector *intel_connector = to_intel_connector(connector);
4411 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4412 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4415 ret = drm_object_property_set_value(&connector->base, property, val);
4419 if (property == dev_priv->force_audio_property) {
4423 if (i == intel_dp->force_audio)
4426 intel_dp->force_audio = i;
4428 if (i == HDMI_AUDIO_AUTO)
4429 has_audio = intel_dp_detect_audio(connector);
4431 has_audio = (i == HDMI_AUDIO_ON);
4433 if (has_audio == intel_dp->has_audio)
4436 intel_dp->has_audio = has_audio;
4440 if (property == dev_priv->broadcast_rgb_property) {
4441 bool old_auto = intel_dp->color_range_auto;
4442 uint32_t old_range = intel_dp->color_range;
4445 case INTEL_BROADCAST_RGB_AUTO:
4446 intel_dp->color_range_auto = true;
4448 case INTEL_BROADCAST_RGB_FULL:
4449 intel_dp->color_range_auto = false;
4450 intel_dp->color_range = 0;
4452 case INTEL_BROADCAST_RGB_LIMITED:
4453 intel_dp->color_range_auto = false;
4454 intel_dp->color_range = DP_COLOR_RANGE_16_235;
/* no-op change: skip the modeset below */
4460 if (old_auto == intel_dp->color_range_auto &&
4461 old_range == intel_dp->color_range)
4467 if (is_edp(intel_dp) &&
4468 property == connector->dev->mode_config.scaling_mode_property) {
4469 if (val == DRM_MODE_SCALE_NONE) {
4470 DRM_DEBUG_KMS("no scaling not supported\n");
4474 if (intel_connector->panel.fitting_mode == val) {
4475 /* the eDP scaling property is not changed */
4478 intel_connector->panel.fitting_mode = val;
4486 if (intel_encoder->base.crtc)
4487 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * Connector destructor: free cached EDIDs, tear down the eDP panel state
 * (checked via connector_type, since the encoder may already be gone),
 * then the generic connector cleanup. (Fragment: braces and the final
 * kfree are elided by the extract.)
 */
4493 intel_dp_connector_destroy(struct drm_connector *connector)
4495 struct intel_connector *intel_connector = to_intel_connector(connector);
4497 kfree(intel_connector->detect_edid);
4499 if (!IS_ERR_OR_NULL(intel_connector->edid))
4500 kfree(intel_connector->edid);
4502 /* Can't call is_edp() since the encoder may have been destroyed
4504 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4505 intel_panel_fini(&intel_connector->panel);
4507 drm_connector_cleanup(connector);
/*
 * Encoder destructor: unregister AUX, clean up MST, and for eDP cancel
 * the delayed VDD-off work, force VDD off under the pps lock, and drop
 * the reboot notifier before freeing the port.
 * (Fragment: a pps_lock call and some braces are elided by the extract.)
 */
4511 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4513 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4514 struct intel_dp *intel_dp = &intel_dig_port->dp;
4516 drm_dp_aux_unregister(&intel_dp->aux);
4517 intel_dp_mst_encoder_cleanup(intel_dig_port);
4518 if (is_edp(intel_dp)) {
4519 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4521 * vdd might still be enabled due to the delayed vdd off.
4522 * Make sure vdd is actually turned off here.
4525 edp_panel_vdd_off_sync(intel_dp);
4526 pps_unlock(intel_dp);
4528 if (intel_dp->edp_notifier.notifier_call) {
4529 unregister_reboot_notifier(&intel_dp->edp_notifier);
4530 intel_dp->edp_notifier.notifier_call = NULL;
4533 drm_encoder_cleanup(encoder);
4534 kfree(intel_dig_port);
/* Suspend hook: for eDP, cancel the delayed VDD-off work and force VDD
 * off synchronously under the pps lock. (Fragment: a pps_lock call and
 * braces are elided by the extract.) */
4537 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4539 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4541 if (!is_edp(intel_dp))
4545 * vdd might still be enabled due to the delayed vdd off.
4546 * Make sure vdd is actually turned off here.
4548 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4550 edp_panel_vdd_off_sync(intel_dp);
4551 pps_unlock(intel_dp);
/*
 * Boot/resume sanitization: if the BIOS left panel VDD enabled, take the
 * matching power-domain reference so our bookkeeping is consistent, and
 * schedule the normal delayed VDD off. Caller holds pps_mutex.
 * (Fragment: braces elided by the extract.)
 */
4554 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4556 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4557 struct drm_device *dev = intel_dig_port->base.base.dev;
4558 struct drm_i915_private *dev_priv = dev->dev_private;
4559 enum intel_display_power_domain power_domain;
4561 lockdep_assert_held(&dev_priv->pps_mutex);
4563 if (!edp_have_panel_vdd(intel_dp))
4567 * The VDD bit needs a power domain reference, so if the bit is
4568 * already enabled when we boot or resume, grab this reference and
4569 * schedule a vdd off, so we don't hold on to the reference
4572 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4573 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4574 intel_display_power_get(dev_priv, power_domain);
4576 edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset (eDP only): re-read the power-sequencer
 * assignment on VLV (the BIOS may have changed it) and sanitize any
 * BIOS-enabled VDD, under the pps lock.
 * (Fragment: the pps_lock call and braces are elided by the extract.)
 */
4579 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4581 struct intel_dp *intel_dp;
4583 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4586 intel_dp = enc_to_intel_dp(encoder);
4591 * Read out the current power sequencer assignment,
4592 * in case the BIOS did something with it.
4594 if (IS_VALLEYVIEW(encoder->dev))
4595 vlv_initial_power_sequencer_setup(intel_dp);
4597 intel_edp_panel_vdd_sanitize(intel_dp);
4599 pps_unlock(intel_dp);
/* Connector vtable: detection, property handling and atomic-state helpers
 * for DP connectors. */
4602 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4603 .dpms = intel_connector_dpms,
4604 .detect = intel_dp_detect,
4605 .force = intel_dp_force,
4606 .fill_modes = drm_helper_probe_single_connector_modes,
4607 .set_property = intel_dp_set_property,
4608 .atomic_get_property = intel_connector_atomic_get_property,
4609 .destroy = intel_dp_connector_destroy,
4610 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4611 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe helpers: mode enumeration/validation and encoder selection. */
4614 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4615 .get_modes = intel_dp_get_modes,
4616 .mode_valid = intel_dp_mode_valid,
4617 .best_encoder = intel_best_encoder,
/* Encoder vtable: reset (eDP pps sanitize) and teardown. */
4620 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4621 .reset = intel_dp_encoder_reset,
4622 .destroy = intel_dp_encoder_destroy,
/* Hot-plug callback; the body is elided by the extract (presumably empty
 * or trivial — confirm against the full source). */
4626 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/*
 * HPD interrupt handler for a DP digital port. Ignores long pulses on eDP
 * (avoids a vdd-off/long-hpd feedback loop), then under a power-domain
 * reference: for long pulses re-checks connection and re-reads DPCD /
 * OUI / MST capability; for short pulses services MST ESI or, in SST
 * mode, runs a link-status check under connection_mutex. On failure it
 * drops out of MST mode.
 * (Fragment: goto labels, if/else structure around long vs short pulse,
 * and the final return are elided by the extract.)
 */
4632 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4634 struct intel_dp *intel_dp = &intel_dig_port->dp;
4635 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4636 struct drm_device *dev = intel_dig_port->base.base.dev;
4637 struct drm_i915_private *dev_priv = dev->dev_private;
4638 enum intel_display_power_domain power_domain;
4639 enum irqreturn ret = IRQ_NONE;
4641 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4642 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4644 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4646 * vdd off can generate a long pulse on eDP which
4647 * would require vdd on to handle it, and thus we
4648 * would end up in an endless cycle of
4649 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4651 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4652 port_name(intel_dig_port->port));
4656 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4657 port_name(intel_dig_port->port),
4658 long_hpd ? "long" : "short");
4660 power_domain = intel_display_port_power_domain(intel_encoder);
4661 intel_display_power_get(dev_priv, power_domain);
4665 if (HAS_PCH_SPLIT(dev)) {
4666 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4669 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4673 if (!intel_dp_get_dpcd(intel_dp)) {
4677 intel_dp_probe_oui(intel_dp);
4679 if (!intel_dp_probe_mst(intel_dp))
4683 if (intel_dp->is_mst) {
4684 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4688 if (!intel_dp->is_mst) {
4690 * we'll check the link status via the normal hot plug path later -
4691 * but for short hpds we should check it now
4693 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4694 intel_dp_check_link_status(intel_dp);
4695 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4703 /* if we were in MST mode, and device is not there get out of MST mode */
4704 if (intel_dp->is_mst) {
4705 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4706 intel_dp->is_mst = false;
4707 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4710 intel_display_power_put(dev_priv, power_domain);
4715 /* Return which DP Port should be selected for Transcoder DP control */
/* Walk the encoders on the CRTC and return the output register of the
 * first DP/eDP one. (Fragment: return type, braces and the not-found
 * return are elided by the extract.) */
4717 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4719 struct drm_device *dev = crtc->dev;
4720 struct intel_encoder *intel_encoder;
4721 struct intel_dp *intel_dp;
4723 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4724 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4726 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4727 intel_encoder->type == INTEL_OUTPUT_EDP)
4728 return intel_dp->output_reg;
4734 /* check the VBT to see whether the eDP is on DP-D port */
/* Scan the VBT child devices for an entry on this port whose device_type
 * marks it as eDP. (Fragment: braces, the 'i' declaration, a port-range
 * guard and the returns are elided by the extract.) */
4735 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4737 struct drm_i915_private *dev_priv = dev->dev_private;
4738 union child_device_config *p_child;
4740 static const short port_mapping[] = {
4741 [PORT_B] = PORT_IDPB,
4742 [PORT_C] = PORT_IDPC,
4743 [PORT_D] = PORT_IDPD,
4749 if (!dev_priv->vbt.child_dev_num)
4752 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4753 p_child = dev_priv->vbt.child_dev + i;
4755 if (p_child->common.dvo_port == port_mapping[port] &&
4756 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4757 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the standard DP connector properties (force-audio,
 * broadcast-RGB) and, for eDP panels, a panel-fitter scaling-mode
 * property defaulting to aspect-ratio scaling.
 */
4764 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4766 struct intel_connector *intel_connector = to_intel_connector(connector);
4768 intel_attach_force_audio_property(connector);
4769 intel_attach_broadcast_rgb_property(connector);
/* Default to automatic RGB range selection. */
4770 intel_dp->color_range_auto = true;
/* eDP only: expose the scaling mode property to userspace. */
4772 if (is_edp(intel_dp)) {
4773 drm_mode_create_scaling_mode_property(connector->dev);
4774 drm_object_attach_property(
4776 connector->dev->mode_config.scaling_mode_property,
4777 DRM_MODE_SCALE_ASPECT);
4778 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel-power bookkeeping timestamps with the current jiffies
 * so the very first power-sequencing delays are measured from driver
 * initialization rather than from zero.
 */
4782 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4784 intel_dp->last_power_cycle = jiffies;
4785 intel_dp->last_power_on = jiffies;
4786 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power sequencing delays (pps_delays) by taking
 * the maximum of the values currently programmed in the hardware
 * registers and the VBT, falling back to the eDP spec limits when both
 * are unset.  Results are also cached in the intel_dp delay fields in
 * milliseconds.  Must be called with pps_mutex held.
 */
4790 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4791 struct intel_dp *intel_dp)
4793 struct drm_i915_private *dev_priv = dev->dev_private;
4794 struct edp_power_seq cur, vbt, spec,
4795 *final = &intel_dp->pps_delays;
4796 u32 pp_on, pp_off, pp_div, pp;
4797 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4799 lockdep_assert_held(&dev_priv->pps_mutex);
4801 /* already initialized? */
4802 if (final->t11_t12 != 0)
/* Select the PP register block: PCH on PCH-split, per-pipe on VLV/CHV. */
4805 if (HAS_PCH_SPLIT(dev)) {
4806 pp_ctrl_reg = PCH_PP_CONTROL;
4807 pp_on_reg = PCH_PP_ON_DELAYS;
4808 pp_off_reg = PCH_PP_OFF_DELAYS;
4809 pp_div_reg = PCH_PP_DIVISOR;
4811 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4813 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4814 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4815 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4816 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4819 /* Workaround: Need to write PP_CONTROL with the unlock key as
4820 * the very first thing. */
4821 pp = ironlake_get_pp_control(intel_dp);
4822 I915_WRITE(pp_ctrl_reg, pp);
4824 pp_on = I915_READ(pp_on_reg);
4825 pp_off = I915_READ(pp_off_reg);
4826 pp_div = I915_READ(pp_div_reg);
4828 /* Pull timing values out of registers */
4829 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4830 PANEL_POWER_UP_DELAY_SHIFT;
4832 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4833 PANEL_LIGHT_ON_DELAY_SHIFT;
4835 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4836 PANEL_LIGHT_OFF_DELAY_SHIFT;
4838 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4839 PANEL_POWER_DOWN_DELAY_SHIFT;
/* t11_t12 is stored in 100ms units in hw; scale to the 100us units used here. */
4841 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4842 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4844 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4845 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4847 vbt = dev_priv->vbt.edp_pps;
4849 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4850 * our hw here, which are all in 100usec. */
4851 spec.t1_t3 = 210 * 10;
4852 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4853 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4854 spec.t10 = 500 * 10;
4855 /* This one is special and actually in units of 100ms, but zero
4856 * based in the hw (so we need to add 100 ms). But the sw vbt
4857 * table multiplies it with 1000 to make it in units of 100usec,
4859 spec.t11_t12 = (510 + 100) * 10;
4861 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4862 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4864 /* Use the max of the register settings and vbt. If both are
4865 * unset, fall back to the spec limits. */
4866 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4868 max(cur.field, vbt.field))
/* NOTE(review): assign_final() calls for t8/t9/t10 are missing from this extract. */
4869 assign_final(t1_t3);
4873 assign_final(t11_t12);
/* Convert from the 100us hardware units to milliseconds, rounding up. */
4876 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4877 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4878 intel_dp->backlight_on_delay = get_delay(t8);
4879 intel_dp->backlight_off_delay = get_delay(t9);
4880 intel_dp->panel_power_down_delay = get_delay(t10);
4881 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4884 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4885 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4886 intel_dp->panel_power_cycle_delay);
4888 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4889 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the panel power sequencer registers from the delays computed
 * by intel_dp_init_panel_power_sequencer(), plus the reference clock
 * divider and (where the hardware has them) the panel port select
 * bits.  Must be called with pps_mutex held.
 */
4893 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4894 struct intel_dp *intel_dp)
4896 struct drm_i915_private *dev_priv = dev->dev_private;
4897 u32 pp_on, pp_off, pp_div, port_sel = 0;
/* Reference clock for the divisor: PCH rawclk on PCH-split, hrawclk otherwise. */
4898 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4899 int pp_on_reg, pp_off_reg, pp_div_reg;
4900 enum port port = dp_to_dig_port(intel_dp)->port;
4901 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4903 lockdep_assert_held(&dev_priv->pps_mutex);
4905 if (HAS_PCH_SPLIT(dev)) {
4906 pp_on_reg = PCH_PP_ON_DELAYS;
4907 pp_off_reg = PCH_PP_OFF_DELAYS;
4908 pp_div_reg = PCH_PP_DIVISOR;
4910 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4912 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4913 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4914 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4918 * And finally store the new values in the power sequencer. The
4919 * backlight delays are set to 1 because we do manual waits on them. For
4920 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4921 * we'll end up waiting for the backlight off delay twice: once when we
4922 * do the manual sleep, and once when we disable the panel and wait for
4923 * the PP_STATUS bit to become zero.
4925 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4926 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4927 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4928 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4929 /* Compute the divisor for the pp clock, simply match the Bspec
4931 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
/* t11_t12 is kept in 100us units; the register wants 100ms units. */
4932 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4933 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4935 /* Haswell doesn't have any port selection bits for the panel
4936 * power sequencer any more. */
4937 if (IS_VALLEYVIEW(dev)) {
4938 port_sel = PANEL_PORT_SELECT_VLV(port);
4939 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
/* NOTE(review): the port == PORT_A check for DPA selection is missing
 * from this extract; DPD appears to be the else branch. */
4941 port_sel = PANEL_PORT_SELECT_DPA;
4943 port_sel = PANEL_PORT_SELECT_DPD;
4948 I915_WRITE(pp_on_reg, pp_on);
4949 I915_WRITE(pp_off_reg, pp_off);
4950 I915_WRITE(pp_div_reg, pp_div);
4952 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4953 I915_READ(pp_on_reg),
4954 I915_READ(pp_off_reg),
4955 I915_READ(pp_div_reg));
4959 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4961 * @refresh_rate: RR to be programmed
4963 * This function gets called when refresh rate (RR) has to be changed from
4964 * one frequency to another. Switches can be between high and low RR
4965 * supported by the panel or to any other RR based on media playback (in
4966 * this case, RR value needs to be passed from user space).
4968 * The caller of this function needs to take a lock on dev_priv->drrs.
4970 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4973 struct intel_encoder *encoder;
4974 struct intel_digital_port *dig_port = NULL;
4975 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4976 struct intel_crtc_state *config = NULL;
4977 struct intel_crtc *intel_crtc = NULL;
4979 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
/* Sanity: reject non-positive rates and unconfigured DRRS up front. */
4981 if (refresh_rate <= 0) {
4982 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4986 if (intel_dp == NULL) {
4987 DRM_DEBUG_KMS("DRRS not supported.\n");
4992 * FIXME: This needs proper synchronization with psr state for some
4993 * platforms that cannot have PSR and DRRS enabled at the same time.
4996 dig_port = dp_to_dig_port(intel_dp);
4997 encoder = &dig_port->base;
4998 intel_crtc = to_intel_crtc(encoder->base.crtc);
5001 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5005 config = intel_crtc->config;
5007 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5008 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n")
5012 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5014 index = DRRS_LOW_RR;
5016 if (index == dev_priv->drrs.refresh_rate_type) {
5018 "DRRS requested for previously set RR...ignoring\n");
5022 if (!intel_crtc->active) {
5023 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch RR by reprogramming the link M/N values. */
5027 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5030 intel_dp_set_m_n(intel_crtc, M1_N1);
5033 intel_dp_set_m_n(intel_crtc, M2_N2);
5037 DRM_ERROR("Unsupported refreshrate type\n");
/* Gen7/VLV path: toggle the RR-switch bit in PIPECONF instead. */
5039 } else if (INTEL_INFO(dev)->gen > 6) {
5040 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5041 val = I915_READ(reg);
5043 if (index > DRRS_HIGH_RR) {
5044 if (IS_VALLEYVIEW(dev))
5045 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5047 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5049 if (IS_VALLEYVIEW(dev))
5050 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5052 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5054 I915_WRITE(reg, val);
5057 dev_priv->drrs.refresh_rate_type = index;
5059 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5063 * intel_edp_drrs_enable - init drrs struct if supported
5064 * @intel_dp: DP struct
5066 * Initializes frontbuffer_bits and drrs.dp
5068 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5070 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5071 struct drm_i915_private *dev_priv = dev->dev_private;
5072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5073 struct drm_crtc *crtc = dig_port->base.base.crtc;
5074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* Bail out silently when the current CRTC config has no DRRS support. */
5076 if (!intel_crtc->config->has_drrs) {
5077 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5081 mutex_lock(&dev_priv->drrs.mutex);
/* Only a single eDP panel may own the DRRS state at a time. */
5082 if (WARN_ON(dev_priv->drrs.dp)) {
5083 DRM_ERROR("DRRS already enabled\n");
5087 dev_priv->drrs.busy_frontbuffer_bits = 0;
5089 dev_priv->drrs.dp = intel_dp;
5092 mutex_unlock(&dev_priv->drrs.mutex);
5096 * intel_edp_drrs_disable - Disable DRRS
5097 * @intel_dp: DP struct
5100 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5102 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5103 struct drm_i915_private *dev_priv = dev->dev_private;
5104 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5105 struct drm_crtc *crtc = dig_port->base.base.crtc;
5106 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5108 if (!intel_crtc->config->has_drrs)
5111 mutex_lock(&dev_priv->drrs.mutex);
/* Nothing to do if DRRS was never enabled. */
5112 if (!dev_priv->drrs.dp) {
5113 mutex_unlock(&dev_priv->drrs.mutex);
/* If we are currently at the low refresh rate, restore the panel's
 * fixed-mode (high) refresh rate before tearing DRRS down. */
5117 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5118 intel_dp_set_drrs_state(dev_priv->dev,
5119 intel_dp->attached_connector->panel.
5120 fixed_mode->vrefresh);
5122 dev_priv->drrs.dp = NULL;
5123 mutex_unlock(&dev_priv->drrs.mutex);
/* Cancel outside the mutex: the work item takes drrs.mutex itself. */
5125 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed-work handler that drops the panel to its downclocked (low)
 * refresh rate once the screen has been idle, unless frontbuffer
 * activity was recorded in the meantime.
 */
5128 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5130 struct drm_i915_private *dev_priv =
5131 container_of(work, typeof(*dev_priv), drrs.work.work);
5132 struct intel_dp *intel_dp;
5134 mutex_lock(&dev_priv->drrs.mutex);
5136 intel_dp = dev_priv->drrs.dp;
5142 * The delayed work can race with an invalidate hence we need to
5146 if (dev_priv->drrs.busy_frontbuffer_bits)
/* Only switch down if we are not already at the low refresh rate. */
5149 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5150 intel_dp_set_drrs_state(dev_priv->dev,
5151 intel_dp->attached_connector->panel.
5152 downclock_mode->vrefresh);
5155 mutex_unlock(&dev_priv->drrs.mutex);
5159 * intel_edp_drrs_invalidate - Invalidate DRRS
5161 * @frontbuffer_bits: frontbuffer plane tracking bits
5163 * When there is a disturbance on screen (due to cursor movement/time
5164 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5167 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5169 void intel_edp_drrs_invalidate(struct drm_device *dev,
5170 unsigned frontbuffer_bits)
5172 struct drm_i915_private *dev_priv = dev->dev_private;
5173 struct drm_crtc *crtc;
5176 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Stop any pending downclock attempt; the screen just became busy. */
5179 cancel_delayed_work(&dev_priv->drrs.work);
5181 mutex_lock(&dev_priv->drrs.mutex);
5182 if (!dev_priv->drrs.dp) {
5183 mutex_unlock(&dev_priv->drrs.mutex);
5187 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5188 pipe = to_intel_crtc(crtc)->pipe;
/* Activity detected: make sure we are back at the high refresh rate. */
5190 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5191 intel_dp_set_drrs_state(dev_priv->dev,
5192 dev_priv->drrs.dp->attached_connector->panel.
5193 fixed_mode->vrefresh);
/* Only track frontbuffer bits belonging to the DRRS pipe. */
5196 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5198 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5199 mutex_unlock(&dev_priv->drrs.mutex);
5203 * intel_edp_drrs_flush - Flush DRRS
5205 * @frontbuffer_bits: frontbuffer plane tracking bits
5207 * When there is no movement on screen, DRRS work can be scheduled.
5208 * This DRRS work is responsible for setting relevant registers after a
5209 * timeout of 1 second.
5211 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5213 void intel_edp_drrs_flush(struct drm_device *dev,
5214 unsigned frontbuffer_bits)
5216 struct drm_i915_private *dev_priv = dev->dev_private;
5217 struct drm_crtc *crtc;
5220 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5223 cancel_delayed_work(&dev_priv->drrs.work);
5225 mutex_lock(&dev_priv->drrs.mutex);
5226 if (!dev_priv->drrs.dp) {
5227 mutex_unlock(&dev_priv->drrs.mutex);
5231 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5232 pipe = to_intel_crtc(crtc)->pipe;
/* The flushed planes are no longer dirty; clear their busy bits. */
5233 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* All planes idle and still at high RR: arm the 1s downclock timer. */
5235 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5236 !dev_priv->drrs.busy_frontbuffer_bits)
5237 schedule_delayed_work(&dev_priv->drrs.work,
5238 msecs_to_jiffies(1000));
5239 mutex_unlock(&dev_priv->drrs.mutex);
5243 * DOC: Display Refresh Rate Switching (DRRS)
5245 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5246 * which enables switching between low and high refresh rates,
5247 * dynamically, based on the usage scenario. This feature is applicable
5248 * for internal panels.
5250 * Indication that the panel supports DRRS is given by the panel EDID, which
5251 * would list multiple refresh rates for one resolution.
5253 * DRRS is of 2 types - static and seamless.
5254 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5255 * (may appear as a blink on screen) and is used in dock-undock scenario.
5256 * Seamless DRRS involves changing RR without any visual effect to the user
5257 * and can be used during normal system usage. This is done by programming
5258 * certain registers.
5260 * Support for static/seamless DRRS may be indicated in the VBT based on
5261 * inputs from the panel spec.
5263 * DRRS saves power by switching to low RR based on usage scenarios.
5266 * The implementation is based on frontbuffer tracking implementation.
5267 * When there is a disturbance on the screen triggered by user activity or a
5268 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5269 * When there is no movement on screen, after a timeout of 1 second, a switch
5270 * to low RR is made.
5271 * For integration with frontbuffer tracking code,
5272 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5274 * DRRS can be further extended to support other internal panels and also
5275 * the scenario of video playback wherein RR is set based on the rate
5276 * requested by userspace.
5280 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5281 * @intel_connector: eDP connector
5282 * @fixed_mode: preferred mode of panel
5284 * This function is called only once at driver load to initialize basic
5288 * Downclock mode if panel supports it, else return NULL.
5289 * DRRS support is determined by the presence of downclock mode (apart
5290 * from VBT setting).
5292 static struct drm_display_mode *
5293 intel_dp_drrs_init(struct intel_connector *intel_connector,
5294 struct drm_display_mode *fixed_mode)
5296 struct drm_connector *connector = &intel_connector->base;
5297 struct drm_device *dev = connector->dev;
5298 struct drm_i915_private *dev_priv = dev->dev_private;
5299 struct drm_display_mode *downclock_mode = NULL;
/* Work item and mutex are set up even when DRRS ends up unsupported. */
5301 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5302 mutex_init(&dev_priv->drrs.mutex);
/* DRRS requires Gen7+ hardware. */
5304 if (INTEL_INFO(dev)->gen <= 6) {
5305 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5309 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5310 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* The downclock mode comes from a second, lower refresh rate the panel
 * lists for the fixed mode's resolution. */
5314 downclock_mode = intel_find_panel_downclock
5315 (dev, fixed_mode, connector);
5317 if (!downclock_mode) {
5318 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5322 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5324 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5325 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5326 return downclock_mode;
/*
 * One-time eDP connector setup: sanitize VDD state, cache the DPCD and
 * EDID, program the panel power sequencer registers, pick a fixed mode
 * (EDID preferred, VBT fallback), optionally set up DRRS, and
 * initialize the panel/backlight.  Returns false if the panel looks
 * like a "ghost" (DPCD read fails).
 *
 * NOTE(review): several lines (braces, early returns, some variable
 * declarations such as has_dpcd/edid) are missing from this extract.
 */
5329 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5330 struct intel_connector *intel_connector)
5332 struct drm_connector *connector = &intel_connector->base;
5333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5334 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5335 struct drm_device *dev = intel_encoder->base.dev;
5336 struct drm_i915_private *dev_priv = dev->dev_private;
5337 struct drm_display_mode *fixed_mode = NULL;
5338 struct drm_display_mode *downclock_mode = NULL;
5340 struct drm_display_mode *scan;
5342 enum pipe pipe = INVALID_PIPE;
/* Everything below is eDP-only. */
5344 if (!is_edp(intel_dp))
5348 intel_edp_panel_vdd_sanitize(intel_dp);
5349 pps_unlock(intel_dp);
5351 /* Cache DPCD and EDID for edp. */
5352 has_dpcd = intel_dp_get_dpcd(intel_dp);
5355 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5356 dev_priv->no_aux_handshake =
5357 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5358 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5360 /* if this fails, presume the device is a ghost */
5361 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5365 /* We now know it's not a ghost, init power sequence regs. */
5367 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5368 pps_unlock(intel_dp);
5370 mutex_lock(&dev->mode_config.mutex);
5371 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5373 if (drm_add_edid_modes(connector, edid)) {
5374 drm_mode_connector_update_edid_property(connector,
5376 drm_edid_to_eld(connector, edid);
/* Distinguish "EDID present but unusable" from "no EDID at all". */
5379 edid = ERR_PTR(-EINVAL);
5382 edid = ERR_PTR(-ENOENT);
5384 intel_connector->edid = edid;
5386 /* prefer fixed mode from EDID if available */
5387 list_for_each_entry(scan, &connector->probed_modes, head) {
5388 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5389 fixed_mode = drm_mode_duplicate(dev, scan);
5390 downclock_mode = intel_dp_drrs_init(
5391 intel_connector, fixed_mode);
5396 /* fallback to VBT if available for eDP */
5397 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5398 fixed_mode = drm_mode_duplicate(dev,
5399 dev_priv->vbt.lfp_lvds_vbt_mode);
5401 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5403 mutex_unlock(&dev->mode_config.mutex);
/* VLV panels may stay powered across reboot; hook the reboot notifier. */
5405 if (IS_VALLEYVIEW(dev)) {
5406 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5407 register_reboot_notifier(&intel_dp->edp_notifier);
5410 * Figure out the current pipe for the initial backlight setup.
5411 * If the current pipe isn't valid, try the PPS pipe, and if that
5412 * fails just assume pipe A.
5414 if (IS_CHERRYVIEW(dev))
5415 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5417 pipe = PORT_TO_PIPE(intel_dp->DP);
5419 if (pipe != PIPE_A && pipe != PIPE_B)
5420 pipe = intel_dp->pps_pipe;
5422 if (pipe != PIPE_A && pipe != PIPE_B)
5425 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5429 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5430 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5431 intel_panel_setup_backlight(connector, pipe);
/*
 * Set up the DP/eDP connector for a digital port: install per-platform
 * AUX vfuncs, create and register the DRM connector, configure the HPD
 * pin, initialize the panel power sequencer for eDP, init MST where
 * supported, and finish eDP panel setup.  On eDP init failure the
 * connector is torn down again.
 */
5437 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5438 struct intel_connector *intel_connector)
5440 struct drm_connector *connector = &intel_connector->base;
5441 struct intel_dp *intel_dp = &intel_dig_port->dp;
5442 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5443 struct drm_device *dev = intel_encoder->base.dev;
5444 struct drm_i915_private *dev_priv = dev->dev_private;
5445 enum port port = intel_dig_port->port;
5448 intel_dp->pps_pipe = INVALID_PIPE;
5450 /* intel_dp vfuncs */
5451 if (INTEL_INFO(dev)->gen >= 9)
5452 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5453 else if (IS_VALLEYVIEW(dev))
5454 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5455 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5456 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5457 else if (HAS_PCH_SPLIT(dev))
5458 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5460 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5462 if (INTEL_INFO(dev)->gen >= 9)
5463 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5465 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5467 /* Preserve the current hw state. */
5468 intel_dp->DP = I915_READ(intel_dp->output_reg);
5469 intel_dp->attached_connector = intel_connector;
5471 if (intel_dp_is_edp(dev, port))
5472 type = DRM_MODE_CONNECTOR_eDP;
5474 type = DRM_MODE_CONNECTOR_DisplayPort;
5477 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5478 * for DP the encoder type can be set by the caller to
5479 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5481 if (type == DRM_MODE_CONNECTOR_eDP)
5482 intel_encoder->type = INTEL_OUTPUT_EDP;
5484 /* eDP only on port B and/or C on vlv/chv */
5485 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5486 port != PORT_B && port != PORT_C))
5489 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5490 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5493 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5494 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5496 connector->interlace_allowed = true;
5497 connector->doublescan_allowed = 0;
5499 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5500 edp_panel_vdd_work);
5502 intel_connector_attach_encoder(intel_connector, intel_encoder);
5503 drm_connector_register(connector);
/* DDI platforms need their own connector hw-state readout. */
5506 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5508 intel_connector->get_hw_state = intel_connector_get_hw_state;
5509 intel_connector->unregister = intel_dp_connector_unregister;
5511 /* Set up the hotplug pin. */
5514 intel_encoder->hpd_pin = HPD_PORT_A;
5517 intel_encoder->hpd_pin = HPD_PORT_B;
5520 intel_encoder->hpd_pin = HPD_PORT_C;
5523 intel_encoder->hpd_pin = HPD_PORT_D;
5529 if (is_edp(intel_dp)) {
5531 intel_dp_init_panel_power_timestamps(intel_dp);
/* VLV must first pick its per-pipe power sequencer. */
5532 if (IS_VALLEYVIEW(dev))
5533 vlv_initial_power_sequencer_setup(intel_dp);
5535 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5536 pps_unlock(intel_dp);
5539 intel_dp_aux_init(intel_dp, intel_connector);
5541 /* init MST on ports that can support it */
5542 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5543 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5544 intel_dp_mst_encoder_init(intel_dig_port,
5545 intel_connector->base.base.id);
/* eDP init failed ("ghost" panel): undo everything done above. */
5549 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5550 drm_dp_aux_unregister(&intel_dp->aux);
5551 if (is_edp(intel_dp)) {
5552 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5554 * vdd might still be enabled due to the delayed vdd off.
5555 * Make sure vdd is actually turned off here.
5558 edp_panel_vdd_off_sync(intel_dp);
5559 pps_unlock(intel_dp);
5561 drm_connector_unregister(connector);
5562 drm_connector_cleanup(connector);
5566 intel_dp_add_properties(intel_dp, connector);
5568 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5569 * 0xd. Failure to do so will result in spurious interrupts being
5570 * generated on the port when a cable is not attached.
5572 if (IS_G4X(dev) && !IS_GM45(dev)) {
5573 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5574 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
/*
 * Top-level entry point to register a DP encoder on @port with output
 * register @output_reg: allocates the digital port and connector,
 * initializes the DRM encoder with per-platform enable/disable hooks,
 * and hands off to intel_dp_init_connector().  On connector init
 * failure the encoder and allocations are cleaned up.
 */
5581 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5583 struct drm_i915_private *dev_priv = dev->dev_private;
5584 struct intel_digital_port *intel_dig_port;
5585 struct intel_encoder *intel_encoder;
5586 struct drm_encoder *encoder;
5587 struct intel_connector *intel_connector;
5589 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5590 if (!intel_dig_port)
5593 intel_connector = intel_connector_alloc();
5594 if (!intel_connector) {
/* Undo the first allocation before bailing. */
5595 kfree(intel_dig_port);
5599 intel_encoder = &intel_dig_port->base;
5600 encoder = &intel_encoder->base;
5602 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5603 DRM_MODE_ENCODER_TMDS);
5605 intel_encoder->compute_config = intel_dp_compute_config;
5606 intel_encoder->disable = intel_disable_dp;
5607 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5608 intel_encoder->get_config = intel_dp_get_config;
5609 intel_encoder->suspend = intel_dp_encoder_suspend;
/* Per-platform enable/disable hook selection: CHV, then VLV, then the
 * g4x/ilk default path. */
5610 if (IS_CHERRYVIEW(dev)) {
5611 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5612 intel_encoder->pre_enable = chv_pre_enable_dp;
5613 intel_encoder->enable = vlv_enable_dp;
5614 intel_encoder->post_disable = chv_post_disable_dp;
5615 } else if (IS_VALLEYVIEW(dev)) {
5616 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5617 intel_encoder->pre_enable = vlv_pre_enable_dp;
5618 intel_encoder->enable = vlv_enable_dp;
5619 intel_encoder->post_disable = vlv_post_disable_dp;
5621 intel_encoder->pre_enable = g4x_pre_enable_dp;
5622 intel_encoder->enable = g4x_enable_dp;
5623 if (INTEL_INFO(dev)->gen >= 5)
5624 intel_encoder->post_disable = ilk_post_disable_dp;
5627 intel_dig_port->port = port;
5628 intel_dig_port->dp.output_reg = output_reg;
5630 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/* CRTC mask: which pipes this encoder may be driven by. */
5631 if (IS_CHERRYVIEW(dev)) {
5633 intel_encoder->crtc_mask = 1 << 2;
5635 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5637 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5639 intel_encoder->cloneable = 0;
5640 intel_encoder->hot_plug = intel_dp_hot_plug;
5642 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5643 dev_priv->hpd_irq_port[port] = intel_dig_port;
5645 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5646 drm_encoder_cleanup(encoder);
5647 kfree(intel_dig_port);
5648 kfree(intel_connector);
/*
 * Suspend the MST topology manager on every registered DP digital port
 * that is currently operating in MST mode.
 */
5652 void intel_dp_mst_suspend(struct drm_device *dev)
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5658 for (i = 0; i < I915_MAX_PORTS; i++) {
5659 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5660 if (!intel_dig_port)
5663 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
/* Skip ports that never advertised MST capability. */
5664 if (!intel_dig_port->dp.can_mst)
5666 if (intel_dig_port->dp.is_mst)
5667 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5672 void intel_dp_mst_resume(struct drm_device *dev)
5674 struct drm_i915_private *dev_priv = dev->dev_private;
5677 for (i = 0; i < I915_MAX_PORTS; i++) {
5678 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5679 if (!intel_dig_port)
5681 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5684 if (!intel_dig_port->dp.can_mst)
5687 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5689 intel_dp_check_mst_status(&intel_dig_port->dp);