2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 static const int skl_rates[] = { 162000, 216000, 270000,
95 324000, 432000, 540000 };
96 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
97 243000, 270000, 324000, 405000,
98 420000, 432000, 540000 };
99 static const int default_rates[] = { 162000, 270000, 540000 };
102 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103 * @intel_dp: DP struct
105 * If a CPU or PCH DP output is attached to an eDP panel, this function
106 * will return true, and false otherwise.
108 static bool is_edp(struct intel_dp *intel_dp)
110 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
115 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 return intel_dig_port->base.base.dev;
122 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
127 static void intel_dp_link_down(struct intel_dp *intel_dp);
128 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
129 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
130 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
131 static void vlv_steal_power_sequencer(struct drm_device *dev,
135 intel_dp_max_link_bw(struct intel_dp *intel_dp)
137 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
139 switch (max_link_bw) {
140 case DP_LINK_BW_1_62:
145 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
147 max_link_bw = DP_LINK_BW_1_62;
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 struct drm_device *dev = intel_dig_port->base.base.dev;
157 u8 source_max, sink_max;
160 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
164 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
166 return min(source_max, sink_max);
170 * The units on the numbers in the next two are... bizarre. Examples will
171 * make it clearer; this one parallels an example in the eDP spec.
173 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
175 * 270000 * 1 * 8 / 10 == 216000
177 * The actual data capacity of that configuration is 2.16Gbit/s, so the
178 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
179 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180 * 119000. At 18bpp that's 2142000 kilobits per second.
182 * Thus the strange-looking division by 10 in intel_dp_link_required, to
183 * get the result in decakilobits instead of kilobits.
/*
 * Required link bandwidth for a mode, in decakilobits per second (see the
 * units discussion above): ceil(pixel_clock[kHz] * bpp / 10).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* Adding 9 before the divide rounds up to whole decakilobits. */
	return (kilobits + 9) / 10;
}
/*
 * Usable data rate of a link in decakilobits per second: the raw symbol
 * rate times the lane count, derated by 8b/10b channel coding (80%).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
198 static enum drm_mode_status
199 intel_dp_mode_valid(struct drm_connector *connector,
200 struct drm_display_mode *mode)
202 struct intel_dp *intel_dp = intel_attached_dp(connector);
203 struct intel_connector *intel_connector = to_intel_connector(connector);
204 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
205 int target_clock = mode->clock;
206 int max_rate, mode_rate, max_lanes, max_link_clock;
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
212 if (mode->vdisplay > fixed_mode->vdisplay)
215 target_clock = fixed_mode->clock;
218 max_link_clock = intel_dp_max_link_rate(intel_dp);
219 max_lanes = intel_dp_max_lane_count(intel_dp);
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
224 if (mode_rate > max_rate)
225 return MODE_CLOCK_HIGH;
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
/*
 * Pack up to four bytes into one big-endian 32-bit word for the AUX data
 * registers: src[0] lands in the most significant byte.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t word = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		word |= ((uint32_t)src[i]) << ((3 - i) * 8);

	return word;
}
/* Unpack a big-endian 32-bit AUX data word into up to four bytes of @dst. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
257 /* hrawclock is 1/4 the FSB frequency */
259 intel_hrawclk(struct drm_device *dev)
261 struct drm_i915_private *dev_priv = dev->dev_private;
264 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265 if (IS_VALLEYVIEW(dev))
268 clkcfg = I915_READ(CLKCFG);
269 switch (clkcfg & CLKCFG_FSB_MASK) {
278 case CLKCFG_FSB_1067:
280 case CLKCFG_FSB_1333:
282 /* these two are just a guess; one of them might be right */
283 case CLKCFG_FSB_1600:
284 case CLKCFG_FSB_1600_ALT:
292 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
293 struct intel_dp *intel_dp);
295 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
296 struct intel_dp *intel_dp);
298 static void pps_lock(struct intel_dp *intel_dp)
300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 struct intel_encoder *encoder = &intel_dig_port->base;
302 struct drm_device *dev = encoder->base.dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 enum intel_display_power_domain power_domain;
307 * See vlv_power_sequencer_reset() why we need
308 * a power domain reference here.
310 power_domain = intel_display_port_power_domain(encoder);
311 intel_display_power_get(dev_priv, power_domain);
313 mutex_lock(&dev_priv->pps_mutex);
316 static void pps_unlock(struct intel_dp *intel_dp)
318 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
319 struct intel_encoder *encoder = &intel_dig_port->base;
320 struct drm_device *dev = encoder->base.dev;
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 enum intel_display_power_domain power_domain;
324 mutex_unlock(&dev_priv->pps_mutex);
326 power_domain = intel_display_port_power_domain(encoder);
327 intel_display_power_put(dev_priv, power_domain);
331 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 struct drm_device *dev = intel_dig_port->base.base.dev;
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 enum pipe pipe = intel_dp->pps_pipe;
340 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
342 pipe_name(pipe), port_name(intel_dig_port->port)))
345 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346 pipe_name(pipe), port_name(intel_dig_port->port));
348 /* Preserve the BIOS-computed detected bit. This is
349 * supposed to be read-only.
351 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353 DP |= DP_PORT_WIDTH(1);
354 DP |= DP_LINK_TRAIN_PAT_1;
356 if (IS_CHERRYVIEW(dev))
357 DP |= DP_PIPE_SELECT_CHV(pipe);
358 else if (pipe == PIPE_B)
359 DP |= DP_PIPEB_SELECT;
361 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
364 * The DPLL for the pipe must be enabled for this to work.
365 * So enable temporarily it if it's not already enabled.
368 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
372 * Similar magic as in intel_dp_enable_port().
373 * We _must_ do this port enable + disable trick
374 * to make this power seqeuencer lock onto the port.
375 * Otherwise even VDD force bit won't work.
377 I915_WRITE(intel_dp->output_reg, DP);
378 POSTING_READ(intel_dp->output_reg);
380 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
383 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384 POSTING_READ(intel_dp->output_reg);
387 vlv_force_pll_off(dev, pipe);
391 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
394 struct drm_device *dev = intel_dig_port->base.base.dev;
395 struct drm_i915_private *dev_priv = dev->dev_private;
396 struct intel_encoder *encoder;
397 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
400 lockdep_assert_held(&dev_priv->pps_mutex);
402 /* We should never land here with regular DP ports */
403 WARN_ON(!is_edp(intel_dp));
405 if (intel_dp->pps_pipe != INVALID_PIPE)
406 return intel_dp->pps_pipe;
409 * We don't have power sequencer currently.
410 * Pick one that's not used by other ports.
412 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
414 struct intel_dp *tmp;
416 if (encoder->type != INTEL_OUTPUT_EDP)
419 tmp = enc_to_intel_dp(&encoder->base);
421 if (tmp->pps_pipe != INVALID_PIPE)
422 pipes &= ~(1 << tmp->pps_pipe);
426 * Didn't find one. This should not happen since there
427 * are two power sequencers and up to two eDP ports.
429 if (WARN_ON(pipes == 0))
432 pipe = ffs(pipes) - 1;
434 vlv_steal_power_sequencer(dev, pipe);
435 intel_dp->pps_pipe = pipe;
437 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
438 pipe_name(intel_dp->pps_pipe),
439 port_name(intel_dig_port->port));
441 /* init power sequencer on this pipe and port */
442 intel_dp_init_panel_power_sequencer(dev, intel_dp);
443 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
446 * Even vdd force doesn't work until we've made
447 * the power sequencer lock in on the port.
449 vlv_power_sequencer_kick(intel_dp);
451 return intel_dp->pps_pipe;
454 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
457 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
460 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
463 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
466 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
469 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
476 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
478 vlv_pipe_check pipe_check)
482 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484 PANEL_PORT_SELECT_MASK;
486 if (port_sel != PANEL_PORT_SELECT_VLV(port))
489 if (!pipe_check(dev_priv, pipe))
499 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
501 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
502 struct drm_device *dev = intel_dig_port->base.base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private;
504 enum port port = intel_dig_port->port;
506 lockdep_assert_held(&dev_priv->pps_mutex);
508 /* try to find a pipe with this port selected */
509 /* first pick one where the panel is on */
510 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
512 /* didn't find one? pick one where vdd is on */
513 if (intel_dp->pps_pipe == INVALID_PIPE)
514 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 vlv_pipe_has_vdd_on);
516 /* didn't find one? pick one with just the correct port */
517 if (intel_dp->pps_pipe == INVALID_PIPE)
518 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
521 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
522 if (intel_dp->pps_pipe == INVALID_PIPE) {
523 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
528 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
529 port_name(port), pipe_name(intel_dp->pps_pipe));
531 intel_dp_init_panel_power_sequencer(dev, intel_dp);
532 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
535 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
537 struct drm_device *dev = dev_priv->dev;
538 struct intel_encoder *encoder;
540 if (WARN_ON(!IS_VALLEYVIEW(dev)))
544 * We can't grab pps_mutex here due to deadlock with power_domain
545 * mutex when power_domain functions are called while holding pps_mutex.
546 * That also means that in order to use pps_pipe the code needs to
547 * hold both a power domain reference and pps_mutex, and the power domain
548 * reference get/put must be done while _not_ holding pps_mutex.
549 * pps_{lock,unlock}() do these steps in the correct order, so one
550 * should use them always.
553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
554 struct intel_dp *intel_dp;
556 if (encoder->type != INTEL_OUTPUT_EDP)
559 intel_dp = enc_to_intel_dp(&encoder->base);
560 intel_dp->pps_pipe = INVALID_PIPE;
564 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
566 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568 if (HAS_PCH_SPLIT(dev))
569 return PCH_PP_CONTROL;
571 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
574 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
578 if (HAS_PCH_SPLIT(dev))
579 return PCH_PP_STATUS;
581 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
584 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
585 This function only applicable when panel PM state is not to be tracked */
586 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
589 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
591 struct drm_device *dev = intel_dp_to_dev(intel_dp);
592 struct drm_i915_private *dev_priv = dev->dev_private;
594 u32 pp_ctrl_reg, pp_div_reg;
596 if (!is_edp(intel_dp) || code != SYS_RESTART)
601 if (IS_VALLEYVIEW(dev)) {
602 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
604 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
605 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
606 pp_div = I915_READ(pp_div_reg);
607 pp_div &= PP_REFERENCE_DIVIDER_MASK;
609 /* 0x1F write to PP_DIV_REG sets max cycle delay */
610 I915_WRITE(pp_div_reg, pp_div | 0x1F);
611 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
612 msleep(intel_dp->panel_power_cycle_delay);
615 pps_unlock(intel_dp);
620 static bool edp_have_panel_power(struct intel_dp *intel_dp)
622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
623 struct drm_i915_private *dev_priv = dev->dev_private;
625 lockdep_assert_held(&dev_priv->pps_mutex);
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
631 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
634 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
637 struct drm_i915_private *dev_priv = dev->dev_private;
639 lockdep_assert_held(&dev_priv->pps_mutex);
641 if (IS_VALLEYVIEW(dev) &&
642 intel_dp->pps_pipe == INVALID_PIPE)
645 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
649 intel_dp_check_edp(struct intel_dp *intel_dp)
651 struct drm_device *dev = intel_dp_to_dev(intel_dp);
652 struct drm_i915_private *dev_priv = dev->dev_private;
654 if (!is_edp(intel_dp))
657 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
658 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
660 I915_READ(_pp_stat_reg(intel_dp)),
661 I915_READ(_pp_ctrl_reg(intel_dp)));
666 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
669 struct drm_device *dev = intel_dig_port->base.base.dev;
670 struct drm_i915_private *dev_priv = dev->dev_private;
671 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
675 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
677 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
678 msecs_to_jiffies_timeout(10));
680 done = wait_for_atomic(C, 10) == 0;
682 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
689 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
695 * The clock divider is based off the hrawclk, and would like to run at
696 * 2MHz. So, take the hrawclk value and divide by 2 and use that
698 return index ? 0 : intel_hrawclk(dev) / 2;
701 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
705 struct drm_i915_private *dev_priv = dev->dev_private;
710 if (intel_dig_port->port == PORT_A) {
711 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
713 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
717 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720 struct drm_device *dev = intel_dig_port->base.base.dev;
721 struct drm_i915_private *dev_priv = dev->dev_private;
723 if (intel_dig_port->port == PORT_A) {
726 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
727 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728 /* Workaround for non-ULT HSW */
735 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
739 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741 return index ? 0 : 100;
744 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
747 * SKL doesn't need us to program the AUX clock divider (Hardware will
748 * derive the clock from CDCLK automatically). We still implement the
749 * get_aux_clock_divider vfunc to plug-in into the existing code.
751 return index ? 0 : 1;
754 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
757 uint32_t aux_clock_divider)
759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760 struct drm_device *dev = intel_dig_port->base.base.dev;
761 uint32_t precharge, timeout;
768 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
771 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
773 return DP_AUX_CH_CTL_SEND_BUSY |
775 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
776 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_RECEIVE_ERROR |
779 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
781 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
784 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
789 return DP_AUX_CH_CTL_SEND_BUSY |
791 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792 DP_AUX_CH_CTL_TIME_OUT_ERROR |
793 DP_AUX_CH_CTL_TIME_OUT_1600us |
794 DP_AUX_CH_CTL_RECEIVE_ERROR |
795 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
800 intel_dp_aux_ch(struct intel_dp *intel_dp,
801 const uint8_t *send, int send_bytes,
802 uint8_t *recv, int recv_size)
804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805 struct drm_device *dev = intel_dig_port->base.base.dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
807 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808 uint32_t ch_data = ch_ctl + 4;
809 uint32_t aux_clock_divider;
810 int i, ret, recv_bytes;
813 bool has_aux_irq = HAS_AUX_IRQ(dev);
819 * We will be called with VDD already enabled for dpcd/edid/oui reads.
820 * In such cases we want to leave VDD enabled and it's up to upper layers
821 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
824 vdd = edp_panel_vdd_on(intel_dp);
826 /* dp aux is extremely sensitive to irq latency, hence request the
827 * lowest possible wakeup latency and so prevent the cpu from going into
830 pm_qos_update_request(&dev_priv->pm_qos, 0);
832 intel_dp_check_edp(intel_dp);
834 intel_aux_display_runtime_get(dev_priv);
836 /* Try to wait for any previous AUX channel activity */
837 for (try = 0; try < 3; try++) {
838 status = I915_READ_NOTRACE(ch_ctl);
839 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
845 WARN(1, "dp_aux_ch not started status 0x%08x\n",
851 /* Only 5 data registers! */
852 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
857 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
858 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
863 /* Must try at least 3 times according to DP spec */
864 for (try = 0; try < 5; try++) {
865 /* Load the send data into the aux channel data registers */
866 for (i = 0; i < send_bytes; i += 4)
867 I915_WRITE(ch_data + i,
868 intel_dp_pack_aux(send + i,
871 /* Send the command and wait for it to complete */
872 I915_WRITE(ch_ctl, send_ctl);
874 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
876 /* Clear done status and any errors */
880 DP_AUX_CH_CTL_TIME_OUT_ERROR |
881 DP_AUX_CH_CTL_RECEIVE_ERROR);
883 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
886 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887 * 400us delay required for errors and timeouts
888 * Timeout errors from the HW already meet this
889 * requirement so skip to next iteration
891 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892 usleep_range(400, 500);
895 if (status & DP_AUX_CH_CTL_DONE)
900 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
901 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
907 /* Check for timeout or receive error.
908 * Timeouts occur when the sink is not connected
910 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
911 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
916 /* Timeouts occur when the device isn't connected, so they're
917 * "normal" -- don't fill the kernel log with these */
918 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
919 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
924 /* Unload any bytes sent back from the other side */
925 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
926 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
927 if (recv_bytes > recv_size)
928 recv_bytes = recv_size;
930 for (i = 0; i < recv_bytes; i += 4)
931 intel_dp_unpack_aux(I915_READ(ch_data + i),
932 recv + i, recv_bytes - i);
936 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
937 intel_aux_display_runtime_put(dev_priv);
940 edp_panel_vdd_off(intel_dp, false);
942 pps_unlock(intel_dp);
947 #define BARE_ADDRESS_SIZE 3
948 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
950 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
952 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
953 uint8_t txbuf[20], rxbuf[20];
954 size_t txsize, rxsize;
957 txbuf[0] = (msg->request << 4) |
958 ((msg->address >> 16) & 0xf);
959 txbuf[1] = (msg->address >> 8) & 0xff;
960 txbuf[2] = msg->address & 0xff;
961 txbuf[3] = msg->size - 1;
963 switch (msg->request & ~DP_AUX_I2C_MOT) {
964 case DP_AUX_NATIVE_WRITE:
965 case DP_AUX_I2C_WRITE:
966 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
967 rxsize = 2; /* 0 or 1 data bytes */
969 if (WARN_ON(txsize > 20))
972 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
974 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 msg->reply = rxbuf[0] >> 4;
979 /* Number of bytes written in a short write. */
980 ret = clamp_t(int, rxbuf[1], 0, msg->size);
982 /* Return payload size. */
988 case DP_AUX_NATIVE_READ:
989 case DP_AUX_I2C_READ:
990 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
991 rxsize = msg->size + 1;
993 if (WARN_ON(rxsize > 20))
996 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
998 msg->reply = rxbuf[0] >> 4;
1000 * Assume happy day, and copy the data. The caller is
1001 * expected to check msg->reply before touching it.
1003 * Return payload size.
1006 memcpy(msg->buffer, rxbuf + 1, ret);
1019 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1021 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1022 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1023 enum port port = intel_dig_port->port;
1024 const char *name = NULL;
1029 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1033 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1037 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1041 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1049 * The AUX_CTL register is usually DP_CTL + 0x10.
1051 * On Haswell and Broadwell though:
1052 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1053 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1055 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1057 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1058 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1060 intel_dp->aux.name = name;
1061 intel_dp->aux.dev = dev->dev;
1062 intel_dp->aux.transfer = intel_dp_aux_transfer;
1064 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1065 connector->base.kdev->kobj.name);
1067 ret = drm_dp_aux_register(&intel_dp->aux);
1069 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1074 ret = sysfs_create_link(&connector->base.kdev->kobj,
1075 &intel_dp->aux.ddc.dev.kobj,
1076 intel_dp->aux.ddc.dev.kobj.name);
1078 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1079 drm_dp_aux_unregister(&intel_dp->aux);
1084 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1086 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1088 if (!intel_connector->mst_port)
1089 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1090 intel_dp->aux.ddc.dev.kobj.name);
1091 intel_connector_unregister(intel_connector);
1095 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1099 memset(&pipe_config->dpll_hw_state, 0,
1100 sizeof(pipe_config->dpll_hw_state));
1102 pipe_config->ddi_pll_sel = SKL_DPLL0;
1103 pipe_config->dpll_hw_state.cfgcr1 = 0;
1104 pipe_config->dpll_hw_state.cfgcr2 = 0;
1106 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1107 switch (link_clock / 2) {
1109 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1113 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1117 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1121 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1124 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1125 results in CDCLK change. Need to handle the change of CDCLK by
1126 disabling pipes and re-enabling them */
1128 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1132 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1137 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1141 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1144 case DP_LINK_BW_1_62:
1145 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1147 case DP_LINK_BW_2_7:
1148 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1150 case DP_LINK_BW_5_4:
1151 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1157 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1159 if (intel_dp->num_sink_rates) {
1160 *sink_rates = intel_dp->sink_rates;
1161 return intel_dp->num_sink_rates;
1164 *sink_rates = default_rates;
1166 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1170 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1172 if (IS_SKYLAKE(dev)) {
1173 *source_rates = skl_rates;
1174 return ARRAY_SIZE(skl_rates);
1175 } else if (IS_CHERRYVIEW(dev)) {
1176 *source_rates = chv_rates;
1177 return ARRAY_SIZE(chv_rates);
1180 *source_rates = default_rates;
1182 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1183 /* WaDisableHBR2:skl */
1184 return (DP_LINK_BW_2_7 >> 3) + 1;
1185 else if (INTEL_INFO(dev)->gen >= 8 ||
1186 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1187 return (DP_LINK_BW_5_4 >> 3) + 1;
1189 return (DP_LINK_BW_2_7 >> 3) + 1;
1193 intel_dp_set_clock(struct intel_encoder *encoder,
1194 struct intel_crtc_state *pipe_config, int link_bw)
1196 struct drm_device *dev = encoder->base.dev;
1197 const struct dp_link_dpll *divisor = NULL;
1201 divisor = gen4_dpll;
1202 count = ARRAY_SIZE(gen4_dpll);
1203 } else if (HAS_PCH_SPLIT(dev)) {
1205 count = ARRAY_SIZE(pch_dpll);
1206 } else if (IS_CHERRYVIEW(dev)) {
1208 count = ARRAY_SIZE(chv_dpll);
1209 } else if (IS_VALLEYVIEW(dev)) {
1211 count = ARRAY_SIZE(vlv_dpll);
1214 if (divisor && count) {
1215 for (i = 0; i < count; i++) {
1216 if (link_bw == divisor[i].link_bw) {
1217 pipe_config->dpll = divisor[i].dpll;
1218 pipe_config->clock_set = true;
1225 static int intersect_rates(const int *source_rates, int source_len,
1226 const int *sink_rates, int sink_len,
1229 int i = 0, j = 0, k = 0;
1231 while (i < source_len && j < sink_len) {
1232 if (source_rates[i] == sink_rates[j]) {
1233 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1235 common_rates[k] = source_rates[i];
1239 } else if (source_rates[i] < sink_rates[j]) {
/*
 * Fill @common_rates with the link rates supported by both this source
 * and the attached sink; returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int src_len, snk_len;

	snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);
	src_len = intel_dp_source_rates(dev, &src_rates);

	return intersect_rates(src_rates, src_len,
			       snk_rates, snk_len,
			       common_rates);
}
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * stopping silently when the buffer would overflow.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
/*
 * Dump source, sink and common link rates to the KMS debug log.
 * Early-outs when KMS debugging is disabled to avoid the formatting
 * work.
 */
1279 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1281 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1282 const int *source_rates, *sink_rates;
1283 int source_len, sink_len, common_len;
1284 int common_rates[DP_MAX_SUPPORTED_RATES];
1285 char str[128]; /* FIXME: too big for stack? */
/* Skip all formatting if nobody will see the output. */
1287 if ((drm_debug & DRM_UT_KMS) == 0)
1290 source_len = intel_dp_source_rates(dev, &source_rates);
1291 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1292 DRM_DEBUG_KMS("source rates: %s\n", str);
1294 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1295 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1296 DRM_DEBUG_KMS("sink rates: %s\n", str);
1298 common_len = intel_dp_common_rates(intel_dp, common_rates);
1299 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1300 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Return the index of 'find' in rates[] (scanning up to
 * DP_MAX_SUPPORTED_RATES entries). Callers pass find==0 to locate the
 * first unused slot of a zero-initialized array (see
 * intel_dp_max_link_rate).
 */
1303 static int rate_to_index(int find, const int *rates)
1307 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1308 if (find == rates[i])
/*
 * Return the highest link rate supported by both source and sink.
 * The common-rate array is zero-initialized, so rate_to_index(0, ...)
 * finds the end of the populated entries; the element before it is the
 * maximum (rates are sorted ascending).
 */
1315 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1317 int rates[DP_MAX_SUPPORTED_RATES] = {};
1320 len = intel_dp_common_rates(intel_dp, rates);
/* No common rates would mean a broken source/sink pairing. */
1321 if (WARN_ON(len <= 0))
1324 return rates[rate_to_index(0, rates) - 1];
/*
 * Map a link rate to its index in the sink's advertised rate table,
 * for programming DP_LINK_RATE_SET on eDP 1.4 sinks.
 */
1327 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1329 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Compute the DP link configuration (bpp, lane count, link clock) for a
 * mode set. Walks bpp from the pipe's value down to 18 (6 bpc * 3), and
 * for each bpp searches clock/lane-count combinations until the required
 * mode rate fits in the available link bandwidth. Also handles eDP
 * fixed-mode/panel-fitting setup, DRRS M/N values, and per-platform PLL
 * selection. NOTE(review): this extracted chunk is missing lines (loop
 * exits, 'found' label/goto, some else branches) — comments describe
 * only the visible code.
 */
1333 intel_dp_compute_config(struct intel_encoder *encoder,
1334 struct intel_crtc_state *pipe_config)
1336 struct drm_device *dev = encoder->base.dev;
1337 struct drm_i915_private *dev_priv = dev->dev_private;
1338 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1339 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1340 enum port port = dp_to_dig_port(intel_dp)->port;
1341 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1342 struct intel_connector *intel_connector = intel_dp->attached_connector;
1343 int lane_count, clock;
1344 int min_lane_count = 1;
1345 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1346 /* Conveniently, the link BW constants become indices with a shift...*/
1350 int link_avail, link_clock;
1351 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1354 common_len = intel_dp_common_rates(intel_dp, common_rates);
1356 /* No common link rates between source and sink */
1357 WARN_ON(common_len <= 0);
/* Clock indices below index into common_rates[], highest last. */
1359 max_clock = common_len - 1;
1361 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1362 pipe_config->has_pch_encoder = true;
1364 pipe_config->has_dp_encoder = true;
1365 pipe_config->has_drrs = false;
1366 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
/* eDP: force the panel's fixed mode and set up panel fitting. */
1368 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1369 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1372 if (INTEL_INFO(dev)->gen >= 9) {
1374 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1379 if (!HAS_PCH_SPLIT(dev))
1380 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1381 intel_connector->panel.fitting_mode);
1383 intel_pch_panel_fitting(intel_crtc, pipe_config,
1384 intel_connector->panel.fitting_mode);
/* Double-clocked modes are not supported on DP. */
1387 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1390 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1391 "max bw %d pixel clock %iKHz\n",
1392 max_lane_count, common_rates[max_clock],
1393 adjusted_mode->crtc_clock);
1395 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1396 * bpc in between. */
1397 bpp = pipe_config->pipe_bpp;
1398 if (is_edp(intel_dp)) {
1399 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1400 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1401 dev_priv->vbt.edp_bpp);
1402 bpp = dev_priv->vbt.edp_bpp;
1406 * Use the maximum clock and number of lanes the eDP panel
1407 * advertizes being capable of. The panels are generally
1408 * designed to support only a single clock and lane
1409 * configuration, and typically these values correspond to the
1410 * native resolution of the panel.
1412 min_lane_count = max_lane_count;
1413 min_clock = max_clock;
/* Outer loop: degrade bpp in 2-bpc (6-bit) steps down to 6 bpc. */
1416 for (; bpp >= 6*3; bpp -= 2*3) {
1417 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1420 for (clock = min_clock; clock <= max_clock; clock++) {
1421 for (lane_count = min_lane_count;
1422 lane_count <= max_lane_count;
1425 link_clock = common_rates[clock];
1426 link_avail = intel_dp_max_data_rate(link_clock,
/* First combination with enough bandwidth wins. */
1429 if (mode_rate <= link_avail) {
1439 if (intel_dp->color_range_auto) {
1442 * CEA-861-E - 5.1 Default Encoding Parameters
1443 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1445 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1446 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1448 intel_dp->color_range = 0;
1451 if (intel_dp->color_range)
1452 pipe_config->limited_color_range = true;
1454 intel_dp->lane_count = lane_count;
/* eDP 1.4 sinks use rate_select instead of a link_bw code. */
1456 if (intel_dp->num_sink_rates) {
1457 intel_dp->link_bw = 0;
1458 intel_dp->rate_select =
1459 intel_dp_rate_select(intel_dp, common_rates[clock]);
1462 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1463 intel_dp->rate_select = 0;
1466 pipe_config->pipe_bpp = bpp;
1467 pipe_config->port_clock = common_rates[clock];
1469 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1470 intel_dp->link_bw, intel_dp->lane_count,
1471 pipe_config->port_clock, bpp);
1472 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1473 mode_rate, link_avail);
1475 intel_link_compute_m_n(bpp, lane_count,
1476 adjusted_mode->crtc_clock,
1477 pipe_config->port_clock,
1478 &pipe_config->dp_m_n);
/* Second M/N set for seamless DRRS downclocking, if supported. */
1480 if (intel_connector->panel.downclock_mode != NULL &&
1481 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1482 pipe_config->has_drrs = true;
1483 intel_link_compute_m_n(bpp, lane_count,
1484 intel_connector->panel.downclock_mode->clock,
1485 pipe_config->port_clock,
1486 &pipe_config->dp_m2_n2);
/* Per-platform PLL selection/configuration. */
1489 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1490 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1491 else if (IS_BROXTON(dev))
1492 /* handled in ddi */;
1493 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1494 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1496 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
/*
 * Program the CPU eDP PLL frequency select bits in DP_A to match the
 * configured port clock (162 MHz -> "160MHz" bit, otherwise 270 MHz),
 * mirroring the choice into the cached intel_dp->DP value.
 */
1501 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1503 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1504 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1505 struct drm_device *dev = crtc->base.dev;
1506 struct drm_i915_private *dev_priv = dev->dev_private;
1509 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1510 crtc->config->port_clock);
1511 dpa_ctl = I915_READ(DP_A);
1512 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1514 if (crtc->config->port_clock == 162000) {
1515 /* For a long time we've carried around a ILK-DevA w/a for the
1516 * 160MHz clock. If we're really unlucky, it's still required.
1518 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1519 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1520 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1522 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1523 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1526 I915_WRITE(DP_A, dpa_ctl);
/*
 * Build the cached DP port register value (intel_dp->DP) for the
 * upcoming modeset, handling the three register layouts: IBX/CPU,
 * CPT PCH (sync/enhanced-framing bits live in TRANS_DP_CTL), and
 * gen7 PORT_A CPU eDP. Nothing is written to the port register here
 * except the CPT TRANS_DP_CTL update. NOTE(review): extracted chunk;
 * some comment/brace lines are missing.
 */
1532 static void intel_dp_prepare(struct intel_encoder *encoder)
1534 struct drm_device *dev = encoder->base.dev;
1535 struct drm_i915_private *dev_priv = dev->dev_private;
1536 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1537 enum port port = dp_to_dig_port(intel_dp)->port;
1538 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1539 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1542 * There are four kinds of DP registers:
1549 * IBX PCH and CPU are the same for almost everything,
1550 * except that the CPU DP PLL is configured in this
1553 * CPT PCH is quite different, having many bits moved
1554 * to the TRANS_DP_CTL register instead. That
1555 * configuration happens (oddly) in ironlake_pch_enable
1558 /* Preserve the BIOS-computed detected bit. This is
1559 * supposed to be read-only.
1561 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1563 /* Handle DP bits in common between all three register formats */
1564 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1565 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1567 if (crtc->config->has_audio)
1568 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1570 /* Split out the IBX/CPU vs CPT settings */
1572 if (IS_GEN7(dev) && port == PORT_A) {
1573 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1574 intel_dp->DP |= DP_SYNC_HS_HIGH;
1575 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1576 intel_dp->DP |= DP_SYNC_VS_HIGH;
1577 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1579 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1580 intel_dp->DP |= DP_ENHANCED_FRAMING;
1582 intel_dp->DP |= crtc->pipe << 29;
1583 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1586 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/* On CPT the enhanced-framing bit lives in TRANS_DP_CTL. */
1588 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1589 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1590 trans_dp |= TRANS_DP_ENH_FRAMING;
1592 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1593 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1595 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1596 intel_dp->DP |= intel_dp->color_range;
1598 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1599 intel_dp->DP |= DP_SYNC_HS_HIGH;
1600 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1601 intel_dp->DP |= DP_SYNC_VS_HIGH;
1602 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1604 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1605 intel_dp->DP |= DP_ENHANCED_FRAMING;
/* CHV has a 2-bit pipe select; older parts use a single bit. */
1607 if (IS_CHERRYVIEW(dev))
1608 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1609 else if (crtc->pipe == PIPE_B)
1610 intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs for wait_panel_status(): the PP_STATUS register is
 * polled until (status & MASK) == VALUE. ON waits for panel power up,
 * OFF for power down, CYCLE for the power-cycle delay to elapse.
 * The literal 0 terms mark status bits deliberately excluded from the
 * comparison.
 */
1614 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1615 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1617 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1618 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1620 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1621 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll the eDP panel power status register until (status & mask) ==
 * value, timing out after 5 seconds (10 us poll interval). Logs an
 * error on timeout but does not propagate it. Caller must hold
 * pps_mutex.
 */
1623 static void wait_panel_status(struct intel_dp *intel_dp,
1627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 u32 pp_stat_reg, pp_ctrl_reg;
1631 lockdep_assert_held(&dev_priv->pps_mutex);
1633 pp_stat_reg = _pp_stat_reg(intel_dp);
1634 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1636 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1638 I915_READ(pp_stat_reg),
1639 I915_READ(pp_ctrl_reg));
/* 5000 ms timeout, polling every 10 us. */
1641 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1642 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1643 I915_READ(pp_stat_reg),
1644 I915_READ(pp_ctrl_reg));
1647 DRM_DEBUG_KMS("Wait complete\n");
/* Block until the panel power sequencer reports the panel is on. */
1650 static void wait_panel_on(struct intel_dp *intel_dp)
1652 DRM_DEBUG_KMS("Wait for panel power on\n");
1653 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the panel power sequencer reports the panel is off. */
1656 static void wait_panel_off(struct intel_dp *intel_dp)
1658 DRM_DEBUG_KMS("Wait for panel power off time\n");
1659 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Wait out the panel power-cycle delay: first the software-tracked
 * remainder since the last power cycle (needed when VDD override was
 * disabled last), then the hardware sequencer's own idle state.
 */
1662 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1664 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1666 /* When we disable the VDD override bit last we have to do the manual
1668 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1669 intel_dp->panel_power_cycle_delay);
1671 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Honour the panel's power-on -> backlight-on delay. */
1674 static void wait_backlight_on(struct intel_dp *intel_dp)
1676 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1677 intel_dp->backlight_on_delay);
/* Honour the backlight-off -> panel-power-off delay. */
1680 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1682 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1683 intel_dp->backlight_off_delay);
1686 /* Read the current pp_control value, unlocking the register if it
/*
 * Read the panel power control register and substitute the unlock key
 * so subsequent writes of the returned value are accepted by the
 * hardware. Caller must hold pps_mutex.
 */
1690 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1692 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1696 lockdep_assert_held(&dev_priv->pps_mutex);
1698 control = I915_READ(_pp_ctrl_reg(intel_dp));
1699 control &= ~PANEL_UNLOCK_MASK;
1700 control |= PANEL_UNLOCK_REGS;
1705 * Must be paired with edp_panel_vdd_off().
1706 * Must hold pps_mutex around the whole on/off sequence.
1707 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/*
 * Force the eDP panel VDD rail on (EDP_FORCE_VDD), taking a display
 * power domain reference when VDD was not already up. Returns true if
 * the caller is responsible for the matching vdd-off (i.e. VDD was not
 * already wanted before this call). Caller must hold pps_mutex.
 */
1709 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1711 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1713 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1714 struct drm_i915_private *dev_priv = dev->dev_private;
1715 enum intel_display_power_domain power_domain;
1717 u32 pp_stat_reg, pp_ctrl_reg;
1718 bool need_to_disable = !intel_dp->want_panel_vdd;
1720 lockdep_assert_held(&dev_priv->pps_mutex);
1722 if (!is_edp(intel_dp))
/* Cancel any pending deferred vdd-off before re-asserting VDD. */
1725 cancel_delayed_work(&intel_dp->panel_vdd_work);
1726 intel_dp->want_panel_vdd = true;
1728 if (edp_have_panel_vdd(intel_dp))
1729 return need_to_disable;
1731 power_domain = intel_display_port_power_domain(intel_encoder);
1732 intel_display_power_get(dev_priv, power_domain);
1734 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1735 port_name(intel_dig_port->port));
1737 if (!edp_have_panel_power(intel_dp))
1738 wait_panel_power_cycle(intel_dp);
1740 pp = ironlake_get_pp_control(intel_dp);
1741 pp |= EDP_FORCE_VDD;
1743 pp_stat_reg = _pp_stat_reg(intel_dp);
1744 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1746 I915_WRITE(pp_ctrl_reg, pp);
1747 POSTING_READ(pp_ctrl_reg);
1748 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1749 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1751 * If the panel wasn't on, delay before accessing aux channel
1753 if (!edp_have_panel_power(intel_dp)) {
1754 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1755 port_name(intel_dig_port->port));
1756 msleep(intel_dp->panel_power_up_delay);
1759 return need_to_disable;
1763 * Must be paired with intel_edp_panel_vdd_off() or
1764 * intel_edp_panel_off().
1765 * Nested calls to these functions are not allowed since
1766 * we drop the lock. Caller must use some higher level
1767 * locking to prevent nested calls from other threads.
/*
 * Public wrapper: take pps_mutex (via pps_lock, not visible in this
 * extracted chunk), force VDD on, and warn if VDD had already been
 * requested — nested on-requests through this entry point are a bug.
 */
1769 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1773 if (!is_edp(intel_dp))
1777 vdd = edp_panel_vdd_on(intel_dp);
1778 pps_unlock(intel_dp);
1780 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1781 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Synchronously drop the forced VDD override and release the display
 * power domain reference taken by edp_panel_vdd_on(). Only runs when
 * VDD is actually asserted and nobody wants it anymore. Caller must
 * hold pps_mutex.
 */
1784 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1786 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1787 struct drm_i915_private *dev_priv = dev->dev_private;
1788 struct intel_digital_port *intel_dig_port =
1789 dp_to_dig_port(intel_dp);
1790 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1791 enum intel_display_power_domain power_domain;
1793 u32 pp_stat_reg, pp_ctrl_reg;
1795 lockdep_assert_held(&dev_priv->pps_mutex);
1797 WARN_ON(intel_dp->want_panel_vdd);
1799 if (!edp_have_panel_vdd(intel_dp))
1802 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1803 port_name(intel_dig_port->port));
1805 pp = ironlake_get_pp_control(intel_dp);
1806 pp &= ~EDP_FORCE_VDD;
1808 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1809 pp_stat_reg = _pp_stat_reg(intel_dp);
1811 I915_WRITE(pp_ctrl_reg, pp);
1812 POSTING_READ(pp_ctrl_reg);
1814 /* Make sure sequencer is idle before allowing subsequent activity */
1815 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* Dropping VDD with the panel off starts a new power cycle window. */
1818 if ((pp & POWER_TARGET_ON) == 0)
1819 intel_dp->last_power_cycle = jiffies;
1821 power_domain = intel_display_port_power_domain(intel_encoder);
1822 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work handler: turn VDD off if nobody re-requested it while
 * the work was queued. Takes pps_mutex (pps_lock call not visible in
 * this extracted chunk).
 */
1825 static void edp_panel_vdd_work(struct work_struct *__work)
1827 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1828 struct intel_dp, panel_vdd_work);
1831 if (!intel_dp->want_panel_vdd)
1832 edp_panel_vdd_off_sync(intel_dp);
1833 pps_unlock(intel_dp);
/*
 * Defer the VDD-off to a worker, 5x the panel power-cycle delay from
 * now, so a burst of AUX transactions doesn't bounce VDD.
 */
1836 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1838 unsigned long delay;
1841 * Queue the timer to fire a long time from now (relative to the power
1842 * down delay) to keep the panel power up across a sequence of
1845 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1846 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1850 * Must be paired with edp_panel_vdd_on().
1851 * Must hold pps_mutex around the whole on/off sequence.
1852 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/*
 * Release a VDD request taken with edp_panel_vdd_on(). With sync=true
 * the override is dropped immediately; otherwise the drop is scheduled
 * on the delayed worker. Warns if VDD was not forced on. Caller must
 * hold pps_mutex.
 */
1854 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1856 struct drm_i915_private *dev_priv =
1857 intel_dp_to_dev(intel_dp)->dev_private;
1859 lockdep_assert_held(&dev_priv->pps_mutex);
1861 if (!is_edp(intel_dp))
1864 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1865 port_name(dp_to_dig_port(intel_dp)->port));
1867 intel_dp->want_panel_vdd = false;
1870 edp_panel_vdd_off_sync(intel_dp);
1872 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Run the panel power-on sequence: wait out the power cycle, set
 * POWER_TARGET_ON, and wait for the sequencer to report on. Includes
 * the ILK workaround of clearing PANEL_POWER_RESET around the sequence
 * and restoring it afterwards. Caller must hold pps_mutex.
 */
1875 static void edp_panel_on(struct intel_dp *intel_dp)
1877 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1878 struct drm_i915_private *dev_priv = dev->dev_private;
1882 lockdep_assert_held(&dev_priv->pps_mutex);
1884 if (!is_edp(intel_dp))
1887 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1888 port_name(dp_to_dig_port(intel_dp)->port));
/* Double power-on is a driver bug; warn and bail. */
1890 if (WARN(edp_have_panel_power(intel_dp),
1891 "eDP port %c panel power already on\n",
1892 port_name(dp_to_dig_port(intel_dp)->port)))
1895 wait_panel_power_cycle(intel_dp);
1897 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1898 pp = ironlake_get_pp_control(intel_dp);
1900 /* ILK workaround: disable reset around power sequence */
1901 pp &= ~PANEL_POWER_RESET;
1902 I915_WRITE(pp_ctrl_reg, pp);
1903 POSTING_READ(pp_ctrl_reg);
1906 pp |= POWER_TARGET_ON;
1908 pp |= PANEL_POWER_RESET;
1910 I915_WRITE(pp_ctrl_reg, pp);
1911 POSTING_READ(pp_ctrl_reg);
1913 wait_panel_on(intel_dp);
1914 intel_dp->last_power_on = jiffies;
1917 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1918 I915_WRITE(pp_ctrl_reg, pp);
1919 POSTING_READ(pp_ctrl_reg);
/* Public wrapper: power the panel on under pps_mutex. */
1923 void intel_edp_panel_on(struct intel_dp *intel_dp)
1925 if (!is_edp(intel_dp))
1929 edp_panel_on(intel_dp);
1930 pps_unlock(intel_dp);
/*
 * Run the panel power-off sequence: clear POWER_TARGET_ON together
 * with the VDD override (some panels misbehave otherwise), wait for
 * off, then drop the power domain reference that the VDD-on path had
 * taken. Requires VDD to be forced on by the caller. Caller must hold
 * pps_mutex.
 */
1934 static void edp_panel_off(struct intel_dp *intel_dp)
1936 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1937 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1939 struct drm_i915_private *dev_priv = dev->dev_private;
1940 enum intel_display_power_domain power_domain;
1944 lockdep_assert_held(&dev_priv->pps_mutex);
1946 if (!is_edp(intel_dp))
1949 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1950 port_name(dp_to_dig_port(intel_dp)->port));
1952 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1953 port_name(dp_to_dig_port(intel_dp)->port));
1955 pp = ironlake_get_pp_control(intel_dp);
1956 /* We need to switch off panel power _and_ force vdd, for otherwise some
1957 * panels get very unhappy and cease to work. */
1958 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1961 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1963 intel_dp->want_panel_vdd = false;
1965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
/* Record the start of the mandatory power-cycle delay window. */
1968 intel_dp->last_power_cycle = jiffies;
1969 wait_panel_off(intel_dp);
1971 /* We got a reference when we enabled the VDD. */
1972 power_domain = intel_display_port_power_domain(intel_encoder);
1973 intel_display_power_put(dev_priv, power_domain);
/* Public wrapper: power the panel off under pps_mutex. */
1976 void intel_edp_panel_off(struct intel_dp *intel_dp)
1978 if (!is_edp(intel_dp))
1982 edp_panel_off(intel_dp);
1983 pps_unlock(intel_dp);
1986 /* Enable backlight in the panel power control. */
/*
 * Set EDP_BLC_ENABLE in the panel power control register, after
 * honouring the power-on -> backlight-on delay to avoid visible
 * flicker while the panel syncs with the link.
 */
1987 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1989 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1990 struct drm_device *dev = intel_dig_port->base.base.dev;
1991 struct drm_i915_private *dev_priv = dev->dev_private;
1996 * If we enable the backlight right away following a panel power
1997 * on, we may see slight flicker as the panel syncs with the eDP
1998 * link. So delay a bit to make sure the image is solid before
1999 * allowing it to appear.
2001 wait_backlight_on(intel_dp);
2005 pp = ironlake_get_pp_control(intel_dp);
2006 pp |= EDP_BLC_ENABLE;
2008 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2010 I915_WRITE(pp_ctrl_reg, pp);
2011 POSTING_READ(pp_ctrl_reg);
2013 pps_unlock(intel_dp);
2016 /* Enable backlight PWM and backlight PP control. */
/* Enable backlight: PWM first, then the PP-control enable bit. */
2017 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2019 if (!is_edp(intel_dp))
2022 DRM_DEBUG_KMS("\n");
2024 intel_panel_enable_backlight(intel_dp->attached_connector);
2025 _intel_edp_backlight_on(intel_dp);
2028 /* Disable backlight in the panel power control. */
/*
 * Clear EDP_BLC_ENABLE in the panel power control register, record the
 * off timestamp, and wait the backlight-off delay before returning.
 */
2029 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2031 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2032 struct drm_i915_private *dev_priv = dev->dev_private;
2036 if (!is_edp(intel_dp))
2041 pp = ironlake_get_pp_control(intel_dp);
2042 pp &= ~EDP_BLC_ENABLE;
2044 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2046 I915_WRITE(pp_ctrl_reg, pp);
2047 POSTING_READ(pp_ctrl_reg);
2049 pps_unlock(intel_dp);
2051 intel_dp->last_backlight_off = jiffies;
2052 edp_wait_backlight_off(intel_dp);
2055 /* Disable backlight PP control and backlight PWM. */
/* Disable backlight: PP-control enable bit first, then the PWM. */
2056 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2058 if (!is_edp(intel_dp))
2061 DRM_DEBUG_KMS("\n");
2063 _intel_edp_backlight_off(intel_dp);
2064 intel_panel_disable_backlight(intel_dp->attached_connector);
2068 * Hook for controlling the panel power control backlight through the bl_power
2069 * sysfs attribute. Take care to handle multiple calls.
/*
 * bl_power sysfs hook: toggle the PP-control backlight enable. Reads
 * the current state first so repeated calls with the same value are
 * no-ops.
 */
2071 static void intel_edp_backlight_power(struct intel_connector *connector,
2074 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2078 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2079 pps_unlock(intel_dp);
2081 if (is_enabled == enable)
2084 DRM_DEBUG_KMS("panel power control backlight %s\n",
2085 enable ? "enable" : "disable");
2088 _intel_edp_backlight_on(intel_dp);
2090 _intel_edp_backlight_off(intel_dp);
/*
 * Enable the CPU eDP PLL via DP_A. The pipe must already be disabled
 * and both the PLL and the port must currently be off (asserted via
 * WARNs). Clears stale port/audio enable bits from the cached DP value
 * before setting DP_PLL_ENABLE.
 */
2093 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2095 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2096 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2097 struct drm_device *dev = crtc->dev;
2098 struct drm_i915_private *dev_priv = dev->dev_private;
2101 assert_pipe_disabled(dev_priv,
2102 to_intel_crtc(crtc)->pipe);
2104 DRM_DEBUG_KMS("\n");
2105 dpa_ctl = I915_READ(DP_A);
2106 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2107 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2109 /* We don't adjust intel_dp->DP while tearing down the link, to
2110 * facilitate link retraining (e.g. after hotplug). Hence clear all
2111 * enable bits here to ensure that we don't enable too much. */
2112 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2113 intel_dp->DP |= DP_PLL_ENABLE;
2114 I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL. Works on the live DP_A register value
 * rather than the cached intel_dp->DP, which must stay untouched so a
 * later link retrain can reuse it. Pipe must be disabled; PLL must be
 * on and port off (asserted via WARNs).
 */
2119 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2121 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2122 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2123 struct drm_device *dev = crtc->dev;
2124 struct drm_i915_private *dev_priv = dev->dev_private;
2127 assert_pipe_disabled(dev_priv,
2128 to_intel_crtc(crtc)->pipe);
2130 dpa_ctl = I915_READ(DP_A);
2131 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2132 "dp pll off, should be on\n");
2133 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2135 /* We can't rely on the value tracked for the DP register in
2136 * intel_dp->DP because link_down must not change that (otherwise link
2137 * re-training will fail. */
2138 dpa_ctl &= ~DP_PLL_ENABLE;
2139 I915_WRITE(DP_A, dpa_ctl);
2144 /* If the sink supports it, try to set the power state appropriately */
/*
 * Set the sink's DPCD power state (DP_SET_POWER) to match the DPMS
 * mode. Skipped on pre-1.1 DPCD sinks, which lack the register.
 * Power-up is retried (DP spec allows the sink up to 1 ms to wake);
 * failure is only logged.
 */
2145 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2149 /* Should have a valid DPCD by this point */
2150 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2153 if (mode != DRM_MODE_DPMS_ON) {
2154 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2158 * When turning on, we need to retry for 1ms to give the sink
2161 for (i = 0; i < 3; i++) {
2162 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2171 DRM_DEBUG_KMS("failed to %s sink power state\n",
2172 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Read back whether the DP port is enabled in hardware and which pipe
 * drives it. Pipe decoding differs per platform: gen7 PORT_A and CPT
 * use transcoder-select fields (CPT requires scanning TRANS_DP_CTL of
 * every pipe), CHV has its own 2-bit field, everything else a single
 * bit. Returns false when the power domain is off or the port is
 * disabled.
 */
2175 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2178 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2179 enum port port = dp_to_dig_port(intel_dp)->port;
2180 struct drm_device *dev = encoder->base.dev;
2181 struct drm_i915_private *dev_priv = dev->dev_private;
2182 enum intel_display_power_domain power_domain;
2185 power_domain = intel_display_port_power_domain(encoder);
2186 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2189 tmp = I915_READ(intel_dp->output_reg);
2191 if (!(tmp & DP_PORT_EN))
2194 if (IS_GEN7(dev) && port == PORT_A) {
2195 *pipe = PORT_TO_PIPE_CPT(tmp);
2196 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
/* CPT routes port->pipe via TRANS_DP_CTL; scan all pipes. */
2199 for_each_pipe(dev_priv, p) {
2200 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2201 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2207 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2208 intel_dp->output_reg);
2209 } else if (IS_CHERRYVIEW(dev)) {
2210 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2212 *pipe = PORT_TO_PIPE(tmp);
/*
 * Reconstruct the crtc state from hardware for state readout/checking:
 * sync polarity flags (from TRANS_DP_CTL on CPT, from the port register
 * elsewhere), audio, limited color range, M/N values, port clock (from
 * DP_A PLL freq select on PORT_A) and the derived dotclock. Also hosts
 * the UEFI/VBT eDP bpp fixup described inline.
 */
2218 static void intel_dp_get_config(struct intel_encoder *encoder,
2219 struct intel_crtc_state *pipe_config)
2221 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2223 struct drm_device *dev = encoder->base.dev;
2224 struct drm_i915_private *dev_priv = dev->dev_private;
2225 enum port port = dp_to_dig_port(intel_dp)->port;
2226 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2229 tmp = I915_READ(intel_dp->output_reg);
2231 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2233 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2234 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2235 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2236 flags |= DRM_MODE_FLAG_PHSYNC;
2238 flags |= DRM_MODE_FLAG_NHSYNC;
2240 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2241 flags |= DRM_MODE_FLAG_PVSYNC;
2243 flags |= DRM_MODE_FLAG_NVSYNC;
2245 if (tmp & DP_SYNC_HS_HIGH)
2246 flags |= DRM_MODE_FLAG_PHSYNC;
2248 flags |= DRM_MODE_FLAG_NHSYNC;
2250 if (tmp & DP_SYNC_VS_HIGH)
2251 flags |= DRM_MODE_FLAG_PVSYNC;
2253 flags |= DRM_MODE_FLAG_NVSYNC;
2256 pipe_config->base.adjusted_mode.flags |= flags;
2258 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2259 tmp & DP_COLOR_RANGE_16_235)
2260 pipe_config->limited_color_range = true;
2262 pipe_config->has_dp_encoder = true;
2264 intel_dp_get_m_n(crtc, pipe_config);
/* PORT_A: derive the link clock from the DP_A PLL frequency bits. */
2266 if (port == PORT_A) {
2267 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2268 pipe_config->port_clock = 162000;
2270 pipe_config->port_clock = 270000;
2273 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2274 &pipe_config->dp_m_n);
2276 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2277 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2279 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2281 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2282 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2284 * This is a big fat ugly hack.
2286 * Some machines in UEFI boot mode provide us a VBT that has 18
2287 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2288 * unknown we fail to light up. Yet the same BIOS boots up with
2289 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2290 * max, not what it tells us to use.
2292 * Note: This will still be broken if the eDP panel is not lit
2293 * up by the BIOS, and thus we can't get the mode at module
2296 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2297 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2298 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Encoder disable hook: tear down audio/PSR, then power the panel down
 * in the required order (VDD up, backlight off, sink to sleep, panel
 * off). On pre-gen5 (g4x) the port must be taken down before the pipe,
 * so the link goes down here too.
 */
2302 static void intel_disable_dp(struct intel_encoder *encoder)
2304 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2305 struct drm_device *dev = encoder->base.dev;
2306 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2308 if (crtc->config->has_audio)
2309 intel_audio_codec_disable(encoder);
2311 if (HAS_PSR(dev) && !HAS_DDI(dev))
2312 intel_psr_disable(intel_dp);
2314 /* Make sure the panel is off before trying to change the mode. But also
2315 * ensure that we have vdd while we switch off the panel. */
2316 intel_edp_panel_vdd_on(intel_dp);
2317 intel_edp_backlight_off(intel_dp);
2318 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2319 intel_edp_panel_off(intel_dp);
2321 /* disable the port before the pipe on g4x */
2322 if (INTEL_INFO(dev)->gen < 5)
2323 intel_dp_link_down(intel_dp);
/*
 * ILK post-disable: bring the link down after the pipe is off, and
 * turn the CPU eDP PLL off. NOTE(review): the PORT_A condition guarding
 * the PLL-off call is in a line not visible in this extracted chunk.
 */
2326 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2328 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2329 enum port port = dp_to_dig_port(intel_dp)->port;
2331 intel_dp_link_down(intel_dp);
2333 ironlake_edp_pll_off(intel_dp);
/* VLV post-disable: just bring the link down after the pipe is off. */
2336 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2338 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2340 intel_dp_link_down(intel_dp);
/*
 * CHV post-disable: bring the link down, then reset the PHY data lanes
 * through sideband (DPIO) writes — enable soft-reset propagation on
 * both PCS pairs, then clear the lane reset bits. All DPIO access is
 * serialized by sb_lock.
 */
2343 static void chv_post_disable_dp(struct intel_encoder *encoder)
2345 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2346 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2347 struct drm_device *dev = encoder->base.dev;
2348 struct drm_i915_private *dev_priv = dev->dev_private;
2349 struct intel_crtc *intel_crtc =
2350 to_intel_crtc(encoder->base.crtc);
2351 enum dpio_channel ch = vlv_dport_to_channel(dport);
2352 enum pipe pipe = intel_crtc->pipe;
2355 intel_dp_link_down(intel_dp);
2357 mutex_lock(&dev_priv->sb_lock);
2359 /* Propagate soft reset to data lane reset */
2360 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2361 val |= CHV_PCS_REQ_SOFTRESET_EN;
2362 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2364 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2365 val |= CHV_PCS_REQ_SOFTRESET_EN;
2366 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2368 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2369 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2370 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2372 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2373 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2374 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2376 mutex_unlock(&dev_priv->sb_lock);
/*
 * Encode a DP training pattern into the port register value (*DP) —
 * or, on DDI platforms, into DP_TP_CTL directly. Three encodings:
 * DDI (DP_TP_CTL), CPT-style (gen7 PORT_A and CPT PCH ports), and the
 * legacy/CHV port-register bits. Pattern 3 is only valid on DDI and
 * CHV; elsewhere it falls back to pattern 2 with an error. NOTE(review):
 * extracted chunk; the HAS_DDI condition, break statements and default
 * cases sit on lines not visible here.
 */
2380 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2382 uint8_t dp_train_pat)
2384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2385 struct drm_device *dev = intel_dig_port->base.base.dev;
2386 struct drm_i915_private *dev_priv = dev->dev_private;
2387 enum port port = intel_dig_port->port;
2390 uint32_t temp = I915_READ(DP_TP_CTL(port));
2392 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2393 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2395 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2397 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2398 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2399 case DP_TRAINING_PATTERN_DISABLE:
2400 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2403 case DP_TRAINING_PATTERN_1:
2404 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2406 case DP_TRAINING_PATTERN_2:
2407 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2409 case DP_TRAINING_PATTERN_3:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2413 I915_WRITE(DP_TP_CTL(port), temp);
/* CPT-style encoding for gen7 PORT_A and CPT PCH ports. */
2415 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2416 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2417 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2419 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2420 case DP_TRAINING_PATTERN_DISABLE:
2421 *DP |= DP_LINK_TRAIN_OFF_CPT;
2423 case DP_TRAINING_PATTERN_1:
2424 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2426 case DP_TRAINING_PATTERN_2:
2427 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2429 case DP_TRAINING_PATTERN_3:
2430 DRM_ERROR("DP training pattern 3 not supported\n");
2431 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
/* Legacy encoding; CHV uses a wider train mask. */
2436 if (IS_CHERRYVIEW(dev))
2437 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2439 *DP &= ~DP_LINK_TRAIN_MASK;
2441 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2442 case DP_TRAINING_PATTERN_DISABLE:
2443 *DP |= DP_LINK_TRAIN_OFF;
2445 case DP_TRAINING_PATTERN_1:
2446 *DP |= DP_LINK_TRAIN_PAT_1;
2448 case DP_TRAINING_PATTERN_2:
2449 *DP |= DP_LINK_TRAIN_PAT_2;
2451 case DP_TRAINING_PATTERN_3:
2452 if (IS_CHERRYVIEW(dev)) {
2453 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2455 DRM_ERROR("DP training pattern 3 not supported\n");
2456 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Write the port register with training pattern 1 selected, then do a
 * second write with DP_PORT_EN set.  Starting with pattern 1 follows
 * the DP spec; the deliberate two-step write is a VLV/CHV requirement
 * (see the comment below).
 */
2463 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2465 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2466 struct drm_i915_private *dev_priv = dev->dev_private;
2468 /* enable with pattern 1 (as per spec) */
2469 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2470 DP_TRAINING_PATTERN_1);
2472 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2473 POSTING_READ(intel_dp->output_reg);
2476 * Magic for VLV/CHV. We _must_ first set up the register
2477 * without actually enabling the port, and then do another
2478 * write to enable the port. Otherwise link training will
2479 * fail when the power sequencer is freshly used for this port.
2481 intel_dp->DP |= DP_PORT_EN;
2483 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2484 POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable sequence: enable the port (with pattern 1), run the
 * eDP panel power-on sequence, wait for the PHY on VLV, wake the sink
 * via DPCD DPMS, perform full link training, and finally enable audio
 * if the pipe config requests it.  WARNs and bails if the port is
 * somehow already enabled.
 * NOTE(review): in this kernel era IS_VALLEYVIEW() is presumed to also
 * cover CHV — confirm against the platform macros.
 */
2487 static void intel_enable_dp(struct intel_encoder *encoder)
2489 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2490 struct drm_device *dev = encoder->base.dev;
2491 struct drm_i915_private *dev_priv = dev->dev_private;
2492 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2493 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2494 unsigned int lane_mask = 0x0;
2496 if (WARN_ON(dp_reg & DP_PORT_EN))
/* bind this port's panel power sequencer before touching the panel */
2501 if (IS_VALLEYVIEW(dev))
2502 vlv_init_panel_power_sequencer(intel_dp);
2504 intel_dp_enable_port(intel_dp);
/* eDP panel power on, bracketed by vdd on/off */
2506 edp_panel_vdd_on(intel_dp);
2507 edp_panel_on(intel_dp);
2508 edp_panel_vdd_off(intel_dp, true);
2510 pps_unlock(intel_dp);
2512 if (IS_VALLEYVIEW(dev))
2513 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
/* sink awake first, then train the link */
2516 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2517 intel_dp_start_link_train(intel_dp);
2518 intel_dp_complete_link_train(intel_dp);
2519 intel_dp_stop_link_train(intel_dp);
2521 if (crtc->config->has_audio) {
2522 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2523 pipe_name(crtc->pipe));
2524 intel_audio_codec_enable(encoder);
/* g4x enable hook: run the common DP enable, then eDP backlight on. */
2528 static void g4x_enable_dp(struct intel_encoder *encoder)
2530 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2532 intel_enable_dp(encoder);
2533 intel_edp_backlight_on(intel_dp);
/*
 * VLV/CHV enable hook: only backlight and PSR here — the actual port
 * enable runs earlier, since vlv_pre_enable_dp()/chv_pre_enable_dp()
 * call intel_enable_dp() themselves.
 */
2536 static void vlv_enable_dp(struct intel_encoder *encoder)
2538 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2540 intel_edp_backlight_on(intel_dp);
2541 intel_psr_enable(intel_dp);
/*
 * g4x pre-enable: latch the port register configuration and, for eDP
 * on port A (ilk+ only), program and turn on the eDP PLL.
 */
2544 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2546 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2547 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2549 intel_dp_prepare(encoder);
2551 /* Only ilk+ has port A */
2552 if (dport->port == PORT_A) {
2553 ironlake_set_pll_cpu_edp(intel_dp);
2554 ironlake_edp_pll_on(intel_dp);
/*
 * Logically disconnect this eDP port from the power sequencer of pipe
 * intel_dp->pps_pipe: sync vdd off, clear the PPS port-select register
 * for that pipe, and mark pps_pipe as INVALID_PIPE.
 */
2558 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2561 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2562 enum pipe pipe = intel_dp->pps_pipe;
2563 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
/* make sure vdd is truly off before we drop the sequencer */
2565 edp_panel_vdd_off_sync(intel_dp);
2568 * VLV seems to get confused when multiple power sequencers
2569 * have the same port selected (even if only one has power/vdd
2570 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2571 * CHV on the other hand doesn't seem to mind having the same port
2572 * selected in multiple power sequencers, but let's clear the
2573 * port select always when logically disconnecting a power sequencer
2576 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2577 pipe_name(pipe), port_name(intel_dig_port->port));
2578 I915_WRITE(pp_on_reg, 0);
2579 POSTING_READ(pp_on_reg);
2581 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Take ownership of 'pipe's panel power sequencer: walk all encoders,
 * and for every eDP encoder currently bound to this pipe's sequencer,
 * detach it (after syncing vdd off).  WARNs if the sequencer is stolen
 * from an encoder that still has active connectors.
 * Must be called with pps_mutex held (lockdep-asserted).
 */
2584 static void vlv_steal_power_sequencer(struct drm_device *dev,
2587 struct drm_i915_private *dev_priv = dev->dev_private;
2588 struct intel_encoder *encoder;
2590 lockdep_assert_held(&dev_priv->pps_mutex);
/* only pipes A and B have panel power sequencers */
2592 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2595 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2597 struct intel_dp *intel_dp;
/* power sequencers are only relevant for eDP outputs */
2600 if (encoder->type != INTEL_OUTPUT_EDP)
2603 intel_dp = enc_to_intel_dp(&encoder->base);
2604 port = dp_to_dig_port(intel_dp)->port;
2606 if (intel_dp->pps_pipe != pipe)
2609 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2610 pipe_name(pipe), port_name(port));
2612 WARN(encoder->connectors_active,
2613 "stealing pipe %c power sequencer from active eDP port %c\n",
2614 pipe_name(pipe), port_name(port));
2616 /* make sure vdd is off before we steal it */
2617 vlv_detach_power_sequencer(intel_dp);
/*
 * Bind crtc->pipe's panel power sequencer to this eDP port.  No-op for
 * non-eDP outputs or if the binding already matches; otherwise detach
 * any sequencer previously used by this port, steal the target pipe's
 * sequencer from other ports, record the new binding, and reinitialize
 * the PPS state and its registers.
 * Must be called with pps_mutex held (lockdep-asserted).
 */
2621 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2623 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2624 struct intel_encoder *encoder = &intel_dig_port->base;
2625 struct drm_device *dev = encoder->base.dev;
2626 struct drm_i915_private *dev_priv = dev->dev_private;
2627 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2629 lockdep_assert_held(&dev_priv->pps_mutex);
2631 if (!is_edp(intel_dp))
/* already bound to the right pipe — nothing to do */
2634 if (intel_dp->pps_pipe == crtc->pipe)
2638 * If another power sequencer was being used on this
2639 * port previously make sure to turn off vdd there while
2640 * we still have control of it.
2642 if (intel_dp->pps_pipe != INVALID_PIPE)
2643 vlv_detach_power_sequencer(intel_dp);
2646 * We may be stealing the power
2647 * sequencer from another port.
2649 vlv_steal_power_sequencer(dev, crtc->pipe);
2651 /* now it's all ours */
2652 intel_dp->pps_pipe = crtc->pipe;
2654 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2655 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2657 /* init power sequencer on this pipe and port */
2658 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2659 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV pre-enable: program the port's PHY (PCS) registers over the
 * DPIO sideband under sb_lock, then run the common intel_enable_dp()
 * sequence (port enable, panel power, link training).
 * NOTE(review): the DW8 read-modify-write appears truncated in this
 * extract — the lines between the DW8 read and the DW8 write are
 * missing.
 */
2662 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2664 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2665 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2666 struct drm_device *dev = encoder->base.dev;
2667 struct drm_i915_private *dev_priv = dev->dev_private;
2668 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2669 enum dpio_channel port = vlv_dport_to_channel(dport);
2670 int pipe = intel_crtc->pipe;
2673 mutex_lock(&dev_priv->sb_lock);
2675 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
/* magic PCS values — taken as-is from the VLV programming sequence */
2682 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2684 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2686 mutex_unlock(&dev_priv->sb_lock);
2688 intel_enable_dp(encoder);
/*
 * VLV pre-PLL hook: latch the port register configuration, then reset
 * the Tx lanes and program PCS clock and inter-pair skew defaults via
 * DPIO sideband before the DPLL is enabled.
 */
2691 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2693 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2694 struct drm_device *dev = encoder->base.dev;
2695 struct drm_i915_private *dev_priv = dev->dev_private;
2696 struct intel_crtc *intel_crtc =
2697 to_intel_crtc(encoder->base.crtc);
2698 enum dpio_channel port = vlv_dport_to_channel(dport);
2699 int pipe = intel_crtc->pipe;
2701 intel_dp_prepare(encoder);
2703 /* Program Tx lane resets to default */
2704 mutex_lock(&dev_priv->sb_lock);
2705 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2706 DPIO_PCS_TX_LANE2_RESET |
2707 DPIO_PCS_TX_LANE1_RESET);
2708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2709 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2710 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2711 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2712 DPIO_PCS_CLK_SOFT_RESET);
2714 /* Fix up inter-pair skew failure */
2715 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2716 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2717 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2718 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV pre-enable: over DPIO sideband, hand TX FIFO reset control back
 * to hardware, deassert the soft data-lane resets, program per-lane
 * latency (upar) settings, and set up data-lane stagger based on the
 * port clock; then run the common intel_enable_dp() sequence.
 * NOTE(review): the per-range 'stagger = N;' assignments are missing
 * from this extract — only the clock comparisons remain visible.
 */
2721 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2723 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2724 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2725 struct drm_device *dev = encoder->base.dev;
2726 struct drm_i915_private *dev_priv = dev->dev_private;
2727 struct intel_crtc *intel_crtc =
2728 to_intel_crtc(encoder->base.crtc);
2729 enum dpio_channel ch = vlv_dport_to_channel(dport);
2730 int pipe = intel_crtc->pipe;
2731 int data, i, stagger;
2734 mutex_lock(&dev_priv->sb_lock);
2736 /* allow hardware to manage TX FIFO reset source */
2737 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2738 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2739 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2741 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2742 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2743 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2745 /* Deassert soft data lane reset*/
2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2747 val |= CHV_PCS_REQ_SOFTRESET_EN;
2748 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2750 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2751 val |= CHV_PCS_REQ_SOFTRESET_EN;
2752 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/* take both lane pairs out of reset (inverse of the disable path) */
2754 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2755 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2756 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2758 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2759 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2760 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2762 /* Program Tx lane latency optimal setting*/
2763 for (i = 0; i < 4; i++) {
2764 /* Set the upar bit, except for lane 1 */
2765 data = (i == 1) ? 0x0 : 0x1;
2766 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2767 data << DPIO_UPAR_SHIFT);
2770 /* Data lane stagger programming */
2771 if (intel_crtc->config->port_clock > 270000)
2773 else if (intel_crtc->config->port_clock > 135000)
2775 else if (intel_crtc->config->port_clock > 67500)
2777 else if (intel_crtc->config->port_clock > 33750)
2782 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2783 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2784 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2786 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2787 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2788 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2791 DPIO_LANESTAGGER_STRAP(stagger) |
2792 DPIO_LANESTAGGER_STRAP_OVRD |
2793 DPIO_TX1_STAGGER_MASK(0x1f) |
2794 DPIO_TX1_STAGGER_MULT(6) |
2795 DPIO_TX2_STAGGER_MULT(0));
2797 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2798 DPIO_LANESTAGGER_STRAP(stagger) |
2799 DPIO_LANESTAGGER_STRAP_OVRD |
2800 DPIO_TX1_STAGGER_MASK(0x1f) |
2801 DPIO_TX1_STAGGER_MULT(7) |
2802 DPIO_TX2_STAGGER_MULT(5));
2804 mutex_unlock(&dev_priv->sb_lock);
2806 intel_enable_dp(encoder);
/*
 * CHV pre-PLL hook: latch the port register configuration, then via
 * DPIO sideband program left/right clock-buffer distribution, PCS
 * clock-channel usage for both lane pairs, and the common-lane clock
 * channel selection.
 * NOTE(review): the else-arms choosing between the channel-0/1 force
 * bits are not visible in this extract.
 */
2809 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2811 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2812 struct drm_device *dev = encoder->base.dev;
2813 struct drm_i915_private *dev_priv = dev->dev_private;
2814 struct intel_crtc *intel_crtc =
2815 to_intel_crtc(encoder->base.crtc);
2816 enum dpio_channel ch = vlv_dport_to_channel(dport);
2817 enum pipe pipe = intel_crtc->pipe;
2820 intel_dp_prepare(encoder);
2822 mutex_lock(&dev_priv->sb_lock);
2824 /* program left/right clock distribution */
2825 if (pipe != PIPE_B) {
2826 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2827 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2829 val |= CHV_BUFLEFTENA1_FORCE;
2831 val |= CHV_BUFRIGHTENA1_FORCE;
2832 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2834 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2835 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2837 val |= CHV_BUFLEFTENA2_FORCE;
2839 val |= CHV_BUFRIGHTENA2_FORCE;
2840 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2843 /* program clock channel usage */
2844 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2845 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2847 val &= ~CHV_PCS_USEDCLKCHANNEL;
2849 val |= CHV_PCS_USEDCLKCHANNEL;
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2852 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2853 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2855 val &= ~CHV_PCS_USEDCLKCHANNEL;
2857 val |= CHV_PCS_USEDCLKCHANNEL;
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2861 * This is a bit weird since generally CL
2862 * matches the pipe, but here we need to
2863 * pick the CL based on the port.
2865 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2867 val &= ~CHV_CMN_USEDCLKCHANNEL;
2869 val |= CHV_CMN_USEDCLKCHANNEL;
2870 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2872 mutex_unlock(&dev_priv->sb_lock);
2876 * Native read with retry for link status and receiver capability reads for
2877 * cases where the sink may still be asleep.
2879 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2880 * supposed to retry 3 times per the spec.
/*
 * NOTE(review): the loop tail (success check and final return) is not
 * visible in this extract; on success the return value is presumably
 * the number of bytes read — confirm against drm_dp_dpcd_read().
 */
2883 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2884 void *buffer, size_t size)
2890 * Sometimes we just get the same incorrect byte repeated
2891 * over the entire buffer. Doing just one throw away read
2892 * initially seems to "solve" it.
2894 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
/* retry up to 3 times, per the DP spec's wake-up allowance */
2896 for (i = 0; i < 3; i++) {
2897 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2907 * Fetch AUX CH registers 0x202 - 0x207 which contain
2908 * link status information
/* Returns true only if all DP_LINK_STATUS_SIZE bytes were read. */
2911 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2913 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2916 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2919 /* These are source-specific values. */
/*
 * Maximum voltage-swing level the source hardware supports for this
 * port, as a DP_TRAIN_VOLTAGE_SWING_LEVEL_* value.  Used to clamp the
 * sink's adjust requests in intel_get_adjust_train().
 */
2921 intel_dp_voltage_max(struct intel_dp *intel_dp)
2923 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2924 struct drm_i915_private *dev_priv = dev->dev_private;
2925 enum port port = dp_to_dig_port(intel_dp)->port;
2927 if (IS_BROXTON(dev))
2928 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2929 else if (INTEL_INFO(dev)->gen >= 9) {
/* gen9 eDP with low-vswing VBT opt-in gets the full range */
2930 if (dev_priv->edp_low_vswing && port == PORT_A)
2931 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2932 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2933 } else if (IS_VALLEYVIEW(dev))
2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935 else if (IS_GEN7(dev) && port == PORT_A)
2936 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2937 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2938 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * Maximum pre-emphasis level the source supports for the given
 * voltage-swing selection, per platform.  The tables encode the usual
 * trade-off: higher swing levels leave less pre-emphasis headroom.
 */
2944 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2946 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2947 enum port port = dp_to_dig_port(intel_dp)->port;
2949 if (INTEL_INFO(dev)->gen >= 9) {
2950 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2956 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2960 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2962 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2963 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2964 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2965 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2967 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2968 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2969 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2972 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2974 } else if (IS_VALLEYVIEW(dev)) {
2975 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2977 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2979 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2981 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2984 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/* gen7 eDP (port A) supports a reduced range */
2986 } else if (IS_GEN7(dev) && port == PORT_A) {
2987 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2989 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2990 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2992 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2994 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/* default table for all remaining platforms */
2997 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2999 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3000 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3001 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3003 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3006 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * Translate the negotiated train_set (voltage swing + pre-emphasis)
 * into VLV PHY demph/preemph/uniqtranscale register values and program
 * them over DPIO sideband.  The returned value is consumed as
 * 'signal_levels' by intel_dp_set_signal_levels().
 * Register constants are opaque tuning values from the VLV PHY
 * programming tables.
 */
3011 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3013 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3014 struct drm_i915_private *dev_priv = dev->dev_private;
3015 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3016 struct intel_crtc *intel_crtc =
3017 to_intel_crtc(dport->base.base.crtc);
3018 unsigned long demph_reg_value, preemph_reg_value,
3019 uniqtranscale_reg_value;
3020 uint8_t train_set = intel_dp->train_set[0];
3021 enum dpio_channel port = vlv_dport_to_channel(dport);
3022 int pipe = intel_crtc->pipe;
3024 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3025 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3026 preemph_reg_value = 0x0004000;
3027 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3029 demph_reg_value = 0x2B405555;
3030 uniqtranscale_reg_value = 0x552AB83A;
3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3033 demph_reg_value = 0x2B404040;
3034 uniqtranscale_reg_value = 0x5548B83A;
3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3037 demph_reg_value = 0x2B245555;
3038 uniqtranscale_reg_value = 0x5560B83A;
3040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3041 demph_reg_value = 0x2B405555;
3042 uniqtranscale_reg_value = 0x5598DA3A;
3048 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3049 preemph_reg_value = 0x0002000;
3050 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3051 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3052 demph_reg_value = 0x2B404040;
3053 uniqtranscale_reg_value = 0x5552B83A;
3055 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3056 demph_reg_value = 0x2B404848;
3057 uniqtranscale_reg_value = 0x5580B83A;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3060 demph_reg_value = 0x2B404040;
3061 uniqtranscale_reg_value = 0x55ADDA3A;
3067 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3068 preemph_reg_value = 0x0000000;
3069 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3070 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3071 demph_reg_value = 0x2B305555;
3072 uniqtranscale_reg_value = 0x5570B83A;
3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3075 demph_reg_value = 0x2B2B4040;
3076 uniqtranscale_reg_value = 0x55ADDA3A;
3082 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3083 preemph_reg_value = 0x0006000;
3084 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3085 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3086 demph_reg_value = 0x1B405555;
3087 uniqtranscale_reg_value = 0x55ADDA3A;
/* TX_DW5 is cleared before and rewritten after — write order matters */
3097 mutex_lock(&dev_priv->sb_lock);
3098 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3099 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3100 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3101 uniqtranscale_reg_value);
3102 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3103 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3104 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3105 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3106 mutex_unlock(&dev_priv->sb_lock);
/*
 * Translate train_set into CHV PHY de-emphasis and swing-margin values
 * and program them over DPIO sideband: clear the swing calculation,
 * zero the margins, write per-lane deemph/margin, handle the special
 * unique-transition-scale case (max swing + no pre-emphasis), then
 * restart the swing calculation.  The returned value is consumed as
 * 'signal_levels' by intel_dp_set_signal_levels().
 * The deemph/margin numbers are opaque tuning values from the CHV PHY
 * programming tables.
 */
3111 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3113 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3114 struct drm_i915_private *dev_priv = dev->dev_private;
3115 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3116 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3117 u32 deemph_reg_value, margin_reg_value, val;
3118 uint8_t train_set = intel_dp->train_set[0];
3119 enum dpio_channel ch = vlv_dport_to_channel(dport);
3120 enum pipe pipe = intel_crtc->pipe;
3123 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3124 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3125 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3127 deemph_reg_value = 128;
3128 margin_reg_value = 52;
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3131 deemph_reg_value = 128;
3132 margin_reg_value = 77;
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3135 deemph_reg_value = 128;
3136 margin_reg_value = 102;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3139 deemph_reg_value = 128;
3140 margin_reg_value = 154;
3141 /* FIXME extra to set for 1200 */
3147 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3148 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3149 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3150 deemph_reg_value = 85;
3151 margin_reg_value = 78;
3153 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3154 deemph_reg_value = 85;
3155 margin_reg_value = 116;
3157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3158 deemph_reg_value = 85;
3159 margin_reg_value = 154;
3165 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3166 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3168 deemph_reg_value = 64;
3169 margin_reg_value = 104;
3171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3172 deemph_reg_value = 64;
3173 margin_reg_value = 154;
3179 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3180 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3181 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3182 deemph_reg_value = 43;
3183 margin_reg_value = 154;
3193 mutex_lock(&dev_priv->sb_lock);
3195 /* Clear calc init */
3196 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3197 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3198 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3199 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3200 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3202 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3203 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3204 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3205 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3206 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* zero the per-pair margin fields before the per-lane programming */
3208 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3209 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3210 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3211 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3213 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3214 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3215 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3216 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3218 /* Program swing deemph */
3219 for (i = 0; i < 4; i++) {
3220 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3221 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3222 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3223 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3226 /* Program swing margin */
3227 for (i = 0; i < 4; i++) {
3228 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3229 val &= ~DPIO_SWING_MARGIN000_MASK;
3230 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3231 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3234 /* Disable unique transition scale */
3235 for (i = 0; i < 4; i++) {
3236 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3237 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3238 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
/* special case: max swing with no pre-emphasis needs uniq-trans scale */
3241 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3242 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3243 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3244 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3247 * The document said it needs to set bit 27 for ch0 and bit 26
3248 * for ch1. Might be a typo in the doc.
3249 * For now, for this unique transition scale selection, set bit
3250 * 27 for ch0 and ch1.
3252 for (i = 0; i < 4; i++) {
3253 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3254 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3255 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3258 for (i = 0; i < 4; i++) {
3259 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3260 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3261 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3262 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3266 /* Start swing calculation */
3267 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3268 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3269 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3271 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3272 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3273 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3276 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3277 val |= DPIO_LRC_BYPASS;
3278 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3280 mutex_unlock(&dev_priv->sb_lock);
/*
 * Compute the next train_set from the sink's adjust-request fields in
 * link_status: take the highest requested voltage/pre-emphasis across
 * the active lanes, clamp to the source maxima (OR-ing in the
 * MAX_*_REACHED flags when clamped), and apply the same value to all
 * four train_set entries.
 * NOTE(review): the 'v'/'p' accumulator declarations and the
 * max-tracking statements inside the loop are not visible in this
 * extract.
 */
3286 intel_get_adjust_train(struct intel_dp *intel_dp,
3287 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3292 uint8_t voltage_max;
3293 uint8_t preemph_max;
3295 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3296 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3297 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
/* clamp to what the source hardware can actually drive */
3305 voltage_max = intel_dp_voltage_max(intel_dp);
3306 if (v >= voltage_max)
3307 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3309 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3310 if (p >= preemph_max)
3311 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3313 for (lane = 0; lane < 4; lane++)
3314 intel_dp->train_set[lane] = v | p;
/*
 * Map a DPCD train_set byte to the g4x-era port-register voltage and
 * pre-emphasis bits.
 */
3318 gen4_signal_levels(uint8_t train_set)
3320 uint32_t signal_levels = 0;
3322 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3325 signal_levels |= DP_VOLTAGE_0_4;
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3328 signal_levels |= DP_VOLTAGE_0_6;
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3331 signal_levels |= DP_VOLTAGE_0_8;
3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3334 signal_levels |= DP_VOLTAGE_1_2;
3337 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3338 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3340 signal_levels |= DP_PRE_EMPHASIS_0;
3342 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3343 signal_levels |= DP_PRE_EMPHASIS_3_5;
3345 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3346 signal_levels |= DP_PRE_EMPHASIS_6;
3348 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3349 signal_levels |= DP_PRE_EMPHASIS_9_5;
3352 return signal_levels;
3355 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map train_set to a SNB eDP link-training register value; unsupported
 * swing/emphasis combinations fall back to 400_600MV_0DB with a debug
 * message.
 */
3357 gen6_edp_signal_levels(uint8_t train_set)
3359 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3360 DP_TRAIN_PRE_EMPHASIS_MASK);
3361 switch (signal_levels) {
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3364 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3366 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3367 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3369 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3375 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3377 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3378 "0x%x\n", signal_levels);
3379 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3383 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Map train_set to an IVB eDP link-training register value for
 * supported swing/emphasis combinations.
 */
3385 gen7_edp_signal_levels(uint8_t train_set)
3387 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3388 DP_TRAIN_PRE_EMPHASIS_MASK);
3389 switch (signal_levels) {
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3393 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3395 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3397 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3398 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3400 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3403 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3404 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3405 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3408 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3409 "0x%x\n", signal_levels);
/* NOTE(review): fallback uses the 500MV setting, which has no case of
 * its own above — looks intentional but worth confirming. */
3410 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3414 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
/*
 * Map train_set to a DDI buffer translation table index
 * (DDI_BUF_TRANS_SELECT) for HSW/BDW; unsupported combinations fall
 * back to entry 0 with a debug message.
 */
3416 hsw_signal_levels(uint8_t train_set)
3418 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3419 DP_TRAIN_PRE_EMPHASIS_MASK);
3420 switch (signal_levels) {
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3422 return DDI_BUF_TRANS_SELECT(0);
3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3424 return DDI_BUF_TRANS_SELECT(1);
3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3426 return DDI_BUF_TRANS_SELECT(2);
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3428 return DDI_BUF_TRANS_SELECT(3);
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3431 return DDI_BUF_TRANS_SELECT(4);
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3433 return DDI_BUF_TRANS_SELECT(5);
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3435 return DDI_BUF_TRANS_SELECT(6);
3437 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3438 return DDI_BUF_TRANS_SELECT(7);
3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3440 return DDI_BUF_TRANS_SELECT(8);
3442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3443 return DDI_BUF_TRANS_SELECT(9);
3445 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3446 "0x%x\n", signal_levels);
3447 return DDI_BUF_TRANS_SELECT(0);
/*
 * BXT: convert train_set to a vswing table level and program the PHY
 * via bxt_ddi_vswing_sequence().  Unlike the other *_signal_levels
 * helpers this returns nothing — the programming happens here.
 * NOTE(review): the per-case 'level = N;' assignments are missing from
 * this extract; the unsupported-combo case falls through to level 0.
 */
3451 static void bxt_signal_levels(struct intel_dp *intel_dp)
3453 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3454 enum port port = dport->port;
3455 struct drm_device *dev = dport->base.base.dev;
3456 struct intel_encoder *encoder = &dport->base;
3457 uint8_t train_set = intel_dp->train_set[0];
3460 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3461 DP_TRAIN_PRE_EMPHASIS_MASK);
3462 switch (signal_levels) {
3464 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3497 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3500 /* Properly updates "DP" with the correct signal levels. */
/*
 * Platform dispatcher: pick the per-generation helper that converts the
 * first lane's train_set byte into port-register signal-level bits, then
 * merge those bits into *DP under the matching mask.  BXT programs the
 * PHY directly and contributes no register bits here.
 */
3502 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3504 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3505 enum port port = intel_dig_port->port;
3506 struct drm_device *dev = intel_dig_port->base.base.dev;
3507 uint32_t signal_levels, mask;
3508 uint8_t train_set = intel_dp->train_set[0];
3510 if (IS_BROXTON(dev)) {
3512 bxt_signal_levels(intel_dp);
3514 } else if (HAS_DDI(dev)) {
3515 signal_levels = hsw_signal_levels(train_set);
3516 mask = DDI_BUF_EMP_MASK;
3517 } else if (IS_CHERRYVIEW(dev)) {
3518 signal_levels = chv_signal_levels(intel_dp);
3520 } else if (IS_VALLEYVIEW(dev)) {
3521 signal_levels = vlv_signal_levels(intel_dp);
3523 } else if (IS_GEN7(dev) && port == PORT_A) {
3524 signal_levels = gen7_edp_signal_levels(train_set);
3525 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3526 } else if (IS_GEN6(dev) && port == PORT_A) {
3527 signal_levels = gen6_edp_signal_levels(train_set);
3528 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3530 signal_levels = gen4_signal_levels(train_set);
3531 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3535 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3537 DRM_DEBUG_KMS("Using vswing level %d\n",
3538 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3539 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3540 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3541 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Replace only the signal-level bits of the port register value. */
3543 *DP = (*DP & ~mask) | signal_levels;
/*
 * Program the source side for the given training pattern (via the
 * platform hook and a port-register write), then mirror the pattern —
 * and, unless disabling, the per-lane drive settings — to the sink's
 * DP_TRAINING_PATTERN_SET / DP_TRAINING_LANEx_SET DPCD registers in a
 * single AUX write.  Returns whether the full DPCD write succeeded
 * (ret == len check is outside this excerpt).
 */
3547 intel_dp_set_link_train(struct intel_dp *intel_dp,
3549 uint8_t dp_train_pat)
3551 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3552 struct drm_device *dev = intel_dig_port->base.base.dev;
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3554 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3557 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3559 I915_WRITE(intel_dp->output_reg, *DP);
3560 POSTING_READ(intel_dp->output_reg);
3562 buf[0] = dp_train_pat;
3563 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3564 DP_TRAINING_PATTERN_DISABLE) {
3565 /* don't write DP_TRAINING_LANEx_SET on disable */
3568 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3569 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3570 len = intel_dp->lane_count + 1;
3573 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * Restart training from scratch: clear the cached per-lane drive
 * settings (unless a previously-validated train_set is being reused),
 * reprogram signal levels, and set the requested training pattern.
 */
3580 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3581 uint8_t dp_train_pat)
3583 if (!intel_dp->train_set_valid)
3584 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3585 intel_dp_set_signal_levels(intel_dp, DP);
3586 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * Mid-training adjust step: derive new per-lane vswing/pre-emphasis from
 * the sink's link_status, program the source registers, then push the
 * lane settings to DP_TRAINING_LANE0_SET..n.  Returns true only if the
 * AUX write covered all active lanes.
 */
3590 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3591 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3593 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3594 struct drm_device *dev = intel_dig_port->base.base.dev;
3595 struct drm_i915_private *dev_priv = dev->dev_private;
3598 intel_get_adjust_train(intel_dp, link_status);
3599 intel_dp_set_signal_levels(intel_dp, DP);
3601 I915_WRITE(intel_dp->output_reg, *DP);
3602 POSTING_READ(intel_dp->output_reg);
3604 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3605 intel_dp->train_set, intel_dp->lane_count);
3607 return ret == intel_dp->lane_count;
/*
 * Switch the DDI transport to idle-pattern transmission and, except on
 * PORT_A, wait for the hardware to confirm the idle pattern was sent
 * (see workaround note below).
 */
3610 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3612 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3613 struct drm_device *dev = intel_dig_port->base.base.dev;
3614 struct drm_i915_private *dev_priv = dev->dev_private;
3615 enum port port = intel_dig_port->port;
3621 val = I915_READ(DP_TP_CTL(port));
3622 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3623 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3624 I915_WRITE(DP_TP_CTL(port), val);
3627 * On PORT_A we can have only eDP in SST mode. There the only reason
3628 * we need to set idle transmission mode is to work around a HW issue
3629 * where we enable the pipe while not in idle link-training mode.
3630 * In this case there is requirement to wait for a minimum number of
3631 * idle patterns to be sent.
3636 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3638 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3641 /* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training:
 *  1. write link rate / lane count (and, for eDP 1.4 sinks, the link
 *     rate table index) to the sink,
 *  2. start training pattern 1 with scrambling disabled,
 *  3. loop: read link status, and either finish on clock-recovery OK,
 *     reset after a stale cached train_set, escalate after max-swing on
 *     all lanes (up to 5 full retries), give up after 5 identical
 *     voltage attempts, or apply the sink's requested adjustments.
 */
3643 intel_dp_start_link_train(struct intel_dp *intel_dp)
3645 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3646 struct drm_device *dev = encoder->dev;
3649 int voltage_tries, loop_tries;
3650 uint32_t DP = intel_dp->DP;
3651 uint8_t link_config[2];
3654 intel_ddi_prepare_link_retrain(encoder);
3656 /* Write the link configuration data */
3657 link_config[0] = intel_dp->link_bw;
3658 link_config[1] = intel_dp->lane_count;
3659 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3660 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3661 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3662 if (intel_dp->num_sink_rates)
3663 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3664 &intel_dp->rate_select, 1);
3667 link_config[1] = DP_SET_ANSI_8B10B;
3668 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3672 /* clock recovery */
3673 if (!intel_dp_reset_link_train(intel_dp, &DP,
3674 DP_TRAINING_PATTERN_1 |
3675 DP_LINK_SCRAMBLING_DISABLE)) {
3676 DRM_ERROR("failed to enable link training\n");
3684 uint8_t link_status[DP_LINK_STATUS_SIZE];
3686 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3687 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3688 DRM_ERROR("failed to get link status\n");
3692 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3693 DRM_DEBUG_KMS("clock recovery OK\n");
3698 * if we used previously trained voltage and pre-emphasis values
3699 * and we don't get clock recovery, reset link training values
3701 if (intel_dp->train_set_valid) {
3702 DRM_DEBUG_KMS("clock recovery not ok, reset");
3703 /* clear the flag as we are not reusing train set */
3704 intel_dp->train_set_valid = false;
3705 if (!intel_dp_reset_link_train(intel_dp, &DP,
3706 DP_TRAINING_PATTERN_1 |
3707 DP_LINK_SCRAMBLING_DISABLE)) {
3708 DRM_ERROR("failed to enable link training\n");
3714 /* Check to see if we've tried the max voltage */
3715 for (i = 0; i < intel_dp->lane_count; i++)
3716 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3718 if (i == intel_dp->lane_count) {
3720 if (loop_tries == 5) {
3721 DRM_ERROR("too many full retries, give up\n")
3724 intel_dp_reset_link_train(intel_dp, &DP,
3725 DP_TRAINING_PATTERN_1 |
3726 DP_LINK_SCRAMBLING_DISABLE);
3731 /* Check to see if we've tried the same voltage 5 times */
3732 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3734 if (voltage_tries == 5) {
3735 DRM_ERROR("too many voltage retries, give up\n");
3740 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3742 /* Update training set as requested by target */
3743 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3744 DRM_ERROR("failed to update link training\n");
/*
 * Channel-equalization phase of DP link training: select TP2 (or TP3
 * for HBR2 / TPS3-capable sinks), then loop reading link status.
 * Clock-recovery loss or 5 failed EQ attempts restarts training from
 * the clock-recovery phase; success marks the cached train_set valid
 * and enters idle-pattern transmission.
 */
3753 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3755 bool channel_eq = false;
3756 int tries, cr_tries;
3757 uint32_t DP = intel_dp->DP;
3758 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3760 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3761 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3762 training_pattern = DP_TRAINING_PATTERN_3;
3764 /* channel equalization */
3765 if (!intel_dp_set_link_train(intel_dp, &DP,
3767 DP_LINK_SCRAMBLING_DISABLE)) {
3768 DRM_ERROR("failed to start channel equalization\n");
3776 uint8_t link_status[DP_LINK_STATUS_SIZE];
/* NOTE(review): cr_tries limit check — condition line elided here. */
3779 DRM_ERROR("failed to train DP, aborting\n");
3783 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3784 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3785 DRM_ERROR("failed to get link status\n");
3789 /* Make sure clock is still ok */
3790 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3791 intel_dp->train_set_valid = false;
3792 intel_dp_start_link_train(intel_dp);
3793 intel_dp_set_link_train(intel_dp, &DP,
3795 DP_LINK_SCRAMBLING_DISABLE);
3800 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3805 /* Try 5 times, then try clock recovery if that fails */
3807 intel_dp->train_set_valid = false;
3808 intel_dp_start_link_train(intel_dp);
3809 intel_dp_set_link_train(intel_dp, &DP,
3811 DP_LINK_SCRAMBLING_DISABLE);
3817 /* Update training set as requested by target */
3818 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3819 DRM_ERROR("failed to update link training\n");
3825 intel_dp_set_idle_link_train(intel_dp);
3830 intel_dp->train_set_valid = true;
3831 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End link training: tell source and sink to stop sending patterns. */
3835 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3837 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3838 DP_TRAINING_PATTERN_DISABLE);
/*
 * Bring a non-DDI DP port down: put the link in the idle training
 * state appropriate for the platform, disable the port and audio, and
 * apply the IBX transcoder-A workaround before the final disable write.
 * Ends with the panel power-down delay for eDP sequencing.
 */
3842 intel_dp_link_down(struct intel_dp *intel_dp)
3844 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3845 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3846 enum port port = intel_dig_port->port;
3847 struct drm_device *dev = intel_dig_port->base.base.dev;
3848 struct drm_i915_private *dev_priv = dev->dev_private;
3849 uint32_t DP = intel_dp->DP;
3851 if (WARN_ON(HAS_DDI(dev)))
3854 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3857 DRM_DEBUG_KMS("\n");
3859 if ((IS_GEN7(dev) && port == PORT_A) ||
3860 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3861 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3862 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3864 if (IS_CHERRYVIEW(dev))
3865 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3867 DP &= ~DP_LINK_TRAIN_MASK;
3868 DP |= DP_LINK_TRAIN_PAT_IDLE;
3870 I915_WRITE(intel_dp->output_reg, DP);
3871 POSTING_READ(intel_dp->output_reg);
3873 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3874 I915_WRITE(intel_dp->output_reg, DP);
3875 POSTING_READ(intel_dp->output_reg);
3878 * HW workaround for IBX, we need to move the port
3879 * to transcoder A after disabling it to allow the
3880 * matching HDMI port to be enabled on transcoder A.
3882 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3883 /* always enable with pattern 1 (as per spec) */
3884 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3885 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3886 I915_WRITE(intel_dp->output_reg, DP);
3887 POSTING_READ(intel_dp->output_reg);
/* then disable again, now pointing at transcoder A */
3890 I915_WRITE(intel_dp->output_reg, DP);
3891 POSTING_READ(intel_dp->output_reg);
3894 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver-capability block, then probe
 * optional features: PSR/PSR2 (eDP), TPS3 support, the eDP 1.4
 * intermediate link-rate table, and downstream-port info.  Returns
 * false on AUX failure, absent DPCD, or a failed downstream-port read.
 */
3898 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3900 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3901 struct drm_device *dev = dig_port->base.base.dev;
3902 struct drm_i915_private *dev_priv = dev->dev_private;
3905 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3906 sizeof(intel_dp->dpcd)) < 0)
3907 return false; /* aux transfer failed */
3909 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3911 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3912 return false; /* DPCD not present */
3914 /* Check if the panel supports PSR */
3915 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3916 if (is_edp(intel_dp)) {
3917 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3919 sizeof(intel_dp->psr_dpcd));
3920 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3921 dev_priv->psr.sink_support = true;
3922 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3925 if (INTEL_INFO(dev)->gen >= 9 &&
3926 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3927 uint8_t frame_sync_cap;
3929 dev_priv->psr.sink_support = true;
3930 intel_dp_dpcd_read_wake(&intel_dp->aux,
3931 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3932 &frame_sync_cap, 1);
3933 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3934 /* PSR2 needs frame sync as well */
3935 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3936 DRM_DEBUG_KMS("PSR2 %s on sink",
3937 dev_priv->psr.psr2_support ? "supported" : "not supported");
3941 /* Training Pattern 3 support, both source and sink */
3942 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3943 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3944 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3945 intel_dp->use_tps3 = true;
3946 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3948 intel_dp->use_tps3 = false;
3950 /* Intermediate frequency support */
3951 if (is_edp(intel_dp) &&
3952 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3953 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3954 (rev >= 0x03)) { /* eDp v1.4 or higher */
3955 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3958 intel_dp_dpcd_read_wake(&intel_dp->aux,
3959 DP_SUPPORTED_LINK_RATES,
3961 sizeof(sink_rates));
3963 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3964 int val = le16_to_cpu(sink_rates[i]);
3969 /* Value read is in kHz while drm clock is saved in deca-kHz */
3970 intel_dp->sink_rates[i] = (val * 200) / 10;
3972 intel_dp->num_sink_rates = i;
3975 intel_dp_print_rates(intel_dp);
3977 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3978 DP_DWN_STRM_PORT_PRESENT))
3979 return true; /* native DP sink */
3981 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3982 return true; /* no per-port downstream info */
3984 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3985 intel_dp->downstream_ports,
3986 DP_MAX_DOWNSTREAM_PORTS) < 0)
3987 return false; /* downstream port status fetch failed */
/*
 * If the sink advertises OUI support, read and log the sink and branch
 * device OUIs (debug aid only — failures are silently ignored).
 */
3993 intel_dp_probe_oui(struct intel_dp *intel_dp)
3997 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4000 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4001 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4002 buf[0], buf[1], buf[2]);
4004 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4005 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4006 buf[0], buf[1], buf[2]);
/*
 * Determine whether the sink is MST-capable (requires can_mst and
 * DPCD >= 1.2), record the result in intel_dp->is_mst, push the state
 * into the MST topology manager, and return it.
 */
4010 intel_dp_probe_mst(struct intel_dp *intel_dp)
4014 if (!intel_dp->can_mst)
4017 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4020 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4021 if (buf[0] & DP_MST_CAP) {
4022 DRM_DEBUG_KMS("Sink is MST capable\n");
4023 intel_dp->is_mst = true;
4025 DRM_DEBUG_KMS("Sink is not MST capable\n");
4026 intel_dp->is_mst = false;
4030 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4031 return intel_dp->is_mst;
/*
 * Ask the sink to compute a test CRC of the incoming stream (DP CTS
 * feature): disable IPS, verify CRC support, start the sink CRC engine,
 * wait up to 6 vblanks for the test count to advance, read the 6-byte
 * CRC into *crc, then stop the engine and re-enable IPS.
 */
4034 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4036 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4037 struct drm_device *dev = intel_dig_port->base.base.dev;
4038 struct intel_crtc *intel_crtc =
4039 to_intel_crtc(intel_dig_port->base.base.crtc);
4045 hsw_disable_ips(intel_crtc);
4047 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4052 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4057 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4062 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4063 buf | DP_TEST_SINK_START) < 0) {
4068 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4073 test_crc_count = buf & DP_TEST_COUNT_MASK;
4076 if (drm_dp_dpcd_readb(&intel_dp->aux,
4077 DP_TEST_SINK_MISC, &buf) < 0) {
4081 intel_wait_for_vblank(dev, intel_crtc->pipe);
4082 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4084 if (attempts == 0) {
4085 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4090 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4095 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4099 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4100 buf & ~DP_TEST_SINK_START) < 0) {
4105 hsw_enable_ips(intel_crtc);
/* Read the sink's one-byte DEVICE_SERVICE_IRQ_VECTOR; true on success. */
4110 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4112 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4113 DP_DEVICE_SERVICE_IRQ_VECTOR,
4114 sink_irq_vector, 1) == 1;
/* Read the 14-byte MST event-status-indicator (ESI) block from the sink. */
4118 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4122 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4124 sink_irq_vector, 14);
/* DP CTS autotest stub: link-training test — currently always ACKs. */
4131 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4133 uint8_t test_result = DP_TEST_ACK;
/* DP CTS autotest stub: video-pattern test — not implemented, NAKs. */
4137 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4139 uint8_t test_result = DP_TEST_NAK;
/*
 * DP CTS EDID-read autotest: if the cached EDID read failed, was
 * corrupt, or saw excessive I2C defers, request failsafe-resolution
 * compliance mode; otherwise write the EDID checksum back to the sink
 * and ACK with the checksum-written flag.  Sets compliance state for
 * the userspace test tool.
 */
4143 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4145 uint8_t test_result = DP_TEST_NAK;
4146 struct intel_connector *intel_connector = intel_dp->attached_connector;
4147 struct drm_connector *connector = &intel_connector->base;
4149 if (intel_connector->detect_edid == NULL ||
4150 connector->edid_corrupt ||
4151 intel_dp->aux.i2c_defer_count > 6) {
4152 /* Check EDID read for NACKs, DEFERs and corruption
4153 * (DP CTS 1.2 Core r1.1)
4154 * 4.2.2.4 : Failed EDID read, I2C_NAK
4155 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4156 * 4.2.2.6 : EDID corruption detected
4157 * Use failsafe mode for all cases
4159 if (intel_dp->aux.i2c_nack_count > 0 ||
4160 intel_dp->aux.i2c_defer_count > 0)
4161 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4162 intel_dp->aux.i2c_nack_count,
4163 intel_dp->aux.i2c_defer_count);
4164 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4166 if (!drm_dp_dpcd_write(&intel_dp->aux,
4167 DP_TEST_EDID_CHECKSUM,
4168 &intel_connector->detect_edid->checksum,
4170 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4172 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4173 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4176 /* Set test active flag here so userspace doesn't interrupt things */
4177 intel_dp->compliance_test_active = 1;
/* DP CTS autotest stub: PHY test pattern — not implemented, NAKs. */
4182 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4184 uint8_t test_result = DP_TEST_NAK;
/*
 * Dispatch a sink-initiated automated compliance test: clear previous
 * compliance state and AUX error counters, read DP_TEST_REQUEST, run
 * the matching autotest handler, and write the ACK/NAK response back
 * to the sink.
 */
4188 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4190 uint8_t response = DP_TEST_NAK;
4194 intel_dp->compliance_test_active = 0;
4195 intel_dp->compliance_test_type = 0;
4196 intel_dp->compliance_test_data = 0;
4198 intel_dp->aux.i2c_nack_count = 0;
4199 intel_dp->aux.i2c_defer_count = 0;
4201 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4203 DRM_DEBUG_KMS("Could not read test request from sink\n");
4208 case DP_TEST_LINK_TRAINING:
4209 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4211 response = intel_dp_autotest_link_training(intel_dp);
4213 case DP_TEST_LINK_VIDEO_PATTERN:
4214 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4216 response = intel_dp_autotest_video_pattern(intel_dp);
4218 case DP_TEST_LINK_EDID_READ:
4219 DRM_DEBUG_KMS("EDID test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4221 response = intel_dp_autotest_edid(intel_dp);
4223 case DP_TEST_LINK_PHY_TEST_PATTERN:
4224 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4225 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4226 response = intel_dp_autotest_phy_pattern(intel_dp);
4229 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4234 status = drm_dp_dpcd_write(&intel_dp->aux,
4238 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain if
 * channel EQ dropped on active links, hand the ESI to the topology
 * manager, and (with retries) ack the handled bits and re-poll while
 * more events are pending.  On repeated ESI read failure, tear down
 * MST mode and fire a hotplug event so userspace reprobes.
 */
4242 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4246 if (intel_dp->is_mst) {
4251 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4255 /* check link status - esi[10] = 0x200c */
4256 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4257 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4258 intel_dp_start_link_train(intel_dp);
4259 intel_dp_complete_link_train(intel_dp);
4260 intel_dp_stop_link_train(intel_dp);
4263 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4264 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4267 for (retry = 0; retry < 3; retry++) {
4269 wret = drm_dp_dpcd_write(&intel_dp->aux,
4270 DP_SINK_COUNT_ESI+1,
4277 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4279 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
/* ESI read failed path: drop out of MST and notify userspace. */
4287 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4288 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4289 intel_dp->is_mst = false;
4290 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4291 /* send a hotplug event */
4292 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4299 * According to DP spec
4302 * 2. Configure link according to Receiver Capabilities
4303 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4304 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-pulse handler for SST links: bail out unless the encoder is
 * active on a running crtc, re-read link status and DPCD, ack any sink
 * service IRQs (test requests are only logged here), and retrain if
 * channel equalization has been lost.  Caller must hold
 * connection_mutex (asserted below).
 */
4307 intel_dp_check_link_status(struct intel_dp *intel_dp)
4309 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4310 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4312 u8 link_status[DP_LINK_STATUS_SIZE];
4314 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4316 if (!intel_encoder->connectors_active)
4319 if (WARN_ON(!intel_encoder->base.crtc))
4322 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4325 /* Try to read receiver status if the link appears to be up */
4326 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4330 /* Now read the DPCD to see if it's actually running */
4331 if (!intel_dp_get_dpcd(intel_dp)) {
4335 /* Try to read the source of the interrupt */
4336 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4337 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4338 /* Clear interrupt source */
4339 drm_dp_dpcd_writeb(&intel_dp->aux,
4340 DP_DEVICE_SERVICE_IRQ_VECTOR,
4343 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4344 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4345 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4346 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4349 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4350 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4351 intel_encoder->base.name);
4352 intel_dp_start_link_train(intel_dp);
4353 intel_dp_complete_link_train(intel_dp);
4354 intel_dp_stop_link_train(intel_dp);
4358 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from DPCD alone: connected for native sinks;
 * for branch devices use SINK_COUNT when HPD-aware, otherwise probe DDC,
 * and report "unknown" for port types (VGA/analog, non-EDID) where a
 * negative DDC probe is inconclusive.
 */
4359 static enum drm_connector_status
4360 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4362 uint8_t *dpcd = intel_dp->dpcd;
4365 if (!intel_dp_get_dpcd(intel_dp))
4366 return connector_status_disconnected;
4368 /* if there's no downstream port, we're done */
4369 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4370 return connector_status_connected;
4372 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4373 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4374 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4377 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4379 return connector_status_unknown;
4381 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4382 : connector_status_disconnected;
4385 /* If no HPD, poke DDC gently */
4386 if (drm_probe_ddc(&intel_dp->aux.ddc))
4387 return connector_status_connected;
4389 /* Well we tried, say unknown for unreliable port types */
4390 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4391 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4392 if (type == DP_DS_PORT_TYPE_VGA ||
4393 type == DP_DS_PORT_TYPE_NON_EDID)
4394 return connector_status_unknown;
4396 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4397 DP_DWN_STRM_PORT_TYPE_MASK;
4398 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4399 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4400 return connector_status_unknown;
4403 /* Anything else is out of spec, warn and ignore */
4404 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4405 return connector_status_disconnected;
/*
 * eDP detect: the panel is fixed, so defer to lid/opregion state via
 * intel_panel_detect() and treat "unknown" as connected.
 */
4408 static enum drm_connector_status
4409 edp_detect(struct intel_dp *intel_dp)
4411 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4412 enum drm_connector_status status;
4414 status = intel_panel_detect(dev);
4415 if (status == connector_status_unknown)
4416 status = connector_status_connected;
/*
 * PCH-split detect: consult the PCH live-status bits first; only fall
 * through to the DPCD-based probe when the port reports a connection.
 */
4421 static enum drm_connector_status
4422 ironlake_dp_detect(struct intel_dp *intel_dp)
4424 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4425 struct drm_i915_private *dev_priv = dev->dev_private;
4426 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4428 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4429 return connector_status_disconnected;
4431 return intel_dp_detect_dpcd(intel_dp);
/*
 * Sample PORT_HOTPLUG_STAT live-status for the given digital port.
 * Returns 0 when the bit is clear (disconnected); other return values
 * (connected / unknown-port) are set on paths elided from this excerpt.
 */
4434 static int g4x_digital_port_connected(struct drm_device *dev,
4435 struct intel_digital_port *intel_dig_port)
4437 struct drm_i915_private *dev_priv = dev->dev_private;
4440 if (IS_VALLEYVIEW(dev)) {
4441 switch (intel_dig_port->port) {
4443 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4446 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4449 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4455 switch (intel_dig_port->port) {
4457 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4460 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4463 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4470 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
/*
 * G4X/VLV detect: eDP follows the lid/panel state, external ports
 * consult the hotplug live-status bits and then the DPCD-based probe.
 */
4475 static enum drm_connector_status
4476 g4x_dp_detect(struct intel_dp *intel_dp)
4478 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4482 /* Can't disconnect eDP, but you can close the lid... */
4483 if (is_edp(intel_dp)) {
4484 enum drm_connector_status status;
4486 status = intel_panel_detect(dev);
4487 if (status == connector_status_unknown)
4488 status = connector_status_connected;
4492 ret = g4x_digital_port_connected(dev, intel_dig_port);
4494 return connector_status_unknown;
4496 return connector_status_disconnected;
4498 return intel_dp_detect_dpcd(intel_dp);
/*
 * Return the panel's EDID: a duplicate of the connector's cached copy
 * when one exists (NULL if the cache holds an error marker), otherwise
 * a fresh read over the AUX DDC channel.  Caller owns the result.
 */
4501 static struct edid *
4502 intel_dp_get_edid(struct intel_dp *intel_dp)
4504 struct intel_connector *intel_connector = intel_dp->attached_connector;
4506 /* use cached edid if we have one */
4507 if (intel_connector->edid) {
/* invalid cached EDID (error pointer) — report none */
4509 if (IS_ERR(intel_connector->edid))
4512 return drm_edid_duplicate(intel_connector->edid);
4514 return drm_get_edid(&intel_connector->base,
4515 &intel_dp->aux.ddc);
/*
 * Fetch and cache the EDID on the connector, then derive has_audio
 * from it — unless the force_audio property overrides auto-detection.
 */
4519 intel_dp_set_edid(struct intel_dp *intel_dp)
4521 struct intel_connector *intel_connector = intel_dp->attached_connector;
4524 edid = intel_dp_get_edid(intel_dp);
4525 intel_connector->detect_edid = edid;
4527 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4528 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4530 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached detect-time EDID and reset the derived audio flag. */
4534 intel_dp_unset_edid(struct intel_dp *intel_dp)
4536 struct intel_connector *intel_connector = intel_dp->attached_connector;
4538 kfree(intel_connector->detect_edid);
4539 intel_connector->detect_edid = NULL;
4541 intel_dp->has_audio = false;
/*
 * Take a display power-well reference for this port and return the
 * domain so the caller can release it via intel_dp_power_put().
 */
4544 static enum intel_display_power_domain
4545 intel_dp_power_get(struct intel_dp *dp)
4547 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4548 enum intel_display_power_domain power_domain;
4550 power_domain = intel_display_port_power_domain(encoder);
4551 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4553 return power_domain;
/* Release the power-well reference taken by intel_dp_power_get(). */
4557 intel_dp_power_put(struct intel_dp *dp,
4558 enum intel_display_power_domain power_domain)
4560 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4561 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector .detect() hook: clear stale EDID, report MST ports as
 * disconnected (their monitors appear via MST connectors), run the
 * platform-appropriate probe under a power-well reference, then on a
 * connected sink read the OUI, re-check MST, cache the EDID, and
 * service any pending sink IRQ / automated test request.
 */
4564 static enum drm_connector_status
4565 intel_dp_detect(struct drm_connector *connector, bool force)
4567 struct intel_dp *intel_dp = intel_attached_dp(connector);
4568 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4569 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4570 struct drm_device *dev = connector->dev;
4571 enum drm_connector_status status;
4572 enum intel_display_power_domain power_domain;
4576 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4577 connector->base.id, connector->name);
4578 intel_dp_unset_edid(intel_dp);
4580 if (intel_dp->is_mst) {
4581 /* MST devices are disconnected from a monitor POV */
4582 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4583 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4584 return connector_status_disconnected;
4587 power_domain = intel_dp_power_get(intel_dp);
4589 /* Can't disconnect eDP, but you can close the lid... */
4590 if (is_edp(intel_dp))
4591 status = edp_detect(intel_dp);
4592 else if (HAS_PCH_SPLIT(dev))
4593 status = ironlake_dp_detect(intel_dp);
4595 status = g4x_dp_detect(intel_dp);
4596 if (status != connector_status_connected)
4599 intel_dp_probe_oui(intel_dp);
4601 ret = intel_dp_probe_mst(intel_dp);
4603 /* if we are in MST mode then this connector
4604 won't appear connected or have anything with EDID on it */
4605 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4606 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4607 status = connector_status_disconnected;
4611 intel_dp_set_edid(intel_dp);
4613 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4614 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4615 status = connector_status_connected;
4617 /* Try to read the source of the interrupt */
4618 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4619 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4620 /* Clear interrupt source */
4621 drm_dp_dpcd_writeb(&intel_dp->aux,
4622 DP_DEVICE_SERVICE_IRQ_VECTOR,
4625 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4626 intel_dp_handle_test_request(intel_dp);
4627 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4628 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4632 intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector .force() hook: refresh the cached EDID for a connector
 * userspace has forced on, without running a full detect cycle.
 */
4637 intel_dp_force(struct drm_connector *connector)
4639 struct intel_dp *intel_dp = intel_attached_dp(connector);
4640 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4641 enum intel_display_power_domain power_domain;
4643 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4644 connector->base.id, connector->name);
4645 intel_dp_unset_edid(intel_dp);
4647 if (connector->status != connector_status_connected)
4650 power_domain = intel_dp_power_get(intel_dp);
4652 intel_dp_set_edid(intel_dp);
4654 intel_dp_power_put(intel_dp, power_domain);
4656 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4657 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector .get_modes() hook: populate modes from the EDID cached
 * at detect time; for eDP panels without an EDID, fall back to the
 * fixed panel mode.
 */
4660 static int intel_dp_get_modes(struct drm_connector *connector)
4662 struct intel_connector *intel_connector = to_intel_connector(connector);
4665 edid = intel_connector->detect_edid;
4667 int ret = intel_connector_update_modes(connector, edid);
4672 /* if eDP has no EDID, fall back to fixed mode */
4673 if (is_edp(intel_attached_dp(connector)) &&
4674 intel_connector->panel.fixed_mode) {
4675 struct drm_display_mode *mode;
4677 mode = drm_mode_duplicate(connector->dev,
4678 intel_connector->panel.fixed_mode);
4680 drm_mode_probed_add(connector, mode);
/* Report whether the cached detect-time EDID advertises audio support. */
4689 intel_dp_detect_audio(struct drm_connector *connector)
4691 bool has_audio = false;
4694 edid = to_intel_connector(connector)->detect_edid;
4696 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector .set_property() hook: handles force-audio, broadcast
 * RGB range, and (eDP only) the panel scaling mode.  A real change on
 * an active crtc triggers a modeset restore; unchanged values return
 * early without touching hardware.
 */
4702 intel_dp_set_property(struct drm_connector *connector,
4703 struct drm_property *property,
4706 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4707 struct intel_connector *intel_connector = to_intel_connector(connector);
4708 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4709 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4712 ret = drm_object_property_set_value(&connector->base, property, val);
4716 if (property == dev_priv->force_audio_property) {
4720 if (i == intel_dp->force_audio)
4723 intel_dp->force_audio = i;
4725 if (i == HDMI_AUDIO_AUTO)
4726 has_audio = intel_dp_detect_audio(connector);
4728 has_audio = (i == HDMI_AUDIO_ON);
4730 if (has_audio == intel_dp->has_audio)
4733 intel_dp->has_audio = has_audio;
4737 if (property == dev_priv->broadcast_rgb_property) {
4738 bool old_auto = intel_dp->color_range_auto;
4739 uint32_t old_range = intel_dp->color_range;
4742 case INTEL_BROADCAST_RGB_AUTO:
4743 intel_dp->color_range_auto = true;
4745 case INTEL_BROADCAST_RGB_FULL:
4746 intel_dp->color_range_auto = false;
4747 intel_dp->color_range = 0;
4749 case INTEL_BROADCAST_RGB_LIMITED:
4750 intel_dp->color_range_auto = false;
4751 intel_dp->color_range = DP_COLOR_RANGE_16_235;
/* no effective change — skip the modeset */
4757 if (old_auto == intel_dp->color_range_auto &&
4758 old_range == intel_dp->color_range)
4764 if (is_edp(intel_dp) &&
4765 property == connector->dev->mode_config.scaling_mode_property) {
4766 if (val == DRM_MODE_SCALE_NONE) {
4767 DRM_DEBUG_KMS("no scaling not supported\n");
4771 if (intel_connector->panel.fitting_mode == val) {
4772 /* the eDP scaling property is not changed */
4775 intel_connector->panel.fitting_mode = val;
4783 if (intel_encoder->base.crtc)
4784 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy hook: free the cached EDIDs, tear down the
 * eDP panel state (checked via connector_type since the encoder may be
 * gone), then run the generic connector cleanup and free the connector.
 * NOTE(review): the IS_ERR_OR_NULL guard before kfree is needed because
 * intel_connector->edid may hold an ERR_PTR sentinel, not just NULL.
 */
4790 intel_dp_connector_destroy(struct drm_connector *connector)
4792 struct intel_connector *intel_connector = to_intel_connector(connector);
4794 kfree(intel_connector->detect_edid);
4796 if (!IS_ERR_OR_NULL(intel_connector->edid))
4797 kfree(intel_connector->edid);
4799 /* Can't call is_edp() since the encoder may have been destroyed
4801 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4802 intel_panel_fini(&intel_connector->panel);
4804 drm_connector_cleanup(connector);
/*
 * drm_encoder_funcs.destroy hook: unregister the AUX channel, tear down
 * MST state, and for eDP cancel the delayed VDD-off work and force VDD
 * off synchronously (under the pps lock) plus drop the reboot notifier,
 * before freeing the digital port.
 */
4808 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4810 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4811 struct intel_dp *intel_dp = &intel_dig_port->dp;
4813 drm_dp_aux_unregister(&intel_dp->aux);
4814 intel_dp_mst_encoder_cleanup(intel_dig_port);
4815 if (is_edp(intel_dp)) {
4816 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4818 * vdd might still be enabled due to the delayed vdd off.
4819 * Make sure vdd is actually turned off here.
4822 edp_panel_vdd_off_sync(intel_dp);
4823 pps_unlock(intel_dp);
4825 if (intel_dp->edp_notifier.notifier_call) {
4826 unregister_reboot_notifier(&intel_dp->edp_notifier);
4827 intel_dp->edp_notifier.notifier_call = NULL;
4830 drm_encoder_cleanup(encoder);
4831 kfree(intel_dig_port);
/*
 * Encoder suspend hook: for eDP only, cancel the pending delayed VDD-off
 * work and synchronously turn panel VDD off (under the pps lock) so we
 * don't suspend with VDD still asserted.
 */
4834 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4836 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4838 if (!is_edp(intel_dp))
4842 * vdd might still be enabled due to the delayed vdd off.
4843 * Make sure vdd is actually turned off here.
4845 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4847 edp_panel_vdd_off_sync(intel_dp);
4848 pps_unlock(intel_dp);
/*
 * Sanitize VDD state tracking at boot/resume: if the BIOS left the panel
 * VDD bit enabled, grab the matching power-domain reference our tracking
 * expects and schedule the normal delayed VDD-off. Caller must hold
 * pps_mutex (asserted below).
 */
4851 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4853 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4854 struct drm_device *dev = intel_dig_port->base.base.dev;
4855 struct drm_i915_private *dev_priv = dev->dev_private;
4856 enum intel_display_power_domain power_domain;
4858 lockdep_assert_held(&dev_priv->pps_mutex);
4860 if (!edp_have_panel_vdd(intel_dp))
4864 * The VDD bit needs a power domain reference, so if the bit is
4865 * already enabled when we boot or resume, grab this reference and
4866 * schedule a vdd off, so we don't hold on to the reference
4869 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4870 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4871 intel_display_power_get(dev_priv, power_domain);
4873 edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset hook: for eDP encoders, re-read the power
 * sequencer assignment the BIOS may have changed (VLV/CHV) and sanitize
 * VDD tracking, all under the pps lock.
 * NOTE(review): the matching pps_lock() call was dropped by extraction;
 * only the unlock is visible below.
 */
4876 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4878 struct intel_dp *intel_dp;
4880 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4883 intel_dp = enc_to_intel_dp(encoder);
4888 * Read out the current power sequencer assignment,
4889 * in case the BIOS did something with it.
4891 if (IS_VALLEYVIEW(encoder->dev))
4892 vlv_initial_power_sequencer_setup(intel_dp);
4894 intel_edp_panel_vdd_sanitize(intel_dp);
4896 pps_unlock(intel_dp);
/* Connector vfunc table for DP/eDP; atomic state handled by helpers. */
4899 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4900 .dpms = intel_connector_dpms,
4901 .detect = intel_dp_detect,
4902 .force = intel_dp_force,
4903 .fill_modes = drm_helper_probe_single_connector_modes,
4904 .set_property = intel_dp_set_property,
4905 .atomic_get_property = intel_connector_atomic_get_property,
4906 .destroy = intel_dp_connector_destroy,
4907 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4908 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe-helper vfuncs: mode enumeration/validation and encoder lookup. */
4911 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4912 .get_modes = intel_dp_get_modes,
4913 .mode_valid = intel_dp_mode_valid,
4914 .best_encoder = intel_best_encoder,
/* Encoder vfunc table: reset (BIOS-state sanitize) and teardown. */
4917 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4918 .reset = intel_dp_encoder_reset,
4919 .destroy = intel_dp_encoder_destroy,
/*
 * Hot-plug hook for the DP encoder.
 * NOTE(review): the return type and entire body were lost in extraction;
 * only the declarator line survives. TODO: restore from upstream.
 */
4923 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
/*
 * HPD IRQ handler for a digital port. Long pulses on eDP are ignored to
 * avoid the "vdd off -> long hpd -> vdd on -> detect -> vdd off" loop.
 * Otherwise: take the port power domain, verify the port is connected
 * (PCH vs g4x paths), re-read DPCD/OUI, probe MST, service MST IRQs or
 * check link status for SST, and on disconnect tear down MST mode.
 * Returns an irqreturn value (IRQ_NONE on failure paths).
 * NOTE(review): extraction dropped lines (braces, gotos, labels such as
 * the mst_fail/put_power paths); code kept byte-identical.
 */
4929 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4931 struct intel_dp *intel_dp = &intel_dig_port->dp;
4932 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4933 struct drm_device *dev = intel_dig_port->base.base.dev;
4934 struct drm_i915_private *dev_priv = dev->dev_private;
4935 enum intel_display_power_domain power_domain;
4936 enum irqreturn ret = IRQ_NONE;
4938 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4939 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4941 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4943 * vdd off can generate a long pulse on eDP which
4944 * would require vdd on to handle it, and thus we
4945 * would end up in an endless cycle of
4946 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4948 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4949 port_name(intel_dig_port->port));
4953 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4954 port_name(intel_dig_port->port),
4955 long_hpd ? "long" : "short");
4957 power_domain = intel_display_port_power_domain(intel_encoder);
4958 intel_display_power_get(dev_priv, power_domain);
4961 /* indicate that we need to restart link training */
4962 intel_dp->train_set_valid = false;
4964 if (HAS_PCH_SPLIT(dev)) {
4965 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4968 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4972 if (!intel_dp_get_dpcd(intel_dp)) {
4976 intel_dp_probe_oui(intel_dp);
4978 if (!intel_dp_probe_mst(intel_dp))
4982 if (intel_dp->is_mst) {
4983 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4987 if (!intel_dp->is_mst) {
4989 * we'll check the link status via the normal hot plug path later -
4990 * but for short hpds we should check it now
4992 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4993 intel_dp_check_link_status(intel_dp);
4994 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5002 /* if we were in MST mode, and device is not there get out of MST mode */
5003 if (intel_dp->is_mst) {
5004 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5005 intel_dp->is_mst = false;
5006 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5009 intel_display_power_put(dev_priv, power_domain);
5014 /* Return which DP Port should be selected for Transcoder DP control */
/*
 * Walks encoders on the crtc and returns the output register of the
 * first DP/eDP encoder found.
 * NOTE(review): return type line and the no-match return were dropped
 * by extraction.
 */
5016 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5018 struct drm_device *dev = crtc->dev;
5019 struct intel_encoder *intel_encoder;
5020 struct intel_dp *intel_dp;
5022 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5023 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5025 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5026 intel_encoder->type == INTEL_OUTPUT_EDP)
5027 return intel_dp->output_reg;
5033 /* check the VBT to see whether the eDP is on DP-D port */
/*
 * Returns true if the VBT child device table marks the given port's DVO
 * port as an eDP device (device_type eDP bits match).
 * NOTE(review): bounds check on 'port' vs port_mapping[] and the final
 * return were dropped by extraction.
 */
5034 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5036 struct drm_i915_private *dev_priv = dev->dev_private;
5037 union child_device_config *p_child;
5039 static const short port_mapping[] = {
5040 [PORT_B] = PORT_IDPB,
5041 [PORT_C] = PORT_IDPC,
5042 [PORT_D] = PORT_IDPD,
5048 if (!dev_priv->vbt.child_dev_num)
5051 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5052 p_child = dev_priv->vbt.child_dev + i;
5054 if (p_child->common.dvo_port == port_mapping[port] &&
5055 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5056 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the standard DP connector properties: force-audio and
 * Broadcast RGB (color range defaults to auto); for eDP additionally
 * the scaling-mode property, defaulting to aspect-ratio scaling.
 */
5063 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5065 struct intel_connector *intel_connector = to_intel_connector(connector);
5067 intel_attach_force_audio_property(connector);
5068 intel_attach_broadcast_rgb_property(connector);
5069 intel_dp->color_range_auto = true;
5071 if (is_edp(intel_dp)) {
5072 drm_mode_create_scaling_mode_property(connector->dev);
5073 drm_object_attach_property(
5075 connector->dev->mode_config.scaling_mode_property,
5076 DRM_MODE_SCALE_ASPECT);
5077 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel power-sequencing timestamps with "now" so the first
 * panel power transitions honor the required delays.
 */
5081 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5083 intel_dp->last_power_cycle = jiffies;
5084 intel_dp->last_power_on = jiffies;
5085 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power-sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12): read the current hardware values, compare with the VBT, take
 * the max of the two per field, and fall back to eDP-spec upper limits
 * where both are zero. Results are cached in intel_dp->pps_delays (hw
 * units of 100us) and converted to ms-granularity driver delays.
 * Caller must hold pps_mutex. No-op if already initialized (t11_t12 != 0).
 * NOTE(review): extraction dropped lines (braces, the 'else' of the
 * HAS_PCH_SPLIT branch, some assign_final lines); code kept byte-identical.
 */
5089 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5090 struct intel_dp *intel_dp)
5092 struct drm_i915_private *dev_priv = dev->dev_private;
5093 struct edp_power_seq cur, vbt, spec,
5094 *final = &intel_dp->pps_delays;
5095 u32 pp_on, pp_off, pp_div, pp;
5096 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5098 lockdep_assert_held(&dev_priv->pps_mutex);
5100 /* already initialized? */
5101 if (final->t11_t12 != 0)
5104 if (HAS_PCH_SPLIT(dev)) {
5105 pp_ctrl_reg = PCH_PP_CONTROL;
5106 pp_on_reg = PCH_PP_ON_DELAYS;
5107 pp_off_reg = PCH_PP_OFF_DELAYS;
5108 pp_div_reg = PCH_PP_DIVISOR;
5110 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5112 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5113 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5114 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5115 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5118 /* Workaround: Need to write PP_CONTROL with the unlock key as
5119 * the very first thing. */
5120 pp = ironlake_get_pp_control(intel_dp);
5121 I915_WRITE(pp_ctrl_reg, pp);
5123 pp_on = I915_READ(pp_on_reg);
5124 pp_off = I915_READ(pp_off_reg);
5125 pp_div = I915_READ(pp_div_reg);
5127 /* Pull timing values out of registers */
5128 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5129 PANEL_POWER_UP_DELAY_SHIFT;
5131 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5132 PANEL_LIGHT_ON_DELAY_SHIFT;
5134 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5135 PANEL_LIGHT_OFF_DELAY_SHIFT;
5137 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5138 PANEL_POWER_DOWN_DELAY_SHIFT;
5140 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5141 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5143 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5144 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5146 vbt = dev_priv->vbt.edp_pps;
5148 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5149 * our hw here, which are all in 100usec. */
5150 spec.t1_t3 = 210 * 10;
5151 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5152 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5153 spec.t10 = 500 * 10;
5154 /* This one is special and actually in units of 100ms, but zero
5155 * based in the hw (so we need to add 100 ms). But the sw vbt
5156 * table multiplies it with 1000 to make it in units of 100usec,
5158 spec.t11_t12 = (510 + 100) * 10;
5160 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5161 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5163 /* Use the max of the register settings and vbt. If both are
5164 * unset, fall back to the spec limits. */
5165 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5167 max(cur.field, vbt.field))
5168 assign_final(t1_t3);
5172 assign_final(t11_t12);
5175 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5176 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5177 intel_dp->backlight_on_delay = get_delay(t8);
5178 intel_dp->backlight_off_delay = get_delay(t9);
5179 intel_dp->panel_power_down_delay = get_delay(t10);
5180 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5183 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5184 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5185 intel_dp->panel_power_cycle_delay);
5187 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5188 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the computed pps_delays into the panel power sequencer
 * registers. Backlight (T8/T9) fields are set to 1 because the driver
 * waits manually, avoiding double-waiting on the hw. Also programs the
 * reference-clock divisor and the panel port-select bits (per-pipe on
 * VLV; DPA/DPD on IBX/CPT; none on HSW+). Caller must hold pps_mutex.
 * NOTE(review): extraction dropped lines (braces, 'else' keywords,
 * the is_edp/port==PORT_A check around PANEL_PORT_SELECT_DPA).
 */
5192 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5193 struct intel_dp *intel_dp)
5195 struct drm_i915_private *dev_priv = dev->dev_private;
5196 u32 pp_on, pp_off, pp_div, port_sel = 0;
5197 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5198 int pp_on_reg, pp_off_reg, pp_div_reg;
5199 enum port port = dp_to_dig_port(intel_dp)->port;
5200 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5202 lockdep_assert_held(&dev_priv->pps_mutex);
5204 if (HAS_PCH_SPLIT(dev)) {
5205 pp_on_reg = PCH_PP_ON_DELAYS;
5206 pp_off_reg = PCH_PP_OFF_DELAYS;
5207 pp_div_reg = PCH_PP_DIVISOR;
5209 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5211 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5212 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5213 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5217 * And finally store the new values in the power sequencer. The
5218 * backlight delays are set to 1 because we do manual waits on them. For
5219 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5220 * we'll end up waiting for the backlight off delay twice: once when we
5221 * do the manual sleep, and once when we disable the panel and wait for
5222 * the PP_STATUS bit to become zero.
5224 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5225 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5226 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5227 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5228 /* Compute the divisor for the pp clock, simply match the Bspec
5230 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5231 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5232 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5234 /* Haswell doesn't have any port selection bits for the panel
5235 * power sequencer any more. */
5236 if (IS_VALLEYVIEW(dev)) {
5237 port_sel = PANEL_PORT_SELECT_VLV(port);
5238 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5240 port_sel = PANEL_PORT_SELECT_DPA;
5242 port_sel = PANEL_PORT_SELECT_DPD;
5247 I915_WRITE(pp_on_reg, pp_on);
5248 I915_WRITE(pp_off_reg, pp_off);
5249 I915_WRITE(pp_div_reg, pp_div);
5251 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5252 I915_READ(pp_on_reg),
5253 I915_READ(pp_off_reg),
5254 I915_READ(pp_div_reg));
5258 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5260 * @refresh_rate: RR to be programmed
5262 * This function gets called when refresh rate (RR) has to be changed from
5263 * one frequency to another. Switches can be between high and low RR
5264 * supported by the panel or to any other RR based on media playback (in
5265 * this case, RR value needs to be passed from user space).
5267 * The caller of this function needs to take a lock on dev_priv->drrs.
/*
 * Implementation: validates DRRS state (positive rate, drrs.dp set,
 * seamless support, active crtc), picks DRRS_LOW_RR when the requested
 * rate matches the downclock mode, then switches via M/N reprogramming
 * (gen8+, not CHV) or a PIPECONF RR-mode bit (gen7+/VLV).
 * NOTE(review): extraction dropped lines (braces, returns, 'switch'
 * opener, reg/val declarations); code kept byte-identical.
 */
5269 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct intel_encoder *encoder;
5273 struct intel_digital_port *dig_port = NULL;
5274 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5275 struct intel_crtc_state *config = NULL;
5276 struct intel_crtc *intel_crtc = NULL;
5278 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5280 if (refresh_rate <= 0) {
5281 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5285 if (intel_dp == NULL) {
5286 DRM_DEBUG_KMS("DRRS not supported.\n");
5291 * FIXME: This needs proper synchronization with psr state for some
5292 * platforms that cannot have PSR and DRRS enabled at the same time.
5295 dig_port = dp_to_dig_port(intel_dp);
5296 encoder = &dig_port->base;
5297 intel_crtc = to_intel_crtc(encoder->base.crtc);
5300 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5304 config = intel_crtc->config;
5306 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5307 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5311 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5313 index = DRRS_LOW_RR;
5315 if (index == dev_priv->drrs.refresh_rate_type) {
5317 "DRRS requested for previously set RR...ignoring\n");
5321 if (!intel_crtc->active) {
5322 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5326 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5329 intel_dp_set_m_n(intel_crtc, M1_N1);
5332 intel_dp_set_m_n(intel_crtc, M2_N2);
5336 DRM_ERROR("Unsupported refreshrate type\n");
5338 } else if (INTEL_INFO(dev)->gen > 6) {
5339 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5340 val = I915_READ(reg);
5342 if (index > DRRS_HIGH_RR) {
5343 if (IS_VALLEYVIEW(dev))
5344 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5346 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5348 if (IS_VALLEYVIEW(dev))
5349 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5351 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5353 I915_WRITE(reg, val);
5356 dev_priv->drrs.refresh_rate_type = index;
5358 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5362 * intel_edp_drrs_enable - init drrs struct if supported
5363 * @intel_dp: DP struct
5365 * Initializes frontbuffer_bits and drrs.dp
/*
 * No-op if the crtc config lacks DRRS; warns and bails if DRRS was
 * already enabled for another DP. All state updated under drrs.mutex.
 */
5367 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5369 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5370 struct drm_i915_private *dev_priv = dev->dev_private;
5371 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5372 struct drm_crtc *crtc = dig_port->base.base.crtc;
5373 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5375 if (!intel_crtc->config->has_drrs) {
5376 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5380 mutex_lock(&dev_priv->drrs.mutex);
5381 if (WARN_ON(dev_priv->drrs.dp)) {
5382 DRM_ERROR("DRRS already enabled\n");
5386 dev_priv->drrs.busy_frontbuffer_bits = 0;
5388 dev_priv->drrs.dp = intel_dp;
5391 mutex_unlock(&dev_priv->drrs.mutex);
5395 * intel_edp_drrs_disable - Disable DRRS
5396 * @intel_dp: DP struct
/*
 * Restores the high (fixed-mode) refresh rate if currently downclocked,
 * clears drrs.dp under drrs.mutex, and cancels the pending downclock
 * work after dropping the lock.
 */
5399 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5401 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5402 struct drm_i915_private *dev_priv = dev->dev_private;
5403 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5404 struct drm_crtc *crtc = dig_port->base.base.crtc;
5405 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5407 if (!intel_crtc->config->has_drrs)
5410 mutex_lock(&dev_priv->drrs.mutex);
5411 if (!dev_priv->drrs.dp) {
5412 mutex_unlock(&dev_priv->drrs.mutex);
5416 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5417 intel_dp_set_drrs_state(dev_priv->dev,
5418 intel_dp->attached_connector->panel.
5419 fixed_mode->vrefresh);
5421 dev_priv->drrs.dp = NULL;
5422 mutex_unlock(&dev_priv->drrs.mutex);
5424 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed-work handler that downclocks to the panel's low refresh rate
 * once no frontbuffer bits are busy. Runs under drrs.mutex; can race
 * with invalidate, hence the busy_frontbuffer_bits re-check.
 */
5427 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5429 struct drm_i915_private *dev_priv =
5430 container_of(work, typeof(*dev_priv), drrs.work.work);
5431 struct intel_dp *intel_dp;
5433 mutex_lock(&dev_priv->drrs.mutex);
5435 intel_dp = dev_priv->drrs.dp;
5441 * The delayed work can race with an invalidate hence we need to
5445 if (dev_priv->drrs.busy_frontbuffer_bits)
5448 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5449 intel_dp_set_drrs_state(dev_priv->dev,
5450 intel_dp->attached_connector->panel.
5451 downclock_mode->vrefresh);
5454 mutex_unlock(&dev_priv->drrs.mutex);
5458 * intel_edp_drrs_invalidate - Invalidate DRRS
5460 * @frontbuffer_bits: frontbuffer plane tracking bits
5462 * When there is a disturbance on screen (due to cursor movement/time
5463 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5466 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Cancels the pending downclock work, restores the high refresh rate if
 * currently downclocked, and records the busy frontbuffer bits for the
 * DRRS pipe (masked to that pipe). All under drrs.mutex.
 */
5468 void intel_edp_drrs_invalidate(struct drm_device *dev,
5469 unsigned frontbuffer_bits)
5471 struct drm_i915_private *dev_priv = dev->dev_private;
5472 struct drm_crtc *crtc;
5475 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5478 cancel_delayed_work(&dev_priv->drrs.work);
5480 mutex_lock(&dev_priv->drrs.mutex);
5481 if (!dev_priv->drrs.dp) {
5482 mutex_unlock(&dev_priv->drrs.mutex);
5486 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5487 pipe = to_intel_crtc(crtc)->pipe;
5489 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5490 intel_dp_set_drrs_state(dev_priv->dev,
5491 dev_priv->drrs.dp->attached_connector->panel.
5492 fixed_mode->vrefresh);
5495 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5497 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5498 mutex_unlock(&dev_priv->drrs.mutex);
5502 * intel_edp_drrs_flush - Flush DRRS
5504 * @frontbuffer_bits: frontbuffer plane tracking bits
5506 * When there is no movement on screen, DRRS work can be scheduled.
5507 * This DRRS work is responsible for setting relevant registers after a
5508 * timeout of 1 second.
5510 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Clears the flushed frontbuffer bits and, once none remain busy and we
 * are not already downclocked, schedules the downclock work with a 1s
 * delay. All under drrs.mutex.
 */
5512 void intel_edp_drrs_flush(struct drm_device *dev,
5513 unsigned frontbuffer_bits)
5515 struct drm_i915_private *dev_priv = dev->dev_private;
5516 struct drm_crtc *crtc;
5519 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5522 cancel_delayed_work(&dev_priv->drrs.work);
5524 mutex_lock(&dev_priv->drrs.mutex);
5525 if (!dev_priv->drrs.dp) {
5526 mutex_unlock(&dev_priv->drrs.mutex);
5530 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5531 pipe = to_intel_crtc(crtc)->pipe;
5532 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5534 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5535 !dev_priv->drrs.busy_frontbuffer_bits)
5536 schedule_delayed_work(&dev_priv->drrs.work,
5537 msecs_to_jiffies(1000));
5538 mutex_unlock(&dev_priv->drrs.mutex);
5542 * DOC: Display Refresh Rate Switching (DRRS)
5544 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5545 * which enables switching between low and high refresh rates,
5546 * dynamically, based on the usage scenario. This feature is applicable
5547 * for internal panels.
5549 * Indication that the panel supports DRRS is given by the panel EDID, which
5550 * would list multiple refresh rates for one resolution.
5552 * DRRS is of 2 types - static and seamless.
5553 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5554 * (may appear as a blink on screen) and is used in dock-undock scenario.
5555 * Seamless DRRS involves changing RR without any visual effect to the user
5556 * and can be used during normal system usage. This is done by programming
5557 * certain registers.
5559 * Support for static/seamless DRRS may be indicated in the VBT based on
5560 * inputs from the panel spec.
5562 * DRRS saves power by switching to low RR based on usage scenarios.
5565 * The implementation is based on frontbuffer tracking implementation.
5566 * When there is a disturbance on the screen triggered by user activity or a
5567 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5568 * When there is no movement on screen, after a timeout of 1 second, a switch
5569 * to low RR is made.
5570 * For integration with frontbuffer tracking code,
5571 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5573 * DRRS can be further extended to support other internal panels and also
5574 * the scenario of video playback wherein RR is set based on the rate
5575 * requested by userspace.
5579 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5580 * @intel_connector: eDP connector
5581 * @fixed_mode: preferred mode of panel
5583 * This function is called only once at driver load to initialize basic
5587 * Downclock mode if panel supports it, else return NULL.
5588 * DRRS support is determined by the presence of downclock mode (apart
5589 * from VBT setting).
/*
 * Requires gen7+, VBT seamless-DRRS support, and a panel downclock mode
 * (found via intel_find_panel_downclock); otherwise returns NULL.
 */
5591 static struct drm_display_mode *
5592 intel_dp_drrs_init(struct intel_connector *intel_connector,
5593 struct drm_display_mode *fixed_mode)
5595 struct drm_connector *connector = &intel_connector->base;
5596 struct drm_device *dev = connector->dev;
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5598 struct drm_display_mode *downclock_mode = NULL;
5600 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5601 mutex_init(&dev_priv->drrs.mutex);
5603 if (INTEL_INFO(dev)->gen <= 6) {
5604 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5608 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5609 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5613 downclock_mode = intel_find_panel_downclock
5614 (dev, fixed_mode, connector);
5616 if (!downclock_mode) {
5617 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5621 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5623 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5624 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5625 return downclock_mode;
/*
 * eDP-specific connector init: sanitize VDD state, cache DPCD (treating
 * a failed read as a "ghost" panel and bailing), program the power
 * sequencer registers, cache the EDID (or an ERR_PTR sentinel), pick a
 * fixed mode (EDID preferred-mode first, VBT LFP mode as fallback),
 * optionally set up DRRS, register the VLV reboot notifier, determine
 * the pipe for initial backlight setup, and init panel + backlight.
 * Returns true on success; true immediately for non-eDP.
 * NOTE(review): extraction dropped lines (braces, pps_lock calls,
 * has_dpcd branch structure); code kept byte-identical.
 */
5628 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5629 struct intel_connector *intel_connector)
5631 struct drm_connector *connector = &intel_connector->base;
5632 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5633 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5634 struct drm_device *dev = intel_encoder->base.dev;
5635 struct drm_i915_private *dev_priv = dev->dev_private;
5636 struct drm_display_mode *fixed_mode = NULL;
5637 struct drm_display_mode *downclock_mode = NULL;
5639 struct drm_display_mode *scan;
5641 enum pipe pipe = INVALID_PIPE;
5643 if (!is_edp(intel_dp))
5647 intel_edp_panel_vdd_sanitize(intel_dp);
5648 pps_unlock(intel_dp);
5650 /* Cache DPCD and EDID for edp. */
5651 has_dpcd = intel_dp_get_dpcd(intel_dp);
5654 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5655 dev_priv->no_aux_handshake =
5656 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5657 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5659 /* if this fails, presume the device is a ghost */
5660 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5664 /* We now know it's not a ghost, init power sequence regs. */
5666 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5667 pps_unlock(intel_dp);
5669 mutex_lock(&dev->mode_config.mutex);
5670 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5672 if (drm_add_edid_modes(connector, edid)) {
5673 drm_mode_connector_update_edid_property(connector,
5675 drm_edid_to_eld(connector, edid);
5678 edid = ERR_PTR(-EINVAL);
5681 edid = ERR_PTR(-ENOENT);
5683 intel_connector->edid = edid;
5685 /* prefer fixed mode from EDID if available */
5686 list_for_each_entry(scan, &connector->probed_modes, head) {
5687 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5688 fixed_mode = drm_mode_duplicate(dev, scan);
5689 downclock_mode = intel_dp_drrs_init(
5690 intel_connector, fixed_mode);
5695 /* fallback to VBT if available for eDP */
5696 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5697 fixed_mode = drm_mode_duplicate(dev,
5698 dev_priv->vbt.lfp_lvds_vbt_mode);
5700 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5702 mutex_unlock(&dev->mode_config.mutex);
5704 if (IS_VALLEYVIEW(dev)) {
5705 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5706 register_reboot_notifier(&intel_dp->edp_notifier);
5709 * Figure out the current pipe for the initial backlight setup.
5710 * If the current pipe isn't valid, try the PPS pipe, and if that
5711 * fails just assume pipe A.
5713 if (IS_CHERRYVIEW(dev))
5714 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5716 pipe = PORT_TO_PIPE(intel_dp->DP);
5718 if (pipe != PIPE_A && pipe != PIPE_B)
5719 pipe = intel_dp->pps_pipe;
5721 if (pipe != PIPE_A && pipe != PIPE_B)
5724 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5728 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5729 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5730 intel_panel_setup_backlight(connector, pipe);
/*
 * Full DP connector init for a digital port: select per-platform AUX
 * vfuncs, snapshot the hw DP register state, pick eDP vs DP connector
 * type (forcing encoder type to EDP for eDP, except DDI's UNKNOWN),
 * register the connector and helpers, wire up hw-state/unregister
 * callbacks, assign the HPD pin per port, set up the eDP power
 * sequencer, init AUX and (ports B-D) MST, run eDP panel init with full
 * unwind on failure, attach properties, apply the G4X PEG_BAND_GAP
 * workaround, and add the debugfs entry.
 * NOTE(review): extraction dropped lines (braces, switch/case openers,
 * return statements, HAS_DDI condition); code kept byte-identical.
 */
5736 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5737 struct intel_connector *intel_connector)
5739 struct drm_connector *connector = &intel_connector->base;
5740 struct intel_dp *intel_dp = &intel_dig_port->dp;
5741 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5742 struct drm_device *dev = intel_encoder->base.dev;
5743 struct drm_i915_private *dev_priv = dev->dev_private;
5744 enum port port = intel_dig_port->port;
5747 intel_dp->pps_pipe = INVALID_PIPE;
5749 /* intel_dp vfuncs */
5750 if (INTEL_INFO(dev)->gen >= 9)
5751 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5752 else if (IS_VALLEYVIEW(dev))
5753 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5754 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5755 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5756 else if (HAS_PCH_SPLIT(dev))
5757 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5759 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5761 if (INTEL_INFO(dev)->gen >= 9)
5762 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5764 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5766 /* Preserve the current hw state. */
5767 intel_dp->DP = I915_READ(intel_dp->output_reg);
5768 intel_dp->attached_connector = intel_connector;
5770 if (intel_dp_is_edp(dev, port))
5771 type = DRM_MODE_CONNECTOR_eDP;
5773 type = DRM_MODE_CONNECTOR_DisplayPort;
5776 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5777 * for DP the encoder type can be set by the caller to
5778 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5780 if (type == DRM_MODE_CONNECTOR_eDP)
5781 intel_encoder->type = INTEL_OUTPUT_EDP;
5783 /* eDP only on port B and/or C on vlv/chv */
5784 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5785 port != PORT_B && port != PORT_C))
5788 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5789 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5792 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5793 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5795 connector->interlace_allowed = true;
5796 connector->doublescan_allowed = 0;
5798 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5799 edp_panel_vdd_work);
5801 intel_connector_attach_encoder(intel_connector, intel_encoder);
5802 drm_connector_register(connector);
5805 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5807 intel_connector->get_hw_state = intel_connector_get_hw_state;
5808 intel_connector->unregister = intel_dp_connector_unregister;
5810 /* Set up the hotplug pin. */
5813 intel_encoder->hpd_pin = HPD_PORT_A;
5816 intel_encoder->hpd_pin = HPD_PORT_B;
5819 intel_encoder->hpd_pin = HPD_PORT_C;
5822 intel_encoder->hpd_pin = HPD_PORT_D;
5828 if (is_edp(intel_dp)) {
5830 intel_dp_init_panel_power_timestamps(intel_dp);
5831 if (IS_VALLEYVIEW(dev))
5832 vlv_initial_power_sequencer_setup(intel_dp);
5834 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5835 pps_unlock(intel_dp);
5838 intel_dp_aux_init(intel_dp, intel_connector);
5840 /* init MST on ports that can support it */
5841 if (HAS_DP_MST(dev) &&
5842 (port == PORT_B || port == PORT_C || port == PORT_D))
5843 intel_dp_mst_encoder_init(intel_dig_port,
5844 intel_connector->base.base.id);
5846 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5847 drm_dp_aux_unregister(&intel_dp->aux);
5848 if (is_edp(intel_dp)) {
5849 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5851 * vdd might still be enabled due to the delayed vdd off.
5852 * Make sure vdd is actually turned off here.
5855 edp_panel_vdd_off_sync(intel_dp);
5856 pps_unlock(intel_dp);
5858 drm_connector_unregister(connector);
5859 drm_connector_cleanup(connector);
5863 intel_dp_add_properties(intel_dp, connector);
5865 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5866 * 0xd. Failure to do so will result in spurious interrupts being
5867 * generated on the port when a cable is not attached.
5869 if (IS_G4X(dev) && !IS_GM45(dev)) {
5870 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5871 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5874 i915_debugfs_connector_add(connector);
/*
 * intel_dp_init - create and register a DP (or eDP) encoder on @port
 * @dev: drm device
 * @output_reg: MMIO register controlling this DP port
 * @port: hardware port identifier (PORT_A..PORT_D)
 *
 * Allocates the combined digital-port wrapper plus a connector, initializes
 * the DRM encoder, wires up the platform-specific modeset hooks
 * (Cherryview / Valleyview / g4x+ paths), records the port for hotplug IRQ
 * dispatch, and finally hands off to intel_dp_init_connector().  On failure
 * the allocations are released in reverse order.
 *
 * NOTE(review): this extract appears to have lost physical lines (returns,
 * else branches, closing braces); comments below describe only what the
 * surviving statements show — verify against the full source.
 */
5880 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5882 	struct drm_i915_private *dev_priv = dev->dev_private;
5883 	struct intel_digital_port *intel_dig_port;
5884 	struct intel_encoder *intel_encoder;
5885 	struct drm_encoder *encoder;
5886 	struct intel_connector *intel_connector;
/* Digital port embeds both the encoder and the DP-specific state. */
5888 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5889 	if (!intel_dig_port)
5892 	intel_connector = intel_connector_alloc();
5893 	if (!intel_connector) {
/* Connector allocation failed: release the port allocated above. */
5894 		kfree(intel_dig_port);
5898 	intel_encoder = &intel_dig_port->base;
5899 	encoder = &intel_encoder->base;
/* DP encoders are registered as TMDS-class encoders in the DRM core. */
5901 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5902 			 DRM_MODE_ENCODER_TMDS);
/* Common hooks shared by all platforms. */
5904 	intel_encoder->compute_config = intel_dp_compute_config;
5905 	intel_encoder->disable = intel_disable_dp;
5906 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
5907 	intel_encoder->get_config = intel_dp_get_config;
5908 	intel_encoder->suspend = intel_dp_encoder_suspend;
/* Platform-specific enable/disable sequencing. */
5909 	if (IS_CHERRYVIEW(dev)) {
5910 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5911 		intel_encoder->pre_enable = chv_pre_enable_dp;
/* CHV reuses the VLV enable hook. */
5912 		intel_encoder->enable = vlv_enable_dp;
5913 		intel_encoder->post_disable = chv_post_disable_dp;
5914 	} else if (IS_VALLEYVIEW(dev)) {
5915 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5916 		intel_encoder->pre_enable = vlv_pre_enable_dp;
5917 		intel_encoder->enable = vlv_enable_dp;
5918 		intel_encoder->post_disable = vlv_post_disable_dp;
/* g4x+ fallback path; ILK and later gain a post-disable hook. */
5920 		intel_encoder->pre_enable = g4x_pre_enable_dp;
5921 		intel_encoder->enable = g4x_enable_dp;
5922 		if (INTEL_INFO(dev)->gen >= 5)
5923 			intel_encoder->post_disable = ilk_post_disable_dp;
5926 	intel_dig_port->port = port;
5927 	intel_dig_port->dp.output_reg = output_reg;
5929 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * Pipe (crtc) restrictions: on CHV one branch allows only pipe C (1 << 2),
 * another only pipes A/B; other platforms allow all three pipes.
 * NOTE(review): the CHV port condition selecting between these masks is
 * missing from this extract — confirm against the full source.
 */
5930 	if (IS_CHERRYVIEW(dev)) {
5932 		intel_encoder->crtc_mask = 1 << 2;
5934 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5936 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5938 	intel_encoder->cloneable = 0;
5939 	intel_encoder->hot_plug = intel_dp_hot_plug;
/* Register for long/short HPD pulse dispatch from the IRQ handler. */
5941 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5942 	dev_priv->hpd_irq_port[port] = intel_dig_port;
/* Connector setup failed: unwind the encoder and both allocations. */
5944 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5945 		drm_encoder_cleanup(encoder);
5946 		kfree(intel_dig_port);
5947 		kfree(intel_connector);
/*
 * intel_dp_mst_suspend - quiesce all active DP MST topology managers
 * @dev: drm device
 *
 * Walks every registered hotplug port and, for DisplayPort outputs that
 * are MST-capable and currently in MST mode, suspends the associated
 * topology manager so sideband traffic stops across system suspend.
 *
 * NOTE(review): extraction dropped some lines here (loop-variable
 * declaration, continue statements, closing braces) — verify against the
 * full source.
 */
5951 void intel_dp_mst_suspend(struct drm_device *dev)
5953 	struct drm_i915_private *dev_priv = dev->dev_private;
/* Iterate all ports that registered for HPD dispatch. */
5957 	for (i = 0; i < I915_MAX_PORTS; i++) {
5958 		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
/* Slot may be empty if no encoder was created on this port. */
5959 		if (!intel_dig_port)
5962 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
/* Skip sinks that never negotiated MST capability. */
5963 			if (!intel_dig_port->dp.can_mst)
/* Only suspend managers that are actively driving an MST topology. */
5965 			if (intel_dig_port->dp.is_mst)
5966 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5971 void intel_dp_mst_resume(struct drm_device *dev)
5973 struct drm_i915_private *dev_priv = dev->dev_private;
5976 for (i = 0; i < I915_MAX_PORTS; i++) {
5977 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5978 if (!intel_dig_port)
5980 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5983 if (!intel_dig_port->dp.can_mst)
5986 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5988 intel_dp_check_mst_status(&intel_dig_port->dp);