2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
/*
 * Per-platform DPLL divider settings for the fixed DP link rates.
 * Each entry pairs a link clock with the divider values to program.
 * NOTE(review): several table rows look truncated by extraction (the
 * clock field is missing on some entries) — verify against upstream.
 */
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Link rates supported by each source platform (same units as port_clock). */
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
101 static const int default_rates[] = { 162000, 270000, 540000 };
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
110 static bool is_edp(struct intel_dp *intel_dp)
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
117 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
121 return intel_dig_port->base.base.dev;
124 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
129 static void intel_dp_link_down(struct intel_dp *intel_dp);
130 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
131 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
132 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133 static void vlv_steal_power_sequencer(struct drm_device *dev,
/*
 * Return a 4-bit mask of the lanes NOT used when @lane_count lanes are
 * active (bit i set => lane i unused).  E.g. lane_count == 1 -> 0xe.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
/*
 * Highest link rate the sink advertises in its DPCD; unrecognised values
 * are warned about and clamped to DP_LINK_BW_1_62.
 * NOTE(review): the intermediate case labels/return are not visible in
 * this extract.
 */
142 intel_dp_max_link_bw(struct intel_dp *intel_dp)
144 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
146 switch (max_link_bw) {
147 case DP_LINK_BW_1_62:
152 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
154 max_link_bw = DP_LINK_BW_1_62;
/*
 * Usable lane count: min of what the source port and the sink support.
 * NOTE(review): the source_max assignments are not visible here; on DDI
 * platforms port A is presumably narrowed when DDI_A_4_LANES is clear —
 * confirm against upstream.
 */
160 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = intel_dig_port->base.base.dev;
164 u8 source_max, sink_max;
167 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
168 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
171 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
173 return min(source_max, sink_max);
177 * The units on the numbers in the next two are... bizarre. Examples will
178 * make it clearer; this one parallels an example in the eDP spec.
180 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
182 * 270000 * 1 * 8 / 10 == 216000
184 * The actual data capacity of that configuration is 2.16Gbit/s, so the
185 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
186 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187 * 119000. At 18bpp that's 2142000 kilobits per second.
189 * Thus the strange-looking division by 10 in intel_dp_link_required, to
190 * get the result in decakilobits instead of kilobits.
/*
 * Required link bandwidth for @pixel_clock (kHz) at @bpp, in decakilobits
 * per second — see the units discussion in the comment above.  Rounds up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * Maximum payload data rate of a link: symbol clock * lanes * 8/10
 * (8b/10b channel coding overhead).  Units match intel_dp_link_required().
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
/*
 * Connector ->mode_valid() hook: reject modes that exceed the panel's
 * fixed mode (eDP), exceed link bandwidth at 18bpp (the minimum bpp we
 * can fall back to), are below 10 MHz, or use double-clocked timings.
 * NOTE(review): the eDP rejection returns (presumably MODE_PANEL) and
 * the final success return are not visible in this extract.
 */
205 static enum drm_mode_status
206 intel_dp_mode_valid(struct drm_connector *connector,
207 struct drm_display_mode *mode)
209 struct intel_dp *intel_dp = intel_attached_dp(connector);
210 struct intel_connector *intel_connector = to_intel_connector(connector);
211 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
212 int target_clock = mode->clock;
213 int max_rate, mode_rate, max_lanes, max_link_clock;
215 if (is_edp(intel_dp) && fixed_mode) {
216 if (mode->hdisplay > fixed_mode->hdisplay)
219 if (mode->vdisplay > fixed_mode->vdisplay)
222 target_clock = fixed_mode->clock;
225 max_link_clock = intel_dp_max_link_rate(intel_dp);
226 max_lanes = intel_dp_max_lane_count(intel_dp);
228 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
/* 18bpp: worst case — if even that doesn't fit, the mode can't work */
229 mode_rate = intel_dp_link_required(target_clock, 18);
231 if (mode_rate > max_rate)
232 return MODE_CLOCK_HIGH;
234 if (mode->clock < 10000)
235 return MODE_CLOCK_LOW;
237 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
238 return MODE_H_ILLEGAL;
/*
 * Pack up to 4 bytes from @src, MSB first, into one 32-bit AUX data word.
 * src_bytes is clamped to 4: without the clamp the (3-i)*8 shift would go
 * negative for i > 3, which is undefined behaviour.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack a 32-bit AUX data word into up to 4 bytes, MSB first.
 * dst_bytes is clamped to 4: for i > 3 the (3-i)*8 shift would be
 * negative, which is undefined behaviour.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
264 /* hrawclock is 1/4 the FSB frequency */
/*
 * Return the hrawclk frequency (MHz) derived from the CLKCFG FSB field.
 * NOTE(review): the per-case return values and the default case are not
 * visible in this extract.
 */
266 intel_hrawclk(struct drm_device *dev)
268 struct drm_i915_private *dev_priv = dev->dev_private;
271 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
272 if (IS_VALLEYVIEW(dev))
275 clkcfg = I915_READ(CLKCFG);
276 switch (clkcfg & CLKCFG_FSB_MASK) {
285 case CLKCFG_FSB_1067:
287 case CLKCFG_FSB_1333:
289 /* these two are just a guess; one of them might be right */
290 case CLKCFG_FSB_1600:
291 case CLKCFG_FSB_1600_ALT:
299 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
300 struct intel_dp *intel_dp);
302 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
303 struct intel_dp *intel_dp);
305 static void pps_lock(struct intel_dp *intel_dp)
307 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
308 struct intel_encoder *encoder = &intel_dig_port->base;
309 struct drm_device *dev = encoder->base.dev;
310 struct drm_i915_private *dev_priv = dev->dev_private;
311 enum intel_display_power_domain power_domain;
314 * See vlv_power_sequencer_reset() why we need
315 * a power domain reference here.
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_get(dev_priv, power_domain);
320 mutex_lock(&dev_priv->pps_mutex);
323 static void pps_unlock(struct intel_dp *intel_dp)
325 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
326 struct intel_encoder *encoder = &intel_dig_port->base;
327 struct drm_device *dev = encoder->base.dev;
328 struct drm_i915_private *dev_priv = dev->dev_private;
329 enum intel_display_power_domain power_domain;
331 mutex_unlock(&dev_priv->pps_mutex);
333 power_domain = intel_display_port_power_domain(encoder);
334 intel_display_power_put(dev_priv, power_domain);
/*
 * Make the power sequencer of intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the port with a minimal one-lane
 * configuration.  The pipe's PLL must be running for the kick to take;
 * it is force-enabled around the port toggle if needed.
 * NOTE(review): some declarations (e.g. the DP temporary) and the early
 * return after the WARN are not visible in this extract.
 */
338 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
340 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
341 struct drm_device *dev = intel_dig_port->base.base.dev;
342 struct drm_i915_private *dev_priv = dev->dev_private;
343 enum pipe pipe = intel_dp->pps_pipe;
344 bool pll_enabled, release_cl_override = false;
345 enum dpio_phy phy = DPIO_PHY(pipe);
346 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
349 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
350 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
351 pipe_name(pipe), port_name(intel_dig_port->port)))
354 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
355 pipe_name(pipe), port_name(intel_dig_port->port));
357 /* Preserve the BIOS-computed detected bit. This is
358 * supposed to be read-only.
360 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
361 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
362 DP |= DP_PORT_WIDTH(1);
363 DP |= DP_LINK_TRAIN_PAT_1;
365 if (IS_CHERRYVIEW(dev))
366 DP |= DP_PIPE_SELECT_CHV(pipe);
367 else if (pipe == PIPE_B)
368 DP |= DP_PIPEB_SELECT;
370 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
373 * The DPLL for the pipe must be enabled for this to work.
374 * So enable it temporarily if it's not already enabled.
377 release_cl_override = IS_CHERRYVIEW(dev) &&
378 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
380 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
381 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
385 * Similar magic as in intel_dp_enable_port().
386 * We _must_ do this port enable + disable trick
387 * to make this power sequencer lock onto the port.
388 * Otherwise even VDD force bit won't work.
390 I915_WRITE(intel_dp->output_reg, DP);
391 POSTING_READ(intel_dp->output_reg);
393 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
394 POSTING_READ(intel_dp->output_reg);
396 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
397 POSTING_READ(intel_dp->output_reg);
400 vlv_force_pll_off(dev, pipe);
402 if (release_cl_override)
403 chv_phy_powergate_ch(dev_priv, phy, ch, false);
/*
 * Return the pipe whose panel power sequencer this eDP port should use.
 * Cached in intel_dp->pps_pipe; on first use pick a pipe not claimed by
 * any other eDP port, steal it if necessary, program the PPS registers
 * and kick the sequencer so it locks onto this port.
 * Caller must hold pps_mutex.
 */
408 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
410 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
411 struct drm_device *dev = intel_dig_port->base.base.dev;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_encoder *encoder;
414 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
417 lockdep_assert_held(&dev_priv->pps_mutex);
419 /* We should never land here with regular DP ports */
420 WARN_ON(!is_edp(intel_dp));
422 if (intel_dp->pps_pipe != INVALID_PIPE)
423 return intel_dp->pps_pipe;
426 * We don't have power sequencer currently.
427 * Pick one that's not used by other ports.
429 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
431 struct intel_dp *tmp;
433 if (encoder->type != INTEL_OUTPUT_EDP)
436 tmp = enc_to_intel_dp(&encoder->base);
438 if (tmp->pps_pipe != INVALID_PIPE)
439 pipes &= ~(1 << tmp->pps_pipe);
443 * Didn't find one. This should not happen since there
444 * are two power sequencers and up to two eDP ports.
446 if (WARN_ON(pipes == 0))
449 pipe = ffs(pipes) - 1;
451 vlv_steal_power_sequencer(dev, pipe);
452 intel_dp->pps_pipe = pipe;
454 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
455 pipe_name(intel_dp->pps_pipe),
456 port_name(intel_dig_port->port));
458 /* init power sequencer on this pipe and port */
459 intel_dp_init_panel_power_sequencer(dev, intel_dp);
460 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
463 * Even vdd force doesn't work until we've made
464 * the power sequencer lock in on the port.
466 vlv_power_sequencer_kick(intel_dp);
468 return intel_dp->pps_pipe;
471 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
474 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
477 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
480 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
483 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
486 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/*
 * Scan pipes A and B for one whose PPS port-select field matches @port
 * and that additionally satisfies @pipe_check.
 * NOTE(review): the @port parameter line, the loop's continue paths and
 * the return statements are not visible in this extract.
 */
493 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
495 vlv_pipe_check pipe_check)
499 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
500 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
501 PANEL_PORT_SELECT_MASK;
503 if (port_sel != PANEL_PORT_SELECT_VLV(port))
506 if (!pipe_check(dev_priv, pipe))
/*
 * At init, recover which pipe's power sequencer the BIOS left driving
 * this eDP port: prefer a pipe with the panel on, then one with VDD on,
 * then any pipe selecting this port; otherwise leave pps_pipe invalid
 * and let vlv_power_sequencer_pipe() pick later.
 * Caller must hold pps_mutex.
 */
516 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
518 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
519 struct drm_device *dev = intel_dig_port->base.base.dev;
520 struct drm_i915_private *dev_priv = dev->dev_private;
521 enum port port = intel_dig_port->port;
523 lockdep_assert_held(&dev_priv->pps_mutex);
525 /* try to find a pipe with this port selected */
526 /* first pick one where the panel is on */
527 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
529 /* didn't find one? pick one where vdd is on */
530 if (intel_dp->pps_pipe == INVALID_PIPE)
531 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
532 vlv_pipe_has_vdd_on);
533 /* didn't find one? pick one with just the correct port */
534 if (intel_dp->pps_pipe == INVALID_PIPE)
535 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
538 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
539 if (intel_dp->pps_pipe == INVALID_PIPE) {
540 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
545 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
546 port_name(port), pipe_name(intel_dp->pps_pipe));
548 intel_dp_init_panel_power_sequencer(dev, intel_dp);
549 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * Invalidate the cached pps_pipe of every eDP encoder so the power
 * sequencer mapping is re-established on next use.  VLV/CHV only.
 */
552 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
554 struct drm_device *dev = dev_priv->dev;
555 struct intel_encoder *encoder;
557 if (WARN_ON(!IS_VALLEYVIEW(dev)))
561 * We can't grab pps_mutex here due to deadlock with power_domain
562 * mutex when power_domain functions are called while holding pps_mutex.
563 * That also means that in order to use pps_pipe the code needs to
564 * hold both a power domain reference and pps_mutex, and the power domain
565 * reference get/put must be done while _not_ holding pps_mutex.
566 * pps_{lock,unlock}() do these steps in the correct order, so one
567 * should use them always.
570 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
571 struct intel_dp *intel_dp;
573 if (encoder->type != INTEL_OUTPUT_EDP)
576 intel_dp = enc_to_intel_dp(&encoder->base);
577 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Panel power CONTROL register for this port's sequencer: BXT, PCH or
 * per-pipe VLV/CHV variant.  NOTE(review): the condition guarding the
 * BXT_PP_CONTROL return (presumably IS_BROXTON) is not visible here.
 */
581 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
583 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 return BXT_PP_CONTROL(0);
587 else if (HAS_PCH_SPLIT(dev))
588 return PCH_PP_CONTROL;
590 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/*
 * Panel power STATUS register for this port's sequencer — mirrors
 * _pp_ctrl_reg().  NOTE(review): the BXT guard condition is not visible
 * in this extract.
 */
593 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
598 return BXT_PP_STATUS(0);
599 else if (HAS_PCH_SPLIT(dev))
600 return PCH_PP_STATUS;
602 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
605 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
606 This function only applicable when panel PM state is not to be tracked */
/*
 * On SYS_RESTART, force the panel off with the maximum power-cycle delay
 * programmed, then sleep panel_power_cycle_delay so the eDP T12 minimum
 * off-time is honoured across the reboot.
 */
607 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
610 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
612 struct drm_device *dev = intel_dp_to_dev(intel_dp);
613 struct drm_i915_private *dev_priv = dev->dev_private;
615 u32 pp_ctrl_reg, pp_div_reg;
617 if (!is_edp(intel_dp) || code != SYS_RESTART)
622 if (IS_VALLEYVIEW(dev)) {
623 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
625 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
626 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
627 pp_div = I915_READ(pp_div_reg);
628 pp_div &= PP_REFERENCE_DIVIDER_MASK;
630 /* 0x1F write to PP_DIV_REG sets max cycle delay */
631 I915_WRITE(pp_div_reg, pp_div | 0x1F);
632 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
633 msleep(intel_dp->panel_power_cycle_delay);
636 pps_unlock(intel_dp);
/*
 * True if the panel power sequencer reports PP_ON.  On VLV/CHV an
 * unassigned pps_pipe means no sequencer to query (early-out path —
 * its return is not visible in this extract).  Needs pps_mutex.
 */
641 static bool edp_have_panel_power(struct intel_dp *intel_dp)
643 struct drm_device *dev = intel_dp_to_dev(intel_dp);
644 struct drm_i915_private *dev_priv = dev->dev_private;
646 lockdep_assert_held(&dev_priv->pps_mutex);
648 if (IS_VALLEYVIEW(dev) &&
649 intel_dp->pps_pipe == INVALID_PIPE)
652 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/*
 * True if VDD force is currently asserted in the PPS control register.
 * Same VLV/CHV unassigned-pps_pipe early-out as edp_have_panel_power().
 * Needs pps_mutex.
 */
655 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
657 struct drm_device *dev = intel_dp_to_dev(intel_dp);
658 struct drm_i915_private *dev_priv = dev->dev_private;
660 lockdep_assert_held(&dev_priv->pps_mutex);
662 if (IS_VALLEYVIEW(dev) &&
663 intel_dp->pps_pipe == INVALID_PIPE)
666 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/*
 * Sanity check before AUX traffic: warn loudly if an eDP panel has
 * neither panel power nor VDD force on — AUX can't work then.
 * No-op for non-eDP ports.
 */
670 intel_dp_check_edp(struct intel_dp *intel_dp)
672 struct drm_device *dev = intel_dp_to_dev(intel_dp);
673 struct drm_i915_private *dev_priv = dev->dev_private;
675 if (!is_edp(intel_dp))
678 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
679 WARN(1, "eDP powered off while attempting aux channel communication.\n");
680 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
681 I915_READ(_pp_stat_reg(intel_dp)),
682 I915_READ(_pp_ctrl_reg(intel_dp)));
/*
 * Wait (IRQ-driven with a 10ms timeout, or atomic polling) for the AUX
 * channel SEND_BUSY bit to clear; logs an error if the HW never signals.
 * Presumably returns the last status read — the declarations, the
 * has_aux_irq branch and the return are not visible in this extract.
 */
687 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
689 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690 struct drm_device *dev = intel_dig_port->base.base.dev;
691 struct drm_i915_private *dev_priv = dev->dev_private;
692 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
696 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
698 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
699 msecs_to_jiffies_timeout(10));
701 done = wait_for_atomic(C, 10) == 0;
703 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
710 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
716 * The clock divider is based off the hrawclk, and would like to run at
717 * 2MHz. So, take the hrawclk value and divide by 2 and use that
719 return index ? 0 : intel_hrawclk(dev) / 2;
/*
 * AUX clock divider for ILK+: port A derives from cdclk, other ports
 * from the PCH raw clock.  NOTE(review): the handling of index != 0 and
 * the else branch structure are not visible in this extract.
 */
722 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
724 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
725 struct drm_device *dev = intel_dig_port->base.base.dev;
726 struct drm_i915_private *dev_priv = dev->dev_private;
731 if (intel_dig_port->port == PORT_A) {
732 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
735 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * AUX clock divider for HSW/BDW: port A from cdclk; non-ULT HSW (LPT
 * PCH) needs a workaround divider sequence; otherwise PCH rawclk / 2.
 * NOTE(review): the LPT workaround values and the index handling for
 * port A are not visible in this extract.
 */
739 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
742 struct drm_device *dev = intel_dig_port->base.base.dev;
743 struct drm_i915_private *dev_priv = dev->dev_private;
745 if (intel_dig_port->port == PORT_A) {
748 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
749 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
750 /* Workaround for non-ULT HSW */
757 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/* VLV: single fixed AUX clock divider of 100 (index 0 only). */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 * Return a dummy 1 for the first attempt so the caller's retry loop,
	 * which stops on a zero divider, still runs once.
	 */
	return index ? 0 : 1;
}
/*
 * Assemble the AUX_CH_CTL value that starts a transaction on pre-SKL HW:
 * busy/done/error bits to latch, optional completion interrupt, timeout
 * (BDW port A needs 600us), message size, precharge time and bit-clock
 * divider.  NOTE(review): the precharge assignments and some parameter
 * lines are not visible in this extract.
 */
776 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
779 uint32_t aux_clock_divider)
781 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
782 struct drm_device *dev = intel_dig_port->base.base.dev;
783 uint32_t precharge, timeout;
790 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
791 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
793 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
795 return DP_AUX_CH_CTL_SEND_BUSY |
797 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
798 DP_AUX_CH_CTL_TIME_OUT_ERROR |
800 DP_AUX_CH_CTL_RECEIVE_ERROR |
801 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
802 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
803 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/*
 * SKL variant of the AUX_CH_CTL assembly: no clock divider or precharge
 * field (HW derives the AUX clock from CDCLK); fixed 1600us timeout and
 * a 32-pulse sync.  NOTE(review): the remaining parameter lines are not
 * visible in this extract.
 */
806 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
811 return DP_AUX_CH_CTL_SEND_BUSY |
813 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
814 DP_AUX_CH_CTL_TIME_OUT_ERROR |
815 DP_AUX_CH_CTL_TIME_OUT_1600us |
816 DP_AUX_CH_CTL_RECEIVE_ERROR |
817 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
818 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/*
 * Perform one raw AUX channel transaction: write up to 20 bytes from
 * @send, start the transfer, retry per the DP spec across the available
 * clock dividers, then unpack up to @recv_size reply bytes into @recv.
 * Runs with VDD forced on (left on if an upper layer already enabled it)
 * and with a PM QoS request to keep IRQ latency low.
 * NOTE(review): several declarations (status, try, clock, vdd) and the
 * error-path gotos/returns are not visible in this extract.
 */
822 intel_dp_aux_ch(struct intel_dp *intel_dp,
823 const uint8_t *send, int send_bytes,
824 uint8_t *recv, int recv_size)
826 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
827 struct drm_device *dev = intel_dig_port->base.base.dev;
828 struct drm_i915_private *dev_priv = dev->dev_private;
829 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
830 uint32_t ch_data = ch_ctl + 4;
831 uint32_t aux_clock_divider;
832 int i, ret, recv_bytes;
835 bool has_aux_irq = HAS_AUX_IRQ(dev);
841 * We will be called with VDD already enabled for dpcd/edid/oui reads.
842 * In such cases we want to leave VDD enabled and it's up to upper layers
843 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
846 vdd = edp_panel_vdd_on(intel_dp);
848 /* dp aux is extremely sensitive to irq latency, hence request the
849 * lowest possible wakeup latency and so prevent the cpu from going into
852 pm_qos_update_request(&dev_priv->pm_qos, 0);
854 intel_dp_check_edp(intel_dp);
856 intel_aux_display_runtime_get(dev_priv);
858 /* Try to wait for any previous AUX channel activity */
859 for (try = 0; try < 3; try++) {
860 status = I915_READ_NOTRACE(ch_ctl);
861 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
/* static last_status: rate-limit the WARN to status changes only */
867 static u32 last_status = -1;
868 const u32 status = I915_READ(ch_ctl);
870 if (status != last_status) {
871 WARN(1, "dp_aux_ch not started status 0x%08x\n",
873 last_status = status;
880 /* Only 5 data registers! */
881 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
886 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
887 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
892 /* Must try at least 3 times according to DP spec */
893 for (try = 0; try < 5; try++) {
894 /* Load the send data into the aux channel data registers */
895 for (i = 0; i < send_bytes; i += 4)
896 I915_WRITE(ch_data + i,
897 intel_dp_pack_aux(send + i,
900 /* Send the command and wait for it to complete */
901 I915_WRITE(ch_ctl, send_ctl);
903 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
905 /* Clear done status and any errors */
909 DP_AUX_CH_CTL_TIME_OUT_ERROR |
910 DP_AUX_CH_CTL_RECEIVE_ERROR);
912 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
915 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
916 * 400us delay required for errors and timeouts
917 * Timeout errors from the HW already meet this
918 * requirement so skip to next iteration
920 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
921 usleep_range(400, 500);
924 if (status & DP_AUX_CH_CTL_DONE)
929 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
930 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
936 /* Check for timeout or receive error.
937 * Timeouts occur when the sink is not connected
939 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
940 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
945 /* Timeouts occur when the device isn't connected, so they're
946 * "normal" -- don't fill the kernel log with these */
947 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
948 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
953 /* Unload any bytes sent back from the other side */
954 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
955 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
956 if (recv_bytes > recv_size)
957 recv_bytes = recv_size;
959 for (i = 0; i < recv_bytes; i += 4)
960 intel_dp_unpack_aux(I915_READ(ch_data + i),
961 recv + i, recv_bytes - i);
965 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
966 intel_aux_display_runtime_put(dev_priv);
/* Only drop VDD if we forced it on ourselves above */
969 edp_panel_vdd_off(intel_dp, false);
971 pps_unlock(intel_dp);
976 #define BARE_ADDRESS_SIZE 3
977 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux ->transfer() hook: marshal a drm_dp_aux_msg into the 4-byte
 * AUX header (+payload for writes), run it through intel_dp_aux_ch(),
 * and decode the reply byte + payload.
 * NOTE(review): the ret declaration, the ret<0 error paths and the
 * default switch case are not visible in this extract.
 */
979 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
981 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
982 uint8_t txbuf[20], rxbuf[20];
983 size_t txsize, rxsize;
/* AUX header: 4-bit request, 20-bit address, length-1 */
986 txbuf[0] = (msg->request << 4) |
987 ((msg->address >> 16) & 0xf);
988 txbuf[1] = (msg->address >> 8) & 0xff;
989 txbuf[2] = msg->address & 0xff;
990 txbuf[3] = msg->size - 1;
992 switch (msg->request & ~DP_AUX_I2C_MOT) {
993 case DP_AUX_NATIVE_WRITE:
994 case DP_AUX_I2C_WRITE:
995 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
996 rxsize = 2; /* 0 or 1 data bytes */
998 if (WARN_ON(txsize > 20))
1001 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1003 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1005 msg->reply = rxbuf[0] >> 4;
1008 /* Number of bytes written in a short write. */
1009 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1011 /* Return payload size. */
1017 case DP_AUX_NATIVE_READ:
1018 case DP_AUX_I2C_READ:
1019 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1020 rxsize = msg->size + 1;
1022 if (WARN_ON(rxsize > 20))
1025 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1027 msg->reply = rxbuf[0] >> 4;
1029 * Assume happy day, and copy the data. The caller is
1030 * expected to check msg->reply before touching it.
1032 * Return payload size.
1035 memcpy(msg->buffer, rxbuf + 1, ret);
/*
 * Set up the AUX channel for this port: pick the AUX_CTL register (SKL
 * port E has no AUX of its own and borrows one via the VBT alternate
 * channel), register the drm_dp_aux and create the connector sysfs link.
 * NOTE(review): the per-port switch labels, the name assignments and the
 * ret<0 guards are not visible in this extract.
 */
1048 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1050 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1051 struct drm_i915_private *dev_priv = dev->dev_private;
1052 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1053 enum port port = intel_dig_port->port;
1054 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1055 const char *name = NULL;
1056 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1059 /* On SKL we don't have Aux for port E so we rely on VBT to set
1060 * a proper alternate aux channel.
1062 if (IS_SKYLAKE(dev) && port == PORT_E) {
1063 switch (info->alternate_aux_channel) {
1065 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1068 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1071 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1075 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1081 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1085 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1089 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1093 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1097 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1105 * The AUX_CTL register is usually DP_CTL + 0x10.
1107 * On Haswell and Broadwell though:
1108 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1109 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1111 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1113 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1114 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1116 intel_dp->aux.name = name;
1117 intel_dp->aux.dev = dev->dev;
1118 intel_dp->aux.transfer = intel_dp_aux_transfer;
1120 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1121 connector->base.kdev->kobj.name);
1123 ret = drm_dp_aux_register(&intel_dp->aux);
1125 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1130 ret = sysfs_create_link(&connector->base.kdev->kobj,
1131 &intel_dp->aux.ddc.dev.kobj,
1132 intel_dp->aux.ddc.dev.kobj.name);
/* Roll back AUX registration if the sysfs link can't be created */
1134 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1135 drm_dp_aux_unregister(&intel_dp->aux);
1140 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1142 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1144 if (!intel_connector->mst_port)
1145 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1146 intel_dp->aux.ddc.dev.kobj.name);
1147 intel_connector_unregister(intel_connector);
/*
 * Program pipe_config to drive eDP from SKL DPLL0, overriding its link
 * rate per port_clock.  NOTE(review): the case labels of the switch and
 * the second DPLL_CTRL1_LINK_RATE() argument lines are not visible in
 * this extract.
 */
1151 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1155 memset(&pipe_config->dpll_hw_state, 0,
1156 sizeof(pipe_config->dpll_hw_state));
1158 pipe_config->ddi_pll_sel = SKL_DPLL0;
1159 pipe_config->dpll_hw_state.cfgcr1 = 0;
1160 pipe_config->dpll_hw_state.cfgcr2 = 0;
1162 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1163 switch (pipe_config->port_clock / 2) {
1165 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1169 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1173 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1177 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1180 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1181 results in CDCLK change. Need to handle the change of CDCLK by
1182 disabling pipes and re-enabling them */
1184 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1188 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1193 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
/*
 * Select the HSW/BDW LCPLL output matching port_clock (810/1350/2700).
 * NOTE(review): the case labels and break statements are not visible in
 * this extract.
 */
1197 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1199 memset(&pipe_config->dpll_hw_state, 0,
1200 sizeof(pipe_config->dpll_hw_state));
1202 switch (pipe_config->port_clock / 2) {
1204 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1207 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1210 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1216 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1218 if (intel_dp->num_sink_rates) {
1219 *sink_rates = intel_dp->sink_rates;
1220 return intel_dp->num_sink_rates;
1223 *sink_rates = default_rates;
1225 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1229 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1231 if (IS_BROXTON(dev)) {
1232 *source_rates = bxt_rates;
1233 return ARRAY_SIZE(bxt_rates);
1234 } else if (IS_SKYLAKE(dev)) {
1235 *source_rates = skl_rates;
1236 return ARRAY_SIZE(skl_rates);
1237 } else if (IS_CHERRYVIEW(dev)) {
1238 *source_rates = chv_rates;
1239 return ARRAY_SIZE(chv_rates);
1242 *source_rates = default_rates;
1244 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1245 /* WaDisableHBR2:skl */
1246 return (DP_LINK_BW_2_7 >> 3) + 1;
1247 else if (INTEL_INFO(dev)->gen >= 8 ||
1248 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1249 return (DP_LINK_BW_5_4 >> 3) + 1;
1251 return (DP_LINK_BW_2_7 >> 3) + 1;
/*
 * On platforms with fixed DPLL divider tables (gen4/PCH/CHV/VLV), look
 * up the entry matching pipe_config->port_clock and pre-set the pipe's
 * DPLL dividers.  NOTE(review): the first platform condition (presumably
 * IS_G4X) and the divisor table assignments for the later branches are
 * not visible in this extract.
 */
1255 intel_dp_set_clock(struct intel_encoder *encoder,
1256 struct intel_crtc_state *pipe_config)
1258 struct drm_device *dev = encoder->base.dev;
1259 const struct dp_link_dpll *divisor = NULL;
1263 divisor = gen4_dpll;
1264 count = ARRAY_SIZE(gen4_dpll);
1265 } else if (HAS_PCH_SPLIT(dev)) {
1267 count = ARRAY_SIZE(pch_dpll);
1268 } else if (IS_CHERRYVIEW(dev)) {
1270 count = ARRAY_SIZE(chv_dpll);
1271 } else if (IS_VALLEYVIEW(dev)) {
1273 count = ARRAY_SIZE(vlv_dpll);
1276 if (divisor && count) {
1277 for (i = 0; i < count; i++) {
1278 if (pipe_config->port_clock == divisor[i].clock) {
1279 pipe_config->dpll = divisor[i].dpll;
1280 pipe_config->clock_set = true;
/*
 * Merge-style intersection of two ascending-sorted rate arrays into
 * common_rates[] (capped at DP_MAX_SUPPORTED_RATES); returns the number
 * of common entries. NOTE(review): the advance/else branches and the
 * return are elided in this view; both inputs are assumed sorted
 * ascending — confirm against callers.
 */
1287 static int intersect_rates(const int *source_rates, int source_len,
1288 const int *sink_rates, int sink_len,
1291 int i = 0, j = 0, k = 0;
1293 while (i < source_len && j < sink_len) {
1294 if (source_rates[i] == sink_rates[j]) {
/* Guard against overflowing the fixed-size output array. */
1295 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1297 common_rates[k] = source_rates[i];
1301 } else if (source_rates[i] < sink_rates[j]) {
/*
 * Compute the set of link rates supported by both this source platform
 * and the attached sink; fills the caller-provided array (size
 * DP_MAX_SUPPORTED_RATES) and returns the count.
 */
1310 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1313 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1314 const int *source_rates, *sink_rates;
1315 int source_len, sink_len;
1317 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1318 source_len = intel_dp_source_rates(dev, &source_rates);
1320 return intersect_rates(source_rates, source_len,
1321 sink_rates, sink_len,
/*
 * Format an int array as a comma-separated list into str (at most len
 * bytes) for debug output. NOTE(review): the str/len advance after each
 * snprintf and the truncation check are elided in this view.
 */
1325 static void snprintf_int_array(char *str, size_t len,
1326 const int *array, int nelem)
1332 for (i = 0; i < nelem; i++) {
/* "%s%d" prepends ", " for every element after the first. */
1333 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/*
 * Debug helper: log the source, sink and common link-rate tables.
 * Early-outs unless KMS debugging is enabled, so the string formatting
 * is never paid for in normal operation.
 */
1341 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1343 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1344 const int *source_rates, *sink_rates;
1345 int source_len, sink_len, common_len;
1346 int common_rates[DP_MAX_SUPPORTED_RATES];
1347 char str[128]; /* FIXME: too big for stack? */
/* Skip all formatting work when KMS debug output is off. */
1349 if ((drm_debug & DRM_UT_KMS) == 0)
1352 source_len = intel_dp_source_rates(dev, &source_rates);
1353 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1354 DRM_DEBUG_KMS("source rates: %s\n", str);
1356 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1357 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1358 DRM_DEBUG_KMS("sink rates: %s\n", str);
1360 common_len = intel_dp_common_rates(intel_dp, common_rates);
1361 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1362 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Return the index of 'find' in rates[] (scanning the full fixed-size
 * table). Callers also use find==0 to locate the first unused slot of a
 * zero-initialized table, i.e. its effective length. NOTE(review): the
 * break/return lines are elided; behaviour when the rate is absent is
 * not visible here — confirm against full source.
 */
1365 static int rate_to_index(int find, const int *rates)
1369 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1370 if (find == rates[i])
/*
 * Return the highest link rate common to source and sink.
 * rates[] is zero-initialized, so rate_to_index(0, rates) finds the
 * first unused slot, i.e. the number of valid entries; the table is
 * ascending, so the last valid entry is the maximum.
 */
1377 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1379 int rates[DP_MAX_SUPPORTED_RATES] = {};
1382 len = intel_dp_common_rates(intel_dp, rates);
/* No overlap between source and sink rate sets should not happen. */
1383 if (WARN_ON(len <= 0))
1386 return rates[rate_to_index(0, rates) - 1];
/*
 * Map a link rate to its index in the sink's SUPPORTED_LINK_RATES
 * table — the value to write to DP_LINK_RATE_SET. Only meaningful when
 * the sink provided an explicit rate table (num_sink_rates != 0).
 */
1389 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1391 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Translate a port clock into the values written during link training:
 * either a rate-select index (sinks with an explicit rate table) or a
 * classic LINK_BW_SET code. NOTE(review): the assignments inside the
 * first branch (link_bw = 0, rate_select = ...) are elided here.
 */
1394 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1395 uint8_t *link_bw, uint8_t *rate_select)
1397 if (intel_dp->num_sink_rates) {
1400 intel_dp_rate_select(intel_dp, port_clock);
/* Legacy path: encode the rate as a DP BW code (0x06/0x0a/0x14). */
1402 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * Compute the DP link configuration (lane count, link rate, bpp, m/n
 * values, PLL selection) for the requested mode. Iterates bpp from the
 * pipe's value down to 18, and for each bpp searches clock/lane-count
 * combinations for the first one with enough bandwidth. For eDP the
 * search is pinned to the panel's maximum clock/lanes. NOTE(review):
 * several lines (loop increments, 'found' jump, some declarations) are
 * elided in this view.
 */
1408 intel_dp_compute_config(struct intel_encoder *encoder,
1409 struct intel_crtc_state *pipe_config)
1411 struct drm_device *dev = encoder->base.dev;
1412 struct drm_i915_private *dev_priv = dev->dev_private;
1413 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1415 enum port port = dp_to_dig_port(intel_dp)->port;
1416 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1417 struct intel_connector *intel_connector = intel_dp->attached_connector;
1418 int lane_count, clock;
1419 int min_lane_count = 1;
1420 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1421 /* Conveniently, the link BW constants become indices with a shift...*/
1425 int link_avail, link_clock;
1426 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1428 uint8_t link_bw, rate_select;
1430 common_len = intel_dp_common_rates(intel_dp, common_rates);
1432 /* No common link rates between source and sink */
1433 WARN_ON(common_len <= 0);
1435 max_clock = common_len - 1;
/* Pre-DDI PCH platforms route non-eDP DP through the PCH encoder. */
1437 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1438 pipe_config->has_pch_encoder = true;
1440 pipe_config->has_dp_encoder = true;
1441 pipe_config->has_drrs = false;
1442 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1444 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1445 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1448 if (INTEL_INFO(dev)->gen >= 9) {
1450 ret = skl_update_scaler_crtc(pipe_config);
1455 if (!HAS_PCH_SPLIT(dev))
1456 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1457 intel_connector->panel.fitting_mode);
1459 intel_pch_panel_fitting(intel_crtc, pipe_config,
1460 intel_connector->panel.fitting_mode);
/* Double-clocked modes are not supported on DP. */
1463 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1466 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1467 "max bw %d pixel clock %iKHz\n",
1468 max_lane_count, common_rates[max_clock],
1469 adjusted_mode->crtc_clock);
1471 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1472 * bpc in between. */
1473 bpp = pipe_config->pipe_bpp;
1474 if (is_edp(intel_dp)) {
1476 /* Get bpp from vbt only for panels that don't have bpp in edid */
1477 if (intel_connector->base.display_info.bpc == 0 &&
1478 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1479 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1480 dev_priv->vbt.edp_bpp);
1481 bpp = dev_priv->vbt.edp_bpp;
1485 * Use the maximum clock and number of lanes the eDP panel
1486 * advertises being capable of. The panels are generally
1487 * designed to support only a single clock and lane
1488 * configuration, and typically these values correspond to the
1489 * native resolution of the panel.
1491 min_lane_count = max_lane_count;
1492 min_clock = max_clock;
/* bpp search: 18..pipe_bpp in steps of 6 (2 bits per channel). */
1495 for (; bpp >= 6*3; bpp -= 2*3) {
1496 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1499 for (clock = min_clock; clock <= max_clock; clock++) {
1500 for (lane_count = min_lane_count;
1501 lane_count <= max_lane_count;
1504 link_clock = common_rates[clock];
1505 link_avail = intel_dp_max_data_rate(link_clock,
/* First configuration with enough bandwidth wins. */
1508 if (mode_rate <= link_avail) {
1518 if (intel_dp->color_range_auto) {
1521 * CEA-861-E - 5.1 Default Encoding Parameters
1522 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1524 pipe_config->limited_color_range =
1525 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1527 pipe_config->limited_color_range =
1528 intel_dp->limited_color_range;
1531 pipe_config->lane_count = lane_count;
1533 pipe_config->pipe_bpp = bpp;
1534 pipe_config->port_clock = common_rates[clock];
1536 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1537 &link_bw, &rate_select);
1539 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1540 link_bw, rate_select, pipe_config->lane_count,
1541 pipe_config->port_clock, bpp);
1542 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1543 mode_rate, link_avail);
1545 intel_link_compute_m_n(bpp, lane_count,
1546 adjusted_mode->crtc_clock,
1547 pipe_config->port_clock,
1548 &pipe_config->dp_m_n);
/* Second m/n set for seamless DRRS downclocking, if supported. */
1550 if (intel_connector->panel.downclock_mode != NULL &&
1551 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1552 pipe_config->has_drrs = true;
1553 intel_link_compute_m_n(bpp, lane_count,
1554 intel_connector->panel.downclock_mode->clock,
1555 pipe_config->port_clock,
1556 &pipe_config->dp_m2_n2);
/* Platform-specific PLL selection for the chosen link rate. */
1559 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1560 skl_edp_set_pll_config(pipe_config);
1561 else if (IS_BROXTON(dev))
1562 /* handled in ddi */;
1563 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1564 hsw_dp_set_ddi_pll_sel(pipe_config);
1566 intel_dp_set_clock(encoder, pipe_config);
/*
 * Program the CPU eDP (DP_A) PLL frequency select bits to match the
 * configured port clock (162 MHz link -> "160MHz" bit, otherwise
 * 270 MHz), mirroring the choice into the cached intel_dp->DP value.
 * NOTE(review): the else and the trailing udelay/POSTING_READ lines
 * are elided in this view.
 */
1571 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1573 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1574 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1575 struct drm_device *dev = crtc->base.dev;
1576 struct drm_i915_private *dev_priv = dev->dev_private;
1579 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1580 crtc->config->port_clock);
1581 dpa_ctl = I915_READ(DP_A);
1582 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1584 if (crtc->config->port_clock == 162000) {
1585 /* For a long time we've carried around a ILK-DevA w/a for the
1586 * 160MHz clock. If we're really unlucky, it's still required.
1588 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1589 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1590 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1592 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1593 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1596 I915_WRITE(DP_A, dpa_ctl);
/*
 * Cache the negotiated link rate and lane count from the crtc state
 * into the intel_dp struct for use during link training/retraining.
 */
1602 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1603 const struct intel_crtc_state *pipe_config)
1605 intel_dp->link_rate = pipe_config->port_clock;
1606 intel_dp->lane_count = pipe_config->lane_count;
/*
 * Build the DP port register value (intel_dp->DP) for the upcoming
 * modeset, handling the three register layouts: gen7 CPU eDP (port A),
 * CPT PCH (sync/framing moved to TRANS_DP_CTL), and the IBX/CPU/VLV
 * layout. Only the cached value and TRANS_DP_CTL are written here; the
 * port register itself is written later in the enable sequence.
 */
1609 static void intel_dp_prepare(struct intel_encoder *encoder)
1611 struct drm_device *dev = encoder->base.dev;
1612 struct drm_i915_private *dev_priv = dev->dev_private;
1613 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1614 enum port port = dp_to_dig_port(intel_dp)->port;
1615 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1616 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1618 intel_dp_set_link_params(intel_dp, crtc->config);
1621 * There are four kinds of DP registers:
1628 * IBX PCH and CPU are the same for almost everything,
1629 * except that the CPU DP PLL is configured in this
1632 * CPT PCH is quite different, having many bits moved
1633 * to the TRANS_DP_CTL register instead. That
1634 * configuration happens (oddly) in ironlake_pch_enable
1637 /* Preserve the BIOS-computed detected bit. This is
1638 * supposed to be read-only.
1640 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1642 /* Handle DP bits in common between all three register formats */
1643 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1644 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1646 if (crtc->config->has_audio)
1647 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1649 /* Split out the IBX/CPU vs CPT settings */
1651 if (IS_GEN7(dev) && port == PORT_A) {
1652 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1653 intel_dp->DP |= DP_SYNC_HS_HIGH;
1654 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1655 intel_dp->DP |= DP_SYNC_VS_HIGH;
1656 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1658 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1659 intel_dp->DP |= DP_ENHANCED_FRAMING;
/* gen7 port A encodes the pipe in bit 29. */
1661 intel_dp->DP |= crtc->pipe << 29;
1662 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1665 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/* CPT: enhanced framing lives in TRANS_DP_CTL, not the port reg. */
1667 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1668 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1669 trans_dp |= TRANS_DP_ENH_FRAMING;
1671 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1672 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1674 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1675 crtc->config->limited_color_range)
1676 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1678 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1679 intel_dp->DP |= DP_SYNC_HS_HIGH;
1680 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1681 intel_dp->DP |= DP_SYNC_VS_HIGH;
1682 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1684 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1685 intel_dp->DP |= DP_ENHANCED_FRAMING;
1687 if (IS_CHERRYVIEW(dev))
1688 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1689 else if (crtc->pipe == PIPE_B)
1690 intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs for polling the panel power sequencer (PP_STATUS)
 * into its three steady states: fully on (idle after power-up), fully
 * off, and off with the power-cycle delay elapsed. The literal 0 terms
 * keep the columns aligned with the full four-field register layout.
 */
1694 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1695 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1697 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1698 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1700 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1701 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll PP_STATUS until (status & mask) == value, with a 5 second
 * timeout and 10 us poll interval; logs an error on timeout rather
 * than failing. Caller must hold pps_mutex.
 */
1703 static void wait_panel_status(struct intel_dp *intel_dp,
1707 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1708 struct drm_i915_private *dev_priv = dev->dev_private;
1709 u32 pp_stat_reg, pp_ctrl_reg;
1711 lockdep_assert_held(&dev_priv->pps_mutex);
1713 pp_stat_reg = _pp_stat_reg(intel_dp);
1714 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1716 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1718 I915_READ(pp_stat_reg),
1719 I915_READ(pp_ctrl_reg));
/* 5000 ms timeout, 10 us between polls. */
1721 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1722 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1723 I915_READ(pp_stat_reg),
1724 I915_READ(pp_ctrl_reg));
1727 DRM_DEBUG_KMS("Wait complete\n");
/* Block until the power sequencer reports the panel fully on (idle). */
1730 static void wait_panel_on(struct intel_dp *intel_dp)
1732 DRM_DEBUG_KMS("Wait for panel power on\n");
1733 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the power sequencer reports the panel fully off. */
1736 static void wait_panel_off(struct intel_dp *intel_dp)
1738 DRM_DEBUG_KMS("Wait for panel power off time\n");
1739 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Wait out the panel power-cycle (off -> on) delay. The hardware
 * sequencer doesn't track the VDD-override path, so the software
 * timestamp from the last power-down is honoured first, then the
 * sequencer's own off-idle state is polled.
 */
1742 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1744 DRM_DEBUG_KMS("Wait for panel power cycle\n")
1746 /* When we disable the VDD override bit last we have to do the manual
1748 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1749 intel_dp->panel_power_cycle_delay);
1751 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Honour the panel's power-on -> backlight-on delay (eDP T8). */
1754 static void wait_backlight_on(struct intel_dp *intel_dp)
1756 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1757 intel_dp->backlight_on_delay);
/* Honour the backlight-off -> panel-power-change delay (eDP T9). */
1760 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1762 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1763 intel_dp->backlight_off_delay);
1766 /* Read the current pp_control value, unlocking the register if it
/*
 * Returns PP_CONTROL with the write-protect key field replaced by the
 * unlock value, so callers can modify and write it back directly.
 * Broxton has no unlock key, hence the exclusion. Caller must hold
 * pps_mutex.
 */
1770 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1772 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1773 struct drm_i915_private *dev_priv = dev->dev_private;
1776 lockdep_assert_held(&dev_priv->pps_mutex);
1778 control = I915_READ(_pp_ctrl_reg(intel_dp));
1779 if (!IS_BROXTON(dev)) {
1780 control &= ~PANEL_UNLOCK_MASK;
1781 control |= PANEL_UNLOCK_REGS;
/*
 * Force panel VDD on so AUX transactions work without full panel power.
 * Returns true if VDD was actually turned on here (i.e. the caller is
 * responsible for a matching off), false if it was already requested.
 * Takes a display power domain reference that edp_panel_vdd_off_sync()
 * releases.
 */
1787 * Must be paired with edp_panel_vdd_off().
1788 * Must hold pps_mutex around the whole on/off sequence.
1789 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1791 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1793 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1795 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 enum intel_display_power_domain power_domain;
1799 u32 pp_stat_reg, pp_ctrl_reg;
/* Whether this call (vs. an earlier one) is turning VDD on. */
1800 bool need_to_disable = !intel_dp->want_panel_vdd;
1802 lockdep_assert_held(&dev_priv->pps_mutex);
1804 if (!is_edp(intel_dp))
/* Cancel any pending deferred VDD-off; we want VDD held now. */
1807 cancel_delayed_work(&intel_dp->panel_vdd_work);
1808 intel_dp->want_panel_vdd = true;
1810 if (edp_have_panel_vdd(intel_dp))
1811 return need_to_disable;
1813 power_domain = intel_display_port_power_domain(intel_encoder);
1814 intel_display_power_get(dev_priv, power_domain);
1816 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1817 port_name(intel_dig_port->port));
1819 if (!edp_have_panel_power(intel_dp))
1820 wait_panel_power_cycle(intel_dp);
1822 pp = ironlake_get_pp_control(intel_dp);
1823 pp |= EDP_FORCE_VDD;
1825 pp_stat_reg = _pp_stat_reg(intel_dp);
1826 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1828 I915_WRITE(pp_ctrl_reg, pp);
1829 POSTING_READ(pp_ctrl_reg);
1830 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1831 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1833 * If the panel wasn't on, delay before accessing aux channel
1835 if (!edp_have_panel_power(intel_dp)) {
1836 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1837 port_name(intel_dig_port->port));
1838 msleep(intel_dp->panel_power_up_delay);
1841 return need_to_disable;
/*
 * Public wrapper around edp_panel_vdd_on(): takes pps_mutex itself and
 * warns if VDD was already requested (the warning is deliberately
 * outside the lock).
 */
1845 * Must be paired with intel_edp_panel_vdd_off() or
1846 * intel_edp_panel_off().
1847 * Nested calls to these functions are not allowed since
1848 * we drop the lock. Caller must use some higher level
1849 * locking to prevent nested calls from other threads.
1851 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1855 if (!is_edp(intel_dp))
1859 vdd = edp_panel_vdd_on(intel_dp);
1860 pps_unlock(intel_dp);
1862 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1863 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Actually turn panel VDD off (clear EDP_FORCE_VDD) and drop the power
 * domain reference taken in edp_panel_vdd_on(). Must only run once no
 * caller still wants VDD (want_panel_vdd is false). Caller must hold
 * pps_mutex.
 */
1866 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1868 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1869 struct drm_i915_private *dev_priv = dev->dev_private;
1870 struct intel_digital_port *intel_dig_port =
1871 dp_to_dig_port(intel_dp);
1872 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1873 enum intel_display_power_domain power_domain;
1875 u32 pp_stat_reg, pp_ctrl_reg;
1877 lockdep_assert_held(&dev_priv->pps_mutex);
1879 WARN_ON(intel_dp->want_panel_vdd);
1881 if (!edp_have_panel_vdd(intel_dp))
1884 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1885 port_name(intel_dig_port->port));
1887 pp = ironlake_get_pp_control(intel_dp);
1888 pp &= ~EDP_FORCE_VDD;
1890 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1891 pp_stat_reg = _pp_stat_reg(intel_dp);
1893 I915_WRITE(pp_ctrl_reg, pp);
1894 POSTING_READ(pp_ctrl_reg);
1896 /* Make sure sequencer is idle before allowing subsequent activity */
1897 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1898 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* Dropping VDD with panel power off counts as a power cycle start. */
1900 if ((pp & POWER_TARGET_ON) == 0)
1901 intel_dp->last_power_cycle = jiffies;
1903 power_domain = intel_display_port_power_domain(intel_encoder);
1904 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work callback: turn VDD off if nobody re-requested it while
 * the work was queued.
 */
1907 static void edp_panel_vdd_work(struct work_struct *__work)
1909 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1910 struct intel_dp, panel_vdd_work);
1913 if (!intel_dp->want_panel_vdd)
1914 edp_panel_vdd_off_sync(intel_dp);
1915 pps_unlock(intel_dp);
/*
 * Queue a deferred VDD-off, well beyond the power-cycle delay, so a
 * burst of AUX accesses doesn't thrash panel power.
 */
1918 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1920 unsigned long delay;
1923 * Queue the timer to fire a long time from now (relative to the power
1924 * down delay) to keep the panel power up across a sequence of
/* 5x the power-cycle delay gives generous slack. */
1927 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1928 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Release a VDD request: either synchronously (sync=true, VDD drops
 * now) or lazily via the delayed work. Caller must hold pps_mutex and
 * must have previously requested VDD on.
 */
1932 * Must be paired with edp_panel_vdd_on().
1933 * Must hold pps_mutex around the whole on/off sequence.
1934 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1936 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1938 struct drm_i915_private *dev_priv =
1939 intel_dp_to_dev(intel_dp)->dev_private;
1941 lockdep_assert_held(&dev_priv->pps_mutex);
1943 if (!is_edp(intel_dp))
1946 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1947 port_name(dp_to_dig_port(intel_dp)->port));
1949 intel_dp->want_panel_vdd = false;
1952 edp_panel_vdd_off_sync(intel_dp);
1954 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Run the panel power-up sequence: wait out any pending power cycle,
 * apply the ILK reset workaround, set POWER_TARGET_ON, and wait for
 * the sequencer to reach the on-idle state. Caller must hold
 * pps_mutex. NOTE(review): the gen-conditional lines around the
 * PANEL_POWER_RESET handling are elided in this view.
 */
1957 static void edp_panel_on(struct intel_dp *intel_dp)
1959 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1960 struct drm_i915_private *dev_priv = dev->dev_private;
1964 lockdep_assert_held(&dev_priv->pps_mutex);
1966 if (!is_edp(intel_dp))
1969 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1970 port_name(dp_to_dig_port(intel_dp)->port));
/* Powering on twice indicates a sequencing bug elsewhere. */
1972 if (WARN(edp_have_panel_power(intel_dp),
1973 "eDP port %c panel power already on\n",
1974 port_name(dp_to_dig_port(intel_dp)->port)))
1977 wait_panel_power_cycle(intel_dp);
1979 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1980 pp = ironlake_get_pp_control(intel_dp);
1982 /* ILK workaround: disable reset around power sequence */
1983 pp &= ~PANEL_POWER_RESET;
1984 I915_WRITE(pp_ctrl_reg, pp);
1985 POSTING_READ(pp_ctrl_reg);
1988 pp |= POWER_TARGET_ON;
1990 pp |= PANEL_POWER_RESET;
1992 I915_WRITE(pp_ctrl_reg, pp);
1993 POSTING_READ(pp_ctrl_reg);
1995 wait_panel_on(intel_dp);
/* Timestamp for the backlight-on delay (wait_backlight_on). */
1996 intel_dp->last_power_on = jiffies;
1999 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2000 I915_WRITE(pp_ctrl_reg, pp);
2001 POSTING_READ(pp_ctrl_reg);
/* Public wrapper around edp_panel_on() that takes pps_mutex itself. */
2005 void intel_edp_panel_on(struct intel_dp *intel_dp)
2007 if (!is_edp(intel_dp))
2011 edp_panel_on(intel_dp);
2012 pps_unlock(intel_dp);
/*
 * Run the panel power-down sequence: drop panel power, panel reset and
 * the VDD override in one write, wait for the sequencer to go idle,
 * then release the power domain reference that the earlier VDD-on
 * took. Requires the caller to currently hold a VDD request (warned
 * otherwise) and pps_mutex.
 */
2016 static void edp_panel_off(struct intel_dp *intel_dp)
2018 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2019 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2020 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022 enum intel_display_power_domain power_domain;
2026 lockdep_assert_held(&dev_priv->pps_mutex);
2028 if (!is_edp(intel_dp))
2031 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2032 port_name(dp_to_dig_port(intel_dp)->port));
2034 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2035 port_name(dp_to_dig_port(intel_dp)->port));
2037 pp = ironlake_get_pp_control(intel_dp);
2038 /* We need to switch off panel power _and_ force vdd, for otherwise some
2039 * panels get very unhappy and cease to work. */
2040 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2043 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2045 intel_dp->want_panel_vdd = false;
2047 I915_WRITE(pp_ctrl_reg, pp);
2048 POSTING_READ(pp_ctrl_reg);
/* Timestamp for the off -> on power-cycle delay. */
2050 intel_dp->last_power_cycle = jiffies;
2051 wait_panel_off(intel_dp);
2053 /* We got a reference when we enabled the VDD. */
2054 power_domain = intel_display_port_power_domain(intel_encoder);
2055 intel_display_power_put(dev_priv, power_domain);
/* Public wrapper around edp_panel_off() that takes pps_mutex itself. */
2058 void intel_edp_panel_off(struct intel_dp *intel_dp)
2060 if (!is_edp(intel_dp))
2064 edp_panel_off(intel_dp);
2065 pps_unlock(intel_dp);
2068 /* Enable backlight in the panel power control. */
/*
 * Set EDP_BLC_ENABLE in PP_CONTROL after waiting out the panel-on ->
 * backlight-on delay. This is the PP-control half; the PWM half is
 * handled by intel_panel_enable_backlight() in the caller.
 */
2069 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2071 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2072 struct drm_device *dev = intel_dig_port->base.base.dev;
2073 struct drm_i915_private *dev_priv = dev->dev_private;
2078 * If we enable the backlight right away following a panel power
2079 * on, we may see slight flicker as the panel syncs with the eDP
2080 * link. So delay a bit to make sure the image is solid before
2081 * allowing it to appear.
2083 wait_backlight_on(intel_dp);
2087 pp = ironlake_get_pp_control(intel_dp);
2088 pp |= EDP_BLC_ENABLE;
2090 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2092 I915_WRITE(pp_ctrl_reg, pp);
2093 POSTING_READ(pp_ctrl_reg);
2095 pps_unlock(intel_dp);
2098 /* Enable backlight PWM and backlight PP control. */
/* Public entry point: enable the PWM first, then the PP-control bit. */
2099 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2101 if (!is_edp(intel_dp))
2104 DRM_DEBUG_KMS("\n");
2106 intel_panel_enable_backlight(intel_dp->attached_connector);
2107 _intel_edp_backlight_on(intel_dp);
2110 /* Disable backlight in the panel power control. */
/*
 * Clear EDP_BLC_ENABLE in PP_CONTROL, then record the timestamp used
 * to honour the backlight-off delay before any later panel power
 * change.
 */
2111 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2113 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2114 struct drm_i915_private *dev_priv = dev->dev_private;
2118 if (!is_edp(intel_dp))
2123 pp = ironlake_get_pp_control(intel_dp);
2124 pp &= ~EDP_BLC_ENABLE;
2126 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2128 I915_WRITE(pp_ctrl_reg, pp);
2129 POSTING_READ(pp_ctrl_reg);
2131 pps_unlock(intel_dp);
2133 intel_dp->last_backlight_off = jiffies;
2134 edp_wait_backlight_off(intel_dp);
2137 /* Disable backlight PP control and backlight PWM. */
/* Public entry point: mirror of intel_edp_backlight_on(), reverse order. */
2138 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2140 if (!is_edp(intel_dp))
2143 DRM_DEBUG_KMS("\n");
2145 _intel_edp_backlight_off(intel_dp);
2146 intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * sysfs bl_power hook: toggle only the PP-control backlight enable,
 * and only when the requested state differs from the current one
 * (sysfs may invoke this repeatedly with the same value).
 */
2150 * Hook for controlling the panel power control backlight through the bl_power
2151 * sysfs attribute. Take care to handle multiple calls.
2153 static void intel_edp_backlight_power(struct intel_connector *connector,
2156 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2160 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2161 pps_unlock(intel_dp);
2163 if (is_enabled == enable)
2166 DRM_DEBUG_KMS("panel power control backlight %s\n",
2167 enable ? "enable" : "disable");
2170 _intel_edp_backlight_on(intel_dp);
2172 _intel_edp_backlight_off(intel_dp);
/*
 * Enable the CPU eDP PLL (DP_A). The pipe must be disabled and both
 * the PLL and the port must currently be off; enable bits are cleared
 * from the cached DP value first so only the PLL comes up here.
 * NOTE(review): the trailing POSTING_READ/udelay after the write are
 * elided in this view.
 */
2175 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2178 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2179 struct drm_device *dev = crtc->dev;
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2183 assert_pipe_disabled(dev_priv,
2184 to_intel_crtc(crtc)->pipe);
2186 DRM_DEBUG_KMS("\n");
2187 dpa_ctl = I915_READ(DP_A);
2188 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2189 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2191 /* We don't adjust intel_dp->DP while tearing down the link, to
2192 * facilitate link retraining (e.g. after hotplug). Hence clear all
2193 * enable bits here to ensure that we don't enable too much. */
2194 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2195 intel_dp->DP |= DP_PLL_ENABLE;
2196 I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL (DP_A). The pipe and port must already be
 * off and the PLL on. The register is modified via a fresh read rather
 * than the cached intel_dp->DP, which must stay intact for retraining.
 * NOTE(review): the trailing POSTING_READ/udelay are elided here.
 */
2201 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2204 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2205 struct drm_device *dev = crtc->dev;
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2209 assert_pipe_disabled(dev_priv,
2210 to_intel_crtc(crtc)->pipe);
2212 dpa_ctl = I915_READ(DP_A);
2213 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2214 "dp pll off, should be on\n");
2215 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2217 /* We can't rely on the value tracked for the DP register in
2218 * intel_dp->DP because link_down must not change that (otherwise link
2219 * re-training will fail. */
2220 dpa_ctl &= ~DP_PLL_ENABLE;
2221 I915_WRITE(DP_A, dpa_ctl);
2226 /* If the sink supports it, try to set the power state appropriately */
/*
 * Write DP_SET_POWER on the sink: a single D3 write for power-down,
 * and up to three retried D0 writes for power-up since the sink may
 * take ~1 ms to wake its AUX handling. DPCD < 1.1 sinks don't support
 * the register and are skipped. NOTE(review): the D3 value, the D0
 * value and the retry break/sleep lines are elided in this view.
 */
2227 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2231 /* Should have a valid DPCD by this point */
2232 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2235 if (mode != DRM_MODE_DPMS_ON) {
2236 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2240 * When turning on, we need to retry for 1ms to give the sink
2243 for (i = 0; i < 3; i++) {
2244 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2253 DRM_DEBUG_KMS("failed to %s sink power state\n",
2254 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Hardware readout: report whether the DP port is enabled and which
 * pipe drives it. Pipe decoding differs per platform: gen7 port A and
 * CPT encode it differently (CPT requires scanning TRANS_DP_CTL of
 * each pipe), CHV has its own field, everything else uses the classic
 * bit 30. NOTE(review): the early-return and final return lines are
 * elided in this view.
 */
2257 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2260 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2261 enum port port = dp_to_dig_port(intel_dp)->port;
2262 struct drm_device *dev = encoder->base.dev;
2263 struct drm_i915_private *dev_priv = dev->dev_private;
2264 enum intel_display_power_domain power_domain;
2267 power_domain = intel_display_port_power_domain(encoder);
2268 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2271 tmp = I915_READ(intel_dp->output_reg);
2273 if (!(tmp & DP_PORT_EN))
2276 if (IS_GEN7(dev) && port == PORT_A) {
2277 *pipe = PORT_TO_PIPE_CPT(tmp);
2278 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
/* CPT: the pipe->port routing lives in TRANS_DP_CTL per pipe. */
2281 for_each_pipe(dev_priv, p) {
2282 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2283 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2289 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2290 intel_dp->output_reg);
2291 } else if (IS_CHERRYVIEW(dev)) {
2292 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2294 *pipe = PORT_TO_PIPE(tmp);
/*
 * Hardware readout: reconstruct the crtc state (sync flags, audio,
 * color range, lane count, m/n, port clock, dot clock) from the DP
 * port registers, for state checking against the computed config.
 * Also applies the UEFI/VBT eDP bpp fixup described inline.
 */
2300 static void intel_dp_get_config(struct intel_encoder *encoder,
2301 struct intel_crtc_state *pipe_config)
2303 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2305 struct drm_device *dev = encoder->base.dev;
2306 struct drm_i915_private *dev_priv = dev->dev_private;
2307 enum port port = dp_to_dig_port(intel_dp)->port;
2308 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2311 tmp = I915_READ(intel_dp->output_reg);
2313 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
/* CPT keeps the sync polarity in TRANS_DP_CTL, others in the port reg. */
2315 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2316 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2318 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2319 flags |= DRM_MODE_FLAG_PHSYNC;
2321 flags |= DRM_MODE_FLAG_NHSYNC;
2323 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2324 flags |= DRM_MODE_FLAG_PVSYNC;
2326 flags |= DRM_MODE_FLAG_NVSYNC;
2328 if (tmp & DP_SYNC_HS_HIGH)
2329 flags |= DRM_MODE_FLAG_PHSYNC;
2331 flags |= DRM_MODE_FLAG_NHSYNC;
2333 if (tmp & DP_SYNC_VS_HIGH)
2334 flags |= DRM_MODE_FLAG_PVSYNC;
2336 flags |= DRM_MODE_FLAG_NVSYNC;
2339 pipe_config->base.adjusted_mode.flags |= flags;
2341 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2342 tmp & DP_COLOR_RANGE_16_235)
2343 pipe_config->limited_color_range = true;
2345 pipe_config->has_dp_encoder = true;
2347 pipe_config->lane_count =
2348 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2350 intel_dp_get_m_n(crtc, pipe_config);
/* Port A link rate comes from the DP_A PLL frequency select bits. */
2352 if (port == PORT_A) {
2353 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2354 pipe_config->port_clock = 162000;
2356 pipe_config->port_clock = 270000;
2359 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2360 &pipe_config->dp_m_n);
2362 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2363 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2365 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2367 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2368 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2370 * This is a big fat ugly hack.
2372 * Some machines in UEFI boot mode provide us a VBT that has 18
2373 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2374 * unknown we fail to light up. Yet the same BIOS boots up with
2375 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2376 * max, not what it tells us to use.
2378 * Note: This will still be broken if the eDP panel is not lit
2379 * up by the BIOS, and thus we can't get the mode at module
2382 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2383 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2384 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Encoder disable hook: tear down audio/PSR, then power the panel
 * down (holding VDD across the transition), putting the sink into
 * low-power mode in between. On gen < 5 the port must be disabled
 * before the pipe, hence the early link_down.
 */
2388 static void intel_disable_dp(struct intel_encoder *encoder)
2390 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2391 struct drm_device *dev = encoder->base.dev;
2392 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2394 if (crtc->config->has_audio)
2395 intel_audio_codec_disable(encoder);
2397 if (HAS_PSR(dev) && !HAS_DDI(dev))
2398 intel_psr_disable(intel_dp);
2400 /* Make sure the panel is off before trying to change the mode. But also
2401 * ensure that we have vdd while we switch off the panel. */
2402 intel_edp_panel_vdd_on(intel_dp);
2403 intel_edp_backlight_off(intel_dp);
2404 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2405 intel_edp_panel_off(intel_dp);
2407 /* disable the port before the pipe on g4x */
2408 if (INTEL_INFO(dev)->gen < 5)
2409 intel_dp_link_down(intel_dp);
/*
 * ILK post-disable: drop the link after the pipe is off, and turn off
 * the CPU eDP PLL. NOTE(review): the port == PORT_A condition guarding
 * the PLL-off call is elided in this view.
 */
2412 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2415 enum port port = dp_to_dig_port(intel_dp)->port;
2417 intel_dp_link_down(intel_dp);
2419 ironlake_edp_pll_off(intel_dp);
/* VLV post-disable: just take the link down after the pipe is off. */
2422 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2424 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2426 intel_dp_link_down(intel_dp);
/*
 * Assert or deassert the CHV PHY data-lane soft reset via sideband
 * (DPIO) writes: the TX lane reset bits in PCS_DW0 and the clock soft
 * reset (with request-enable) in PCS_DW1. The PCS23 pair is touched
 * only when more than two lanes are in use. NOTE(review): the
 * if (!reset) / else lines around each bit flip are elided in this
 * view; 'reset' selects between the &= and |= forms.
 */
2429 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2432 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2433 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2434 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2435 enum pipe pipe = crtc->pipe;
2438 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2440 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2442 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2443 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2445 if (crtc->config->lane_count > 2) {
2446 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2448 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2450 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2451 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2454 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2455 val |= CHV_PCS_REQ_SOFTRESET_EN;
2457 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2459 val |= DPIO_PCS_CLK_SOFT_RESET;
2460 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2462 if (crtc->config->lane_count > 2) {
2463 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2464 val |= CHV_PCS_REQ_SOFTRESET_EN;
2466 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2468 val |= DPIO_PCS_CLK_SOFT_RESET;
2469 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/*
 * CHV ->post_disable hook: drop the link, then assert the data lane
 * soft reset under sb_lock so the lanes are quiesced.
 */
2473 static void chv_post_disable_dp(struct intel_encoder *encoder)
2475 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2476 struct drm_device *dev = encoder->base.dev;
2477 struct drm_i915_private *dev_priv = dev->dev_private;
2479 intel_dp_link_down(intel_dp);
2481 mutex_lock(&dev_priv->sb_lock);
2483 /* Assert data lane reset */
2484 chv_data_lane_soft_reset(encoder, true);
2486 mutex_unlock(&dev_priv->sb_lock);
/*
 * Translate a DPCD training pattern (dp_train_pat) into the platform's
 * register encoding. Three hardware generations are handled: DDI
 * (DP_TP_CTL), CPT/IVB-port-A (DP_LINK_TRAIN_*_CPT in the port register),
 * and the legacy/CHV encodings. The *DP argument (declaration not visible
 * in this view) accumulates bits for the port register; DDI writes
 * DP_TP_CTL directly instead.
 * NOTE(review): case-terminating break statements and the HAS_DDI() guard
 * are among the lines missing from this view.
 */
2490 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2492 uint8_t dp_train_pat)
2494 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2495 struct drm_device *dev = intel_dig_port->base.base.dev;
2496 struct drm_i915_private *dev_priv = dev->dev_private;
2497 enum port port = intel_dig_port->port;
2500 uint32_t temp = I915_READ(DP_TP_CTL(port));
2502 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2503 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2505 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2507 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2508 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2509 case DP_TRAINING_PATTERN_DISABLE:
2510 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2513 case DP_TRAINING_PATTERN_1:
2514 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2516 case DP_TRAINING_PATTERN_2:
2517 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2519 case DP_TRAINING_PATTERN_3:
2520 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2523 I915_WRITE(DP_TP_CTL(port), temp);
2525 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2526 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2527 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2529 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2530 case DP_TRAINING_PATTERN_DISABLE:
2531 *DP |= DP_LINK_TRAIN_OFF_CPT;
2533 case DP_TRAINING_PATTERN_1:
2534 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2536 case DP_TRAINING_PATTERN_2:
2537 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2539 case DP_TRAINING_PATTERN_3:
2540 DRM_ERROR("DP training pattern 3 not supported\n");
2541 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2546 if (IS_CHERRYVIEW(dev))
2547 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2549 *DP &= ~DP_LINK_TRAIN_MASK;
2551 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2552 case DP_TRAINING_PATTERN_DISABLE:
2553 *DP |= DP_LINK_TRAIN_OFF;
2555 case DP_TRAINING_PATTERN_1:
2556 *DP |= DP_LINK_TRAIN_PAT_1;
2558 case DP_TRAINING_PATTERN_2:
2559 *DP |= DP_LINK_TRAIN_PAT_2;
2561 case DP_TRAINING_PATTERN_3:
2562 if (IS_CHERRYVIEW(dev)) {
2563 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2565 DRM_ERROR("DP training pattern 3 not supported\n");
2566 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Program the port register with training pattern 1 first, post the
 * write, and only then set DP_PORT_EN with a second write — the
 * two-step sequence is required on VLV/CHV (see comment below) or
 * link training fails on a freshly-used power sequencer.
 */
2573 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2575 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2576 struct drm_i915_private *dev_priv = dev->dev_private;
2578 /* enable with pattern 1 (as per spec) */
2579 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2580 DP_TRAINING_PATTERN_1);
2582 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2583 POSTING_READ(intel_dp->output_reg);
2586 * Magic for VLV/CHV. We _must_ first set up the register
2587 * without actually enabling the port, and then do another
2588 * write to enable the port. Otherwise link training will
2589 * fail when the power sequencer is freshly used for this port.
2591 intel_dp->DP |= DP_PORT_EN;
2593 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2594 POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable path: WARNs (and bails) if the port is already
 * enabled, binds a panel power sequencer on VLV/CHV, enables the port,
 * cycles eDP panel power under vdd, waits for the PHY lanes on VLV/CHV,
 * wakes the sink, runs the full link-training sequence, and finally
 * enables audio if the mode carries it.
 * NOTE(review): pps_lock() acquisition matching the pps_unlock() below
 * is among the lines not visible in this view.
 */
2597 static void intel_enable_dp(struct intel_encoder *encoder)
2599 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2600 struct drm_device *dev = encoder->base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2603 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2605 if (WARN_ON(dp_reg & DP_PORT_EN))
2610 if (IS_VALLEYVIEW(dev))
2611 vlv_init_panel_power_sequencer(intel_dp);
2613 intel_dp_enable_port(intel_dp);
2615 edp_panel_vdd_on(intel_dp);
2616 edp_panel_on(intel_dp);
2617 edp_panel_vdd_off(intel_dp, true);
2619 pps_unlock(intel_dp);
2621 if (IS_VALLEYVIEW(dev)) {
2622 unsigned int lane_mask = 0x0;
2624 if (IS_CHERRYVIEW(dev))
2625 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2627 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2631 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2632 intel_dp_start_link_train(intel_dp);
2633 intel_dp_complete_link_train(intel_dp);
2634 intel_dp_stop_link_train(intel_dp);
2636 if (crtc->config->has_audio) {
2637 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2638 pipe_name(crtc->pipe));
2639 intel_audio_codec_enable(encoder);
/*
 * g4x/ILK ->enable hook: common DP enable followed by turning the
 * eDP backlight on (no-op for non-eDP).
 */
2643 static void g4x_enable_dp(struct intel_encoder *encoder)
2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2647 intel_enable_dp(encoder);
2648 intel_edp_backlight_on(intel_dp);
/*
 * VLV/CHV ->enable hook: the port itself was enabled in ->pre_enable,
 * so only the backlight and PSR remain to be turned on here.
 */
2651 static void vlv_enable_dp(struct intel_encoder *encoder)
2653 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655 intel_edp_backlight_on(intel_dp);
2656 intel_psr_enable(intel_dp);
/*
 * g4x/ILK ->pre_enable hook: program the port registers and, for the
 * CPU eDP port (port A, which only exists on ilk+), set up and turn on
 * the eDP PLL before the pipe starts.
 */
2659 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2661 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2662 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2664 intel_dp_prepare(encoder);
2666 /* Only ilk+ has port A */
2667 if (dport->port == PORT_A) {
2668 ironlake_set_pll_cpu_edp(intel_dp);
2669 ironlake_edp_pll_on(intel_dp);
/*
 * Logically disconnect this port from its current power sequencer:
 * sync vdd off first, clear the port-select field in PP_ON_DELAYS,
 * and mark pps_pipe invalid. Caller holds pps_mutex.
 */
2673 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2675 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2676 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2677 enum pipe pipe = intel_dp->pps_pipe;
2678 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2680 edp_panel_vdd_off_sync(intel_dp);
2683 * VLV seems to get confused when multiple power sequencers
2684 * have the same port selected (even if only one has power/vdd
2685 * enabled). The failure manifests as vlv_wait_port_ready() failing
2686 * CHV on the other hand doesn't seem to mind having the same port
2687 * selected in multiple power sequencers, but let's clear the
2688 * port select always when logically disconnecting a power sequencer
2691 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2692 pipe_name(pipe), port_name(intel_dig_port->port));
2693 I915_WRITE(pp_on_reg, 0);
2694 POSTING_READ(pp_on_reg);
2696 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Walk all eDP encoders and detach any that currently own the power
 * sequencer for @pipe, so the caller can claim it. WARNs if the victim
 * encoder is still active. Caller holds pps_mutex (asserted below).
 */
2699 static void vlv_steal_power_sequencer(struct drm_device *dev,
2702 struct drm_i915_private *dev_priv = dev->dev_private;
2703 struct intel_encoder *encoder;
2705 lockdep_assert_held(&dev_priv->pps_mutex);
2707 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2710 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2712 struct intel_dp *intel_dp;
2715 if (encoder->type != INTEL_OUTPUT_EDP)
2718 intel_dp = enc_to_intel_dp(&encoder->base);
2719 port = dp_to_dig_port(intel_dp)->port;
2721 if (intel_dp->pps_pipe != pipe)
2724 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2725 pipe_name(pipe), port_name(port));
2727 WARN(encoder->base.crtc,
2728 "stealing pipe %c power sequencer from active eDP port %c\n",
2729 pipe_name(pipe), port_name(port));
2731 /* make sure vdd is off before we steal it */
2732 vlv_detach_power_sequencer(intel_dp);
/*
 * Bind the power sequencer of this encoder's pipe to the port: detach
 * any previously-used sequencer, steal the target pipe's sequencer from
 * whichever port holds it, claim it, and reprogram the PPS registers.
 * No-op for non-eDP or when the binding is already correct.
 * Caller holds pps_mutex (asserted below).
 */
2736 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2738 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2739 struct intel_encoder *encoder = &intel_dig_port->base;
2740 struct drm_device *dev = encoder->base.dev;
2741 struct drm_i915_private *dev_priv = dev->dev_private;
2742 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2744 lockdep_assert_held(&dev_priv->pps_mutex);
2746 if (!is_edp(intel_dp))
2749 if (intel_dp->pps_pipe == crtc->pipe)
2753 * If another power sequencer was being used on this
2754 * port previously make sure to turn off vdd there while
2755 * we still have control of it.
2757 if (intel_dp->pps_pipe != INVALID_PIPE)
2758 vlv_detach_power_sequencer(intel_dp)
2761 * We may be stealing the power
2762 * sequencer from another port.
2764 vlv_steal_power_sequencer(dev, crtc->pipe);
2766 /* now it's all ours */
2767 intel_dp->pps_pipe = crtc->pipe;
2769 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2770 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2772 /* init power sequencer on this pipe and port */
2773 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2774 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV ->pre_enable hook: program PHY PCS registers over sideband
 * (clock channel, lane deskew/skew fixups with magic tuning values),
 * then run the common DP enable.
 * NOTE(review): the modifications applied to 'val' between the
 * VLV_PCS01_DW8 read and the VLV_PCS_DW8 write are not visible here.
 */
2777 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2779 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2780 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2781 struct drm_device *dev = encoder->base.dev;
2782 struct drm_i915_private *dev_priv = dev->dev_private;
2783 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2784 enum dpio_channel port = vlv_dport_to_channel(dport);
2785 int pipe = intel_crtc->pipe;
2788 mutex_lock(&dev_priv->sb_lock);
2790 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2797 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2801 mutex_unlock(&dev_priv->sb_lock);
2803 intel_enable_dp(encoder);
/*
 * VLV ->pre_pll_enable hook: prepare the port registers, then put the
 * Tx lanes into their default reset state and apply the documented
 * inter-pair skew workaround values, all under sb_lock.
 */
2806 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2808 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2809 struct drm_device *dev = encoder->base.dev;
2810 struct drm_i915_private *dev_priv = dev->dev_private;
2811 struct intel_crtc *intel_crtc =
2812 to_intel_crtc(encoder->base.crtc);
2813 enum dpio_channel port = vlv_dport_to_channel(dport);
2814 int pipe = intel_crtc->pipe;
2816 intel_dp_prepare(encoder);
2818 /* Program Tx lane resets to default */
2819 mutex_lock(&dev_priv->sb_lock);
2820 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2821 DPIO_PCS_TX_LANE2_RESET |
2822 DPIO_PCS_TX_LANE1_RESET);
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2824 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2825 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2826 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2827 DPIO_PCS_CLK_SOFT_RESET);
2829 /* Fix up inter-pair skew failure */
2830 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2831 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2832 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2833 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV ->pre_enable hook: under sb_lock, hand TX FIFO reset sourcing to
 * hardware, program per-lane latency-optimal (upar) settings, pick a
 * data-lane stagger value from the port clock, program the stagger
 * registers, and deassert the data lane soft reset. Then run the common
 * DP enable and release the CL2 powergate override if one was taken in
 * ->pre_pll_enable.
 * NOTE(review): the stagger value assignments for each port_clock range
 * and some loop-body lines are not visible in this view.
 */
2836 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2839 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2840 struct drm_device *dev = encoder->base.dev;
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2842 struct intel_crtc *intel_crtc =
2843 to_intel_crtc(encoder->base.crtc);
2844 enum dpio_channel ch = vlv_dport_to_channel(dport);
2845 int pipe = intel_crtc->pipe;
2846 int data, i, stagger;
2849 mutex_lock(&dev_priv->sb_lock);
2851 /* allow hardware to manage TX FIFO reset source */
2852 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2853 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2854 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2856 if (intel_crtc->config->lane_count > 2) {
2857 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2858 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2862 /* Program Tx lane latency optimal setting*/
2863 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2864 /* Set the upar bit */
2865 if (intel_crtc->config->lane_count == 1)
2868 data = (i == 1) ? 0x0 : 0x1;
2869 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2870 data << DPIO_UPAR_SHIFT);
2873 /* Data lane stagger programming */
2874 if (intel_crtc->config->port_clock > 270000)
2876 else if (intel_crtc->config->port_clock > 135000)
2878 else if (intel_crtc->config->port_clock > 67500)
2880 else if (intel_crtc->config->port_clock > 33750)
2885 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2886 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2887 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2889 if (intel_crtc->config->lane_count > 2) {
2890 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2891 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2892 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2895 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2896 DPIO_LANESTAGGER_STRAP(stagger) |
2897 DPIO_LANESTAGGER_STRAP_OVRD |
2898 DPIO_TX1_STAGGER_MASK(0x1f) |
2899 DPIO_TX1_STAGGER_MULT(6) |
2900 DPIO_TX2_STAGGER_MULT(0));
2902 if (intel_crtc->config->lane_count > 2) {
2903 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2904 DPIO_LANESTAGGER_STRAP(stagger) |
2905 DPIO_LANESTAGGER_STRAP_OVRD |
2906 DPIO_TX1_STAGGER_MASK(0x1f) |
2907 DPIO_TX1_STAGGER_MULT(7) |
2908 DPIO_TX2_STAGGER_MULT(5));
2911 /* Deassert data lane reset */
2912 chv_data_lane_soft_reset(encoder, false);
2914 mutex_unlock(&dev_priv->sb_lock);
2916 intel_enable_dp(encoder);
2918 /* Second common lane will stay alive on its own now */
2919 if (dport->release_cl2_override) {
2920 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2921 dport->release_cl2_override = false;
/*
 * CHV ->pre_pll_enable hook: prepare port registers, force the second
 * common lane alive when needed (CH0 on pipe B, so the PLL registers
 * become accessible), powergate unused lanes, and under sb_lock assert
 * the data lane reset, program left/right clock distribution, and set
 * clock-channel usage for PCS and common lane blocks.
 * NOTE(review): the conditions choosing BUFLEFTENA/BUFRIGHTENA forcing
 * and USEDCLKCHANNEL set-vs-clear are among the lines missing here.
 */
2925 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2927 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2928 struct drm_device *dev = encoder->base.dev;
2929 struct drm_i915_private *dev_priv = dev->dev_private;
2930 struct intel_crtc *intel_crtc =
2931 to_intel_crtc(encoder->base.crtc);
2932 enum dpio_channel ch = vlv_dport_to_channel(dport);
2933 enum pipe pipe = intel_crtc->pipe;
2934 unsigned int lane_mask =
2935 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2938 intel_dp_prepare(encoder);
2941 * Must trick the second common lane into life.
2942 * Otherwise we can't even access the PLL.
2944 if (ch == DPIO_CH0 && pipe == PIPE_B)
2945 dport->release_cl2_override =
2946 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2948 chv_phy_powergate_lanes(encoder, true, lane_mask);
2950 mutex_lock(&dev_priv->sb_lock);
2952 /* Assert data lane reset */
2953 chv_data_lane_soft_reset(encoder, true);
2955 /* program left/right clock distribution */
2956 if (pipe != PIPE_B) {
2957 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2958 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2960 val |= CHV_BUFLEFTENA1_FORCE;
2962 val |= CHV_BUFRIGHTENA1_FORCE;
2963 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2965 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2966 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2968 val |= CHV_BUFLEFTENA2_FORCE;
2970 val |= CHV_BUFRIGHTENA2_FORCE;
2971 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2974 /* program clock channel usage */
2975 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2976 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2978 val &= ~CHV_PCS_USEDCLKCHANNEL;
2980 val |= CHV_PCS_USEDCLKCHANNEL;
2981 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2983 if (intel_crtc->config->lane_count > 2) {
2984 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2985 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2987 val &= ~CHV_PCS_USEDCLKCHANNEL;
2989 val |= CHV_PCS_USEDCLKCHANNEL;
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2994 * This is a bit weird since generally CL
2995 * matches the pipe, but here we need to
2996 * pick the CL based on the port.
2998 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3000 val &= ~CHV_CMN_USEDCLKCHANNEL;
3002 val |= CHV_CMN_USEDCLKCHANNEL;
3003 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3005 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV ->post_pll_disable hook: tear down the left/right clock
 * distribution programmed at pre_pll_enable, then drop the lane
 * powergate overrides (leaving one lane's power-down bit clear — see
 * comment below — so the channel can still be powered on later).
 */
3008 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3010 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3011 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3014 mutex_lock(&dev_priv->sb_lock);
3016 /* disable left/right clock distribution */
3017 if (pipe != PIPE_B) {
3018 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3019 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3020 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3022 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3023 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3024 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3027 mutex_unlock(&dev_priv->sb_lock);
3030 * Leave the power down bit cleared for at least one
3031 * lane so that chv_powergate_phy_ch() will power
3032 * on something when the channel is otherwise unused.
3033 * When the port is off and the override is removed
3034 * the lanes power down anyway, so otherwise it doesn't
3035 * really matter what the state of power down bits is
3038 chv_phy_powergate_lanes(encoder, false, 0x0);
3042 * Native read with retry for link status and receiver capability reads for
3043 * cases where the sink may still be asleep.
3045 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3046 * supposed to retry 3 times per the spec.
3049 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3050 void *buffer, size_t size)
3056 * Sometimes we just get the same incorrect byte repeated
3057 * over the entire buffer. Doing just one throw away read
3058 * initially seems to "solve" it.
3060 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
/* Up to 3 attempts; success/termination conditions not visible in this view. */
3062 for (i = 0; i < 3; i++) {
3063 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3073 * Fetch AUX CH registers 0x202 - 0x207 which contain
3074 * link status information
3077 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
/* Returns true only when all DP_LINK_STATUS_SIZE bytes were read. */
3079 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3082 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3085 /* These are source-specific values. */
/*
 * Maximum DPCD voltage-swing level this source (platform + port) can
 * drive, used to clamp sink adjust requests during link training.
 */
3087 intel_dp_voltage_max(struct intel_dp *intel_dp)
3089 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3090 struct drm_i915_private *dev_priv = dev->dev_private;
3091 enum port port = dp_to_dig_port(intel_dp)->port;
3093 if (IS_BROXTON(dev))
3094 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3095 else if (INTEL_INFO(dev)->gen >= 9) {
/* Low-vswing eDP panels (VBT opt-in) on port A get the full range. */
3096 if (dev_priv->edp_low_vswing && port == PORT_A)
3097 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3098 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3099 } else if (IS_VALLEYVIEW(dev))
3100 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3101 else if (IS_GEN7(dev) && port == PORT_A)
3102 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3103 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3104 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3106 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * Maximum DPCD pre-emphasis level the source supports for the given
 * voltage-swing level: higher swings leave less headroom, so the
 * allowed pre-emphasis shrinks as the swing grows (per-platform tables).
 */
3110 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3112 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3113 enum port port = dp_to_dig_port(intel_dp)->port;
3115 if (INTEL_INFO(dev)->gen >= 9) {
3116 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3118 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3122 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3123 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3124 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3126 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3128 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3129 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3133 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3135 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3140 } else if (IS_VALLEYVIEW(dev)) {
3141 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3142 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3143 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3145 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3147 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3150 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3152 } else if (IS_GEN7(dev) && port == PORT_A) {
3153 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3155 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3156 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3158 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3160 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3163 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3164 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3165 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3167 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3169 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3172 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * Translate the DPCD train_set (swing + pre-emphasis) into VLV PHY
 * tuning values (demph / preemph / uniqtranscale magic constants) and
 * program them over sideband. Invalid swing/pre-emphasis combinations
 * presumably bail early (those return paths are not visible here).
 */
3177 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3180 struct drm_i915_private *dev_priv = dev->dev_private;
3181 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3182 struct intel_crtc *intel_crtc =
3183 to_intel_crtc(dport->base.base.crtc);
3184 unsigned long demph_reg_value, preemph_reg_value,
3185 uniqtranscale_reg_value;
3186 uint8_t train_set = intel_dp->train_set[0];
3187 enum dpio_channel port = vlv_dport_to_channel(dport);
3188 int pipe = intel_crtc->pipe;
3190 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3191 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3192 preemph_reg_value = 0x0004000;
3193 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3195 demph_reg_value = 0x2B405555;
3196 uniqtranscale_reg_value = 0x552AB83A;
3198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3199 demph_reg_value = 0x2B404040;
3200 uniqtranscale_reg_value = 0x5548B83A;
3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3203 demph_reg_value = 0x2B245555;
3204 uniqtranscale_reg_value = 0x5560B83A;
3206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3207 demph_reg_value = 0x2B405555;
3208 uniqtranscale_reg_value = 0x5598DA3A;
3214 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3215 preemph_reg_value = 0x0002000;
3216 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3217 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3218 demph_reg_value = 0x2B404040;
3219 uniqtranscale_reg_value = 0x5552B83A;
3221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3222 demph_reg_value = 0x2B404848;
3223 uniqtranscale_reg_value = 0x5580B83A;
3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3226 demph_reg_value = 0x2B404040;
3227 uniqtranscale_reg_value = 0x55ADDA3A;
3233 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3234 preemph_reg_value = 0x0000000;
3235 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3236 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3237 demph_reg_value = 0x2B305555;
3238 uniqtranscale_reg_value = 0x5570B83A;
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3241 demph_reg_value = 0x2B2B4040;
3242 uniqtranscale_reg_value = 0x55ADDA3A;
3248 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3249 preemph_reg_value = 0x0006000;
3250 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3252 demph_reg_value = 0x1B405555;
3253 uniqtranscale_reg_value = 0x55ADDA3A;
/* All PHY writes bracketed by TX_DW5 0x0 ... 0x80000000 (latch). */
3263 mutex_lock(&dev_priv->sb_lock);
3264 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3265 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3267 uniqtranscale_reg_value);
3268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3269 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3271 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3272 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV needs the unique transition scale only for the extreme corner of
 * the training table: max voltage swing (level 3) with zero pre-emphasis.
 */
3277 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3279 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3280 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
/*
 * Translate the DPCD train_set into CHV PHY deemphasis/margin values
 * and program them over sideband: clear the swing-calc latches, zero
 * the Tx margins, write per-lane deemph and margin (plus an always-set
 * unique transition scale value), enable/disable the unique transition
 * scale per chv_need_uniq_trans_scale(), kick off the swing
 * calculation, and set LRC bypass.
 * NOTE(review): early-return paths for invalid swing/pre-emphasis
 * combinations are not visible in this view.
 */
3283 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3285 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3286 struct drm_i915_private *dev_priv = dev->dev_private;
3287 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3288 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3289 u32 deemph_reg_value, margin_reg_value, val;
3290 uint8_t train_set = intel_dp->train_set[0];
3291 enum dpio_channel ch = vlv_dport_to_channel(dport);
3292 enum pipe pipe = intel_crtc->pipe;
3295 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3296 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3297 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3299 deemph_reg_value = 128;
3300 margin_reg_value = 52;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3303 deemph_reg_value = 128;
3304 margin_reg_value = 77;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3307 deemph_reg_value = 128;
3308 margin_reg_value = 102;
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3311 deemph_reg_value = 128;
3312 margin_reg_value = 154;
3313 /* FIXME extra to set for 1200 */
3319 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3320 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3322 deemph_reg_value = 85;
3323 margin_reg_value = 78;
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3326 deemph_reg_value = 85;
3327 margin_reg_value = 116;
3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3330 deemph_reg_value = 85;
3331 margin_reg_value = 154;
3337 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3340 deemph_reg_value = 64;
3341 margin_reg_value = 104;
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3344 deemph_reg_value = 64;
3345 margin_reg_value = 154;
3351 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3352 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3354 deemph_reg_value = 43;
3355 margin_reg_value = 154;
3365 mutex_lock(&dev_priv->sb_lock);
3367 /* Clear calc init */
3368 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3369 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3370 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3371 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3372 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3374 if (intel_crtc->config->lane_count > 2) {
3375 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3376 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3377 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3378 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3379 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3382 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3383 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3384 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3385 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3387 if (intel_crtc->config->lane_count > 2) {
3388 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3389 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3390 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3391 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3394 /* Program swing deemph */
3395 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3396 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3397 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3398 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3399 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3402 /* Program swing margin */
3403 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3404 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3406 val &= ~DPIO_SWING_MARGIN000_MASK;
3407 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3410 * Supposedly this value shouldn't matter when unique transition
3411 * scale is disabled, but in fact it does matter. Let's just
3412 * always program the same value and hope it's OK.
3414 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3415 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3417 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3421 * The document said it needs to set bit 27 for ch0 and bit 26
3422 * for ch1. Might be a typo in the doc.
3423 * For now, for this unique transition scale selection, set bit
3424 * 27 for ch0 and ch1.
3426 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3427 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3428 if (chv_need_uniq_trans_scale(train_set))
3429 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3431 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3432 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3435 /* Start swing calculation */
3436 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3437 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3438 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3440 if (intel_crtc->config->lane_count > 2) {
3441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3442 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3443 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3447 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3448 val |= DPIO_LRC_BYPASS;
3449 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3451 mutex_unlock(&dev_priv->sb_lock);
/*
 * Build the next train_set from the sink's adjust requests: take the
 * max requested voltage/pre-emphasis across the active lanes, clamp
 * both to the source's limits (setting MAX_*_REACHED when clamped),
 * and write the same value to all four train_set entries.
 * NOTE(review): the lines accumulating the per-lane maxima into v/p are
 * not visible in this view.
 */
3457 intel_get_adjust_train(struct intel_dp *intel_dp,
3458 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3463 uint8_t voltage_max;
3464 uint8_t preemph_max;
3466 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3467 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3468 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3476 voltage_max = intel_dp_voltage_max(intel_dp);
3477 if (v >= voltage_max)
3478 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3480 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3481 if (p >= preemph_max)
3482 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3484 for (lane = 0; lane < 4; lane++)
3485 intel_dp->train_set[lane] = v | p;
/*
 * Map the DPCD train_set to the gen4 DP port register's voltage and
 * pre-emphasis bits (DP_VOLTAGE_* | DP_PRE_EMPHASIS_*).
 */
3489 gen4_signal_levels(uint8_t train_set)
3491 uint32_t signal_levels = 0;
3493 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3496 signal_levels |= DP_VOLTAGE_0_4;
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3499 signal_levels |= DP_VOLTAGE_0_6;
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3502 signal_levels |= DP_VOLTAGE_0_8;
3504 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3505 signal_levels |= DP_VOLTAGE_1_2;
3508 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3509 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3511 signal_levels |= DP_PRE_EMPHASIS_0;
3513 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3514 signal_levels |= DP_PRE_EMPHASIS_3_5;
3516 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3517 signal_levels |= DP_PRE_EMPHASIS_6;
3519 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3520 signal_levels |= DP_PRE_EMPHASIS_9_5;
3523 return signal_levels;
3526 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map a combined swing+pre-emphasis DPCD value to SNB's coarse
 * EDP_LINK_TRAIN_* encoding; several DPCD combinations intentionally
 * share one hardware level. Unknown combos log and fall back to the
 * lowest setting.
 */
3528 gen6_edp_signal_levels(uint8_t train_set)
3530 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3531 DP_TRAIN_PRE_EMPHASIS_MASK);
3532 switch (signal_levels) {
3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3535 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3537 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3539 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3540 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3543 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3544 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3545 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3546 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3548 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3549 "0x%x\n", signal_levels);
3550 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3554 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * gen7_edp_signal_levels - same mapping as the gen6 variant but for the
 * IVB eDP (port A) register encoding.  Unsupported combinations fall back
 * to 500mV/0dB with a debug message.
 */
3556 gen7_edp_signal_levels(uint8_t train_set)
3558 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3559 DP_TRAIN_PRE_EMPHASIS_MASK);
3560 switch (signal_levels) {
3561 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3562 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3563 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3564 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3565 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3566 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3569 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3570 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3571 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3573 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3574 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3575 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3576 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3579 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3580 "0x%x\n", signal_levels);
3581 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3585 /* Properly updates "DP" with the correct signal levels. */
/*
 * Dispatches per platform: DDI (HSW+/BXT), CHV, VLV, IVB/SNB port A eDP,
 * or the gen4 fallback.  Computes the register bits for the current
 * train_set and merges them into *DP under the platform-specific mask.
 *
 * NOTE(review): excerpt is missing the opening of the DDI/BXT branch
 * around lines 3596-3601.
 */
3587 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3589 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3590 enum port port = intel_dig_port->port;
3591 struct drm_device *dev = intel_dig_port->base.base.dev;
3592 uint32_t signal_levels, mask = 0;
/* Lane 0's train_set is used; all lanes are programmed identically. */
3593 uint8_t train_set = intel_dp->train_set[0];
3596 signal_levels = ddi_signal_levels(intel_dp);
3598 if (IS_BROXTON(dev))
3601 mask = DDI_BUF_EMP_MASK;
3602 } else if (IS_CHERRYVIEW(dev)) {
3603 signal_levels = chv_signal_levels(intel_dp);
3604 } else if (IS_VALLEYVIEW(dev)) {
3605 signal_levels = vlv_signal_levels(intel_dp);
3606 } else if (IS_GEN7(dev) && port == PORT_A) {
3607 signal_levels = gen7_edp_signal_levels(train_set);
3608 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3609 } else if (IS_GEN6(dev) && port == PORT_A) {
3610 signal_levels = gen6_edp_signal_levels(train_set);
3611 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3613 signal_levels = gen4_signal_levels(train_set);
3614 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3618 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3620 DRM_DEBUG_KMS("Using vswing level %d\n",
3621 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3622 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3623 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3624 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Replace only the masked bits; the rest of *DP is preserved. */
3626 *DP = (*DP & ~mask) | signal_levels;
/*
 * intel_dp_set_link_train - program a training pattern on the source
 * (port register) and mirror it to the sink's DP_TRAINING_PATTERN_SET,
 * followed by the per-lane DP_TRAINING_LANEx_SET values unless the
 * pattern is DISABLE.
 *
 * NOTE(review): excerpt is missing the ret/len declarations and the final
 * "return ret == len" (per upstream) - confirm against the full file.
 */
3630 intel_dp_set_link_train(struct intel_dp *intel_dp,
3632 uint8_t dp_train_pat)
3634 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3635 struct drm_i915_private *dev_priv =
3636 to_i915(intel_dig_port->base.base.dev);
/* buf[0] = pattern byte, buf[1..] = per-lane drive settings. */
3637 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3640 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3642 I915_WRITE(intel_dp->output_reg, *DP);
3643 POSTING_READ(intel_dp->output_reg);
3645 buf[0] = dp_train_pat;
3646 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3647 DP_TRAINING_PATTERN_DISABLE) {
3648 /* don't write DP_TRAINING_LANEx_SET on disable */
3651 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3652 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3653 len = intel_dp->lane_count + 1;
3656 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * intel_dp_reset_link_train - start (or restart) training from scratch:
 * zero the drive settings unless a previously-trained set is being
 * reused, reprogram the signal levels and set the training pattern.
 */
3663 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3664 uint8_t dp_train_pat)
3666 if (!intel_dp->train_set_valid)
3667 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3668 intel_dp_set_signal_levels(intel_dp, DP);
3669 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * intel_dp_update_link_train - apply the sink's requested drive-setting
 * adjustments mid-training: recompute train_set from @link_status, push
 * the new levels to the port register and to DP_TRAINING_LANEx_SET.
 * Returns true when all lane bytes were written.
 */
3673 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3674 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3677 struct drm_i915_private *dev_priv =
3678 to_i915(intel_dig_port->base.base.dev);
3681 intel_get_adjust_train(intel_dp, link_status);
3682 intel_dp_set_signal_levels(intel_dp, DP);
3684 I915_WRITE(intel_dp->output_reg, *DP);
3685 POSTING_READ(intel_dp->output_reg);
3687 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3688 intel_dp->train_set, intel_dp->lane_count);
/* drm_dp_dpcd_write() returns bytes written; success == lane_count. */
3690 return ret == intel_dp->lane_count;
/*
 * intel_dp_set_idle_link_train - switch a DDI port into idle-pattern
 * transmission and wait for the idle-done status (except on PORT_A, per
 * the workaround comment below).
 *
 * NOTE(review): excerpt is missing the early-bail lines before the
 * register access (upstream returns unless HAS_DDI).
 */
3693 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3695 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3696 struct drm_device *dev = intel_dig_port->base.base.dev;
3697 struct drm_i915_private *dev_priv = dev->dev_private;
3698 enum port port = intel_dig_port->port;
3704 val = I915_READ(DP_TP_CTL(port));
3705 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3706 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3707 I915_WRITE(DP_TP_CTL(port), val);
3710 * On PORT_A we can have only eDP in SST mode. There the only reason
3711 * we need to set idle transmission mode is to work around a HW issue
3712 * where we enable the pipe while not in idle link-training mode.
3713 * In this case there is requirement to wait for a minimum number of
3714 * idle patterns to be sent.
3719 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3721 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3724 /* Enable corresponding port and start training pattern 1 */
/*
 * intel_dp_start_link_train - clock-recovery phase of DP link training.
 * Writes the link configuration (rate, lane count, spread/coding) to the
 * sink, then loops on training pattern 1: read link status, adjust drive
 * settings as requested, and give up after bounded retries (5 attempts at
 * max voltage for a full reset, 5 attempts at the same voltage).
 *
 * NOTE(review): excerpt is missing several lines (loop counters init,
 * some braces/continue statements) - the visible flow matches upstream.
 */
3726 intel_dp_start_link_train(struct intel_dp *intel_dp)
3728 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3729 struct drm_device *dev = encoder->dev;
3732 int voltage_tries, loop_tries;
3733 uint32_t DP = intel_dp->DP;
3734 uint8_t link_config[2];
3735 uint8_t link_bw, rate_select;
3738 intel_ddi_prepare_link_retrain(encoder);
3740 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3741 &link_bw, &rate_select);
3743 /* Write the link configuration data */
3744 link_config[0] = link_bw;
3745 link_config[1] = intel_dp->lane_count;
3746 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3747 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3748 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
/* Sinks with a rate table select by index instead of BW code. */
3749 if (intel_dp->num_sink_rates)
3750 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3754 link_config[1] = DP_SET_ANSI_8B10B;
3755 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3759 /* clock recovery */
3760 if (!intel_dp_reset_link_train(intel_dp, &DP,
3761 DP_TRAINING_PATTERN_1 |
3762 DP_LINK_SCRAMBLING_DISABLE)) {
3763 DRM_ERROR("failed to enable link training\n");
3771 uint8_t link_status[DP_LINK_STATUS_SIZE];
/* Mandatory delay before polling status (DP spec / sink caps). */
3773 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3774 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3775 DRM_ERROR("failed to get link status\n");
3779 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3780 DRM_DEBUG_KMS("clock recovery OK\n");
3785 * if we used previously trained voltage and pre-emphasis values
3786 * and we don't get clock recovery, reset link training values
3788 if (intel_dp->train_set_valid) {
3789 DRM_DEBUG_KMS("clock recovery not ok, reset");
3790 /* clear the flag as we are not reusing train set */
3791 intel_dp->train_set_valid = false;
3792 if (!intel_dp_reset_link_train(intel_dp, &DP,
3793 DP_TRAINING_PATTERN_1 |
3794 DP_LINK_SCRAMBLING_DISABLE)) {
3795 DRM_ERROR("failed to enable link training\n");
3801 /* Check to see if we've tried the max voltage */
3802 for (i = 0; i < intel_dp->lane_count; i++)
3803 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3805 if (i == intel_dp->lane_count) {
3807 if (loop_tries == 5) {
3808 DRM_ERROR("too many full retries, give up\n")
3811 intel_dp_reset_link_train(intel_dp, &DP,
3812 DP_TRAINING_PATTERN_1 |
3813 DP_LINK_SCRAMBLING_DISABLE);
3818 /* Check to see if we've tried the same voltage 5 times */
3819 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3821 if (voltage_tries == 5) {
3822 DRM_ERROR("too many voltage retries, give up\n");
3827 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3829 /* Update training set as requested by target */
3830 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3831 DRM_ERROR("failed to update link training\n");
/*
 * intel_dp_complete_link_train - channel-equalization phase.  Runs
 * training pattern 2 (or 3 for HBR2/TPS3-capable sinks), re-entering
 * clock recovery if it is lost, and retrying equalization a bounded
 * number of times before falling back to a full restart.
 *
 * NOTE(review): excerpt is missing the cr_tries/tries bound checks and
 * several braces; the visible flow matches upstream.
 */
3840 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3842 bool channel_eq = false;
3843 int tries, cr_tries;
3844 uint32_t DP = intel_dp->DP;
3845 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3847 /* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
3848 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
3849 training_pattern = DP_TRAINING_PATTERN_3;
3851 /* channel equalization */
3852 if (!intel_dp_set_link_train(intel_dp, &DP,
3854 DP_LINK_SCRAMBLING_DISABLE)) {
3855 DRM_ERROR("failed to start channel equalization\n");
3863 uint8_t link_status[DP_LINK_STATUS_SIZE];
3866 DRM_ERROR("failed to train DP, aborting\n");
3870 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3871 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3872 DRM_ERROR("failed to get link status\n");
3876 /* Make sure clock is still ok */
3877 if (!drm_dp_clock_recovery_ok(link_status,
3878 intel_dp->lane_count)) {
/* Clock recovery lost: restart from pattern 1. */
3879 intel_dp->train_set_valid = false;
3880 intel_dp_start_link_train(intel_dp);
3881 intel_dp_set_link_train(intel_dp, &DP,
3883 DP_LINK_SCRAMBLING_DISABLE);
3888 if (drm_dp_channel_eq_ok(link_status,
3889 intel_dp->lane_count)) {
3894 /* Try 5 times, then try clock recovery if that fails */
3896 intel_dp->train_set_valid = false;
3897 intel_dp_start_link_train(intel_dp);
3898 intel_dp_set_link_train(intel_dp, &DP,
3900 DP_LINK_SCRAMBLING_DISABLE);
3906 /* Update training set as requested by target */
3907 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3908 DRM_ERROR("failed to update link training\n");
3914 intel_dp_set_idle_link_train(intel_dp);
/* Remember the working drive settings for the next retrain. */
3919 intel_dp->train_set_valid = true;
3920 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End link training: put the link into normal (non-training) operation. */
3924 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3926 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3927 DP_TRAINING_PATTERN_DISABLE);
/*
 * intel_dp_link_down - take a non-DDI DP port down: first drop into the
 * idle training pattern, then clear the port-enable and audio bits,
 * applying the IBX transcoder-A workaround on the way.
 */
3931 intel_dp_link_down(struct intel_dp *intel_dp)
3933 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3934 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3935 enum port port = intel_dig_port->port;
3936 struct drm_device *dev = intel_dig_port->base.base.dev;
3937 struct drm_i915_private *dev_priv = dev->dev_private;
3938 uint32_t DP = intel_dp->DP;
/* DDI ports are taken down elsewhere; getting here would be a bug. */
3940 if (WARN_ON(HAS_DDI(dev)))
3943 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3946 DRM_DEBUG_KMS("\n");
/* Select the idle-pattern encoding for the port/PCH generation. */
3948 if ((IS_GEN7(dev) && port == PORT_A) ||
3949 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3950 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3951 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3953 if (IS_CHERRYVIEW(dev))
3954 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3956 DP &= ~DP_LINK_TRAIN_MASK;
3957 DP |= DP_LINK_TRAIN_PAT_IDLE;
3959 I915_WRITE(intel_dp->output_reg, DP);
3960 POSTING_READ(intel_dp->output_reg);
3962 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3963 I915_WRITE(intel_dp->output_reg, DP);
3964 POSTING_READ(intel_dp->output_reg);
3967 * HW workaround for IBX, we need to move the port
3968 * to transcoder A after disabling it to allow the
3969 * matching HDMI port to be enabled on transcoder A.
3971 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3972 /* always enable with pattern 1 (as per spec) */
3973 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3974 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3975 I915_WRITE(intel_dp->output_reg, DP);
3976 POSTING_READ(intel_dp->output_reg);
3979 I915_WRITE(intel_dp->output_reg, DP);
3980 POSTING_READ(intel_dp->output_reg);
/* Give the (e)DP panel time to power down before anything else. */
3983 msleep(intel_dp->panel_power_down_delay);
/*
 * intel_dp_get_dpcd - read and cache the sink's DPCD receiver
 * capabilities, then probe optional features: PSR/PSR2 (eDP only), TPS3
 * support, the eDP 1.4 intermediate link-rate table, and downstream port
 * info for branch devices.  Returns false if the sink can't be reached
 * or reports no DPCD.
 */
3987 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3989 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3990 struct drm_device *dev = dig_port->base.base.dev;
3991 struct drm_i915_private *dev_priv = dev->dev_private;
3994 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3995 sizeof(intel_dp->dpcd)) < 0)
3996 return false; /* aux transfer failed */
3998 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4000 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4001 return false; /* DPCD not present */
4003 /* Check if the panel supports PSR */
4004 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4005 if (is_edp(intel_dp)) {
4006 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4008 sizeof(intel_dp->psr_dpcd));
4009 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4010 dev_priv->psr.sink_support = true;
4011 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4014 if (INTEL_INFO(dev)->gen >= 9 &&
4015 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4016 uint8_t frame_sync_cap;
4018 dev_priv->psr.sink_support = true;
4019 intel_dp_dpcd_read_wake(&intel_dp->aux,
4020 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4021 &frame_sync_cap, 1);
4022 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4023 /* PSR2 needs frame sync as well */
4024 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4025 DRM_DEBUG_KMS("PSR2 %s on sink",
4026 dev_priv->psr.psr2_support ? "supported" : "not supported");
4030 /* Training Pattern 3 support, both source and sink */
4031 if (drm_dp_tps3_supported(intel_dp->dpcd) &&
4032 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
4033 intel_dp->use_tps3 = true;
4034 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
4036 intel_dp->use_tps3 = false;
4038 /* Intermediate frequency support */
4039 if (is_edp(intel_dp) &&
4040 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4041 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4042 (rev >= 0x03)) { /* eDp v1.4 or higher */
4043 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4046 intel_dp_dpcd_read_wake(&intel_dp->aux,
4047 DP_SUPPORTED_LINK_RATES,
4049 sizeof(sink_rates));
4051 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4052 int val = le16_to_cpu(sink_rates[i]);
4057 /* Value read is in kHz while drm clock is saved in deca-kHz */
4058 intel_dp->sink_rates[i] = (val * 200) / 10;
4060 intel_dp->num_sink_rates = i;
4063 intel_dp_print_rates(intel_dp);
4065 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4066 DP_DWN_STRM_PORT_PRESENT))
4067 return true; /* native DP sink */
4069 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4070 return true; /* no per-port downstream info */
4072 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4073 intel_dp->downstream_ports,
4074 DP_MAX_DOWNSTREAM_PORTS) < 0)
4075 return false; /* downstream port status fetch failed */
/*
 * intel_dp_probe_oui - log the sink and branch IEEE OUIs for debugging,
 * when the sink advertises OUI support.  Purely informational.
 */
4081 intel_dp_probe_oui(struct intel_dp *intel_dp)
4085 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4088 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4089 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4090 buf[0], buf[1], buf[2]);
4092 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4093 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4094 buf[0], buf[1], buf[2]);
/*
 * intel_dp_probe_mst - query DP_MSTM_CAP on a DP 1.2+ sink and enable or
 * disable the MST topology manager accordingly.  Returns the resulting
 * is_mst state (false for non-MST-capable sources or pre-1.2 sinks).
 */
4098 intel_dp_probe_mst(struct intel_dp *intel_dp)
4102 if (!intel_dp->can_mst)
4105 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4108 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4109 if (buf[0] & DP_MST_CAP) {
4110 DRM_DEBUG_KMS("Sink is MST capable\n");
4111 intel_dp->is_mst = true;
4113 DRM_DEBUG_KMS("Sink is not MST capable\n");
4114 intel_dp->is_mst = false;
4118 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4119 return intel_dp->is_mst;
/*
 * intel_dp_sink_crc_stop - clear DP_TEST_SINK_START in the sink's test
 * register to stop sink CRC generation, then re-enable IPS (disabled for
 * the measurement).  Returns 0 on success, negative on AUX failure.
 *
 * NOTE(review): excerpt is missing the ret declaration and error-return
 * lines inside the failure branches.
 */
4122 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4124 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4125 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4129 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4130 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4135 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4136 buf & ~DP_TEST_SINK_START) < 0) {
4137 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4142 intel_dp->sink_crc.started = false;
4144 hsw_enable_ips(intel_crtc);
/*
 * intel_dp_sink_crc_start - arm sink CRC calculation: verify the sink
 * supports TEST_CRC, record the current count, disable IPS (it would
 * perturb the CRC) and set DP_TEST_SINK_START.
 */
4148 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4150 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4151 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
/* A previous measurement may still be running; stop it first. */
4155 if (intel_dp->sink_crc.started) {
4156 ret = intel_dp_sink_crc_stop(intel_dp);
4161 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4164 if (!(buf & DP_TEST_CRC_SUPPORTED))
4167 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4169 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4172 hsw_disable_ips(intel_crtc);
4174 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4175 buf | DP_TEST_SINK_START) < 0) {
/* Failed to start: restore IPS before bailing. */
4176 hsw_enable_ips(intel_crtc);
4180 intel_dp->sink_crc.started = true;
/*
 * intel_dp_sink_crc - fetch one fresh 6-byte CRC from the sink.  Polls
 * across vblanks until the sink's TEST_COUNT advances and the CRC
 * differs from the previous read, bounded by a retry budget; reports
 * whether the counter looks stuck or the panel never produced a CRC.
 *
 * NOTE(review): excerpt is missing the attempts/ret/count declarations
 * and the do { opening of the poll loop.
 */
4184 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4186 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4187 struct drm_device *dev = dig_port->base.base.dev;
4188 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4194 ret = intel_dp_sink_crc_start(intel_dp);
4199 intel_wait_for_vblank(dev, intel_crtc->pipe);
4201 if (drm_dp_dpcd_readb(&intel_dp->aux,
4202 DP_TEST_SINK_MISC, &buf) < 0) {
4206 count = buf & DP_TEST_COUNT_MASK;
4209 * Count might be reset during the loop. In this case
4210 * last known count needs to be reset as well.
4213 intel_dp->sink_crc.last_count = 0;
4215 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
/* Stuck sink: same count AND same CRC as the last read. */
4220 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4221 !memcmp(intel_dp->sink_crc.last_crc, crc,
4224 } while (--attempts && (count == 0 || old_equal_new));
4226 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4227 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4229 if (attempts == 0) {
4230 if (old_equal_new) {
4231 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4233 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4240 intel_dp_sink_crc_stop(intel_dp);
/* Read the 1-byte sink IRQ vector; true when exactly one byte arrived. */
4245 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4247 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4248 DP_DEVICE_SERVICE_IRQ_VECTOR,
4249 sink_irq_vector, 1) == 1;
/*
 * Read the 14-byte ESI (event status indicator) block used in MST mode.
 * NOTE(review): excerpt is missing the trailing success check/return.
 */
4253 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4257 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4259 sink_irq_vector, 14);
/* Compliance stub: link-training autotest is unconditionally ACKed. */
4266 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4268 uint8_t test_result = DP_TEST_ACK;
/* Compliance stub: video-pattern autotest is not implemented (NAK). */
4272 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4274 uint8_t test_result = DP_TEST_NAK;
/*
 * intel_dp_autotest_edid - DP CTS EDID-read test handler.  If the last
 * EDID read failed (NAK/DEFER/corruption), request failsafe resolution;
 * otherwise write the checksum of the last EDID block back to the sink
 * and ACK with the checksum-written flag.
 *
 * NOTE(review): excerpt is missing the checksum argument lines of the
 * DPCD write (around 4310-4311).
 */
4278 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4280 uint8_t test_result = DP_TEST_NAK;
4281 struct intel_connector *intel_connector = intel_dp->attached_connector;
4282 struct drm_connector *connector = &intel_connector->base;
4284 if (intel_connector->detect_edid == NULL ||
4285 connector->edid_corrupt ||
4286 intel_dp->aux.i2c_defer_count > 6) {
4287 /* Check EDID read for NACKs, DEFERs and corruption
4288 * (DP CTS 1.2 Core r1.1)
4289 * 4.2.2.4 : Failed EDID read, I2C_NAK
4290 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4291 * 4.2.2.6 : EDID corruption detected
4292 * Use failsafe mode for all cases
4294 if (intel_dp->aux.i2c_nack_count > 0 ||
4295 intel_dp->aux.i2c_defer_count > 0)
4296 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4297 intel_dp->aux.i2c_nack_count,
4298 intel_dp->aux.i2c_defer_count);
4299 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4301 struct edid *block = intel_connector->detect_edid;
4303 /* We have to write the checksum
4304 * of the last block read
4306 block += intel_connector->detect_edid->extensions;
4308 if (!drm_dp_dpcd_write(&intel_dp->aux,
4309 DP_TEST_EDID_CHECKSUM,
4312 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4314 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4315 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4318 /* Set test active flag here so userspace doesn't interrupt things */
4319 intel_dp->compliance_test_active = 1;
/* Compliance stub: PHY-pattern autotest is not implemented (NAK). */
4324 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4326 uint8_t test_result = DP_TEST_NAK;
/*
 * intel_dp_handle_test_request - service a DP compliance AUTOMATED_TEST
 * request: reset the per-test state, read DP_TEST_REQUEST, dispatch to
 * the matching autotest handler and write the ACK/NAK response back to
 * DP_TEST_RESPONSE.
 *
 * NOTE(review): excerpt is missing the rxdata/status declarations and
 * the break statements between switch cases.
 */
4330 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4332 uint8_t response = DP_TEST_NAK;
4336 intel_dp->compliance_test_active = 0;
4337 intel_dp->compliance_test_type = 0;
4338 intel_dp->compliance_test_data = 0;
/* Reset AUX error counters so the EDID test sees a clean slate. */
4340 intel_dp->aux.i2c_nack_count = 0;
4341 intel_dp->aux.i2c_defer_count = 0;
4343 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4345 DRM_DEBUG_KMS("Could not read test request from sink\n");
4350 case DP_TEST_LINK_TRAINING:
4351 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4352 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4353 response = intel_dp_autotest_link_training(intel_dp);
4355 case DP_TEST_LINK_VIDEO_PATTERN:
4356 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4357 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4358 response = intel_dp_autotest_video_pattern(intel_dp);
4360 case DP_TEST_LINK_EDID_READ:
4361 DRM_DEBUG_KMS("EDID test requested\n");
4362 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4363 response = intel_dp_autotest_edid(intel_dp);
4365 case DP_TEST_LINK_PHY_TEST_PATTERN:
4366 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4367 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4368 response = intel_dp_autotest_phy_pattern(intel_dp);
4371 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4376 status = drm_dp_dpcd_write(&intel_dp->aux,
4380 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * intel_dp_check_mst_status - MST interrupt servicing loop.  Reads the
 * sink's ESI block, retrains the link if channel EQ dropped, hands the
 * ESI to the topology manager, and acks handled events back to the sink
 * (with up to 3 write retries).  On repeated ESI read failure the sink
 * is demoted out of MST mode and a hotplug event is sent.
 *
 * NOTE(review): excerpt is missing several loop/brace lines, including
 * the outer "go again" control flow.
 */
4384 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4388 if (intel_dp->is_mst) {
4393 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4397 /* check link status - esi[10] = 0x200c */
4398 if (intel_dp->active_mst_links &&
4399 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4400 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4401 intel_dp_start_link_train(intel_dp);
4402 intel_dp_complete_link_train(intel_dp);
4403 intel_dp_stop_link_train(intel_dp);
4406 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4407 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
/* Ack the handled service-IRQ bits; retry the AUX write a few times. */
4410 for (retry = 0; retry < 3; retry++) {
4412 wret = drm_dp_dpcd_write(&intel_dp->aux,
4413 DP_SINK_COUNT_ESI+1,
4420 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4422 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
/* ESI unreadable: assume the MST device failed and fall back to SST. */
4430 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4431 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4432 intel_dp->is_mst = false;
4433 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4434 /* send a hotplug event */
4435 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4442 * According to DP spec
4445 * 2. Configure link according to Receiver Capabilities
4446 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4447 * 4. Check link status on receipt of hot-plug interrupt
/*
 * intel_dp_check_link_status - short-pulse (IRQ_HPD) handler: verify the
 * sink is still reachable, clear any pending sink service interrupt, and
 * retrain the link if channel equalization has been lost.  Caller must
 * hold connection_mutex (asserted below).
 */
4450 intel_dp_check_link_status(struct intel_dp *intel_dp)
4452 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4453 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4455 u8 link_status[DP_LINK_STATUS_SIZE];
4457 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
/* Nothing to do unless the encoder is driving an active pipe. */
4459 if (!intel_encoder->base.crtc)
4462 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4465 /* Try to read receiver status if the link appears to be up */
4466 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4470 /* Now read the DPCD to see if it's actually running */
4471 if (!intel_dp_get_dpcd(intel_dp)) {
4475 /* Try to read the source of the interrupt */
4476 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4477 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4478 /* Clear interrupt source */
4479 drm_dp_dpcd_writeb(&intel_dp->aux,
4480 DP_DEVICE_SERVICE_IRQ_VECTOR,
4483 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4484 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n")
4485 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4486 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4489 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4490 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4491 intel_encoder->base.name);
4492 intel_dp_start_link_train(intel_dp);
4493 intel_dp_complete_link_train(intel_dp);
4494 intel_dp_stop_link_train(intel_dp);
4498 /* XXX this is probably wrong for multiple downstream ports */
/*
 * intel_dp_detect_dpcd - decide connector status from DPCD alone.
 * Native sinks are connected if the DPCD reads back; branch devices are
 * judged by SINK_COUNT (when HPD-aware), then by a DDC probe, then by
 * downstream-port type heuristics for unreliable port kinds.
 */
4500 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4502 uint8_t *dpcd = intel_dp->dpcd;
4505 if (!intel_dp_get_dpcd(intel_dp))
4506 return connector_status_disconnected;
4508 /* if there's no downstream port, we're done */
4509 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4510 return connector_status_connected;
4512 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4513 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4514 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4517 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4519 return connector_status_unknown;
4521 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4522 : connector_status_disconnected;
4525 /* If no HPD, poke DDC gently */
4526 if (drm_probe_ddc(&intel_dp->aux.ddc))
4527 return connector_status_connected;
4529 /* Well we tried, say unknown for unreliable port types */
4530 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4531 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4532 if (type == DP_DS_PORT_TYPE_VGA ||
4533 type == DP_DS_PORT_TYPE_NON_EDID)
4534 return connector_status_unknown;
4536 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4537 DP_DWN_STRM_PORT_TYPE_MASK;
4538 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4539 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4540 return connector_status_unknown;
4543 /* Anything else is out of spec, warn and ignore */
4544 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4545 return connector_status_disconnected;
4548 static enum drm_connector_status
/*
 * edp_detect - eDP is a fixed panel; defer to the panel/lid detection
 * and treat "unknown" as connected.
 */
4549 edp_detect(struct intel_dp *intel_dp)
4551 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4552 enum drm_connector_status status;
4554 status = intel_panel_detect(dev);
4555 if (status == connector_status_unknown)
4556 status = connector_status_connected;
/* IBX PCH: live hotplug state for a digital port, from SDEISR. */
4561 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4562 struct intel_digital_port *port)
4566 switch (port->port) {
4570 bit = SDE_PORTB_HOTPLUG;
4573 bit = SDE_PORTC_HOTPLUG;
4576 bit = SDE_PORTD_HOTPLUG;
4579 MISSING_CASE(port->port);
4583 return I915_READ(SDEISR) & bit;
/* CPT/PPT PCH: live hotplug state, from the CPT bits in SDEISR. */
4586 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4587 struct intel_digital_port *port)
4591 switch (port->port) {
4595 bit = SDE_PORTB_HOTPLUG_CPT;
4598 bit = SDE_PORTC_HOTPLUG_CPT;
4601 bit = SDE_PORTD_HOTPLUG_CPT;
4604 MISSING_CASE(port->port);
4608 return I915_READ(SDEISR) & bit;
/* G4x: live hotplug state from PORT_HOTPLUG_STAT. */
4611 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4612 struct intel_digital_port *port)
4616 switch (port->port) {
4618 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4621 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4624 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4627 MISSING_CASE(port->port);
4631 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* VLV: live hotplug state from PORT_HOTPLUG_STAT (VLV bit layout). */
4634 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4635 struct intel_digital_port *port)
4639 switch (port->port) {
4641 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4644 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4647 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4650 MISSING_CASE(port->port);
4654 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* BXT: live hotplug state from the display-engine port ISR. */
4657 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4658 struct intel_digital_port *port)
4662 switch (port->port) {
4664 bit = BXT_DE_PORT_HP_DDIA;
4667 bit = BXT_DE_PORT_HP_DDIB;
4670 bit = BXT_DE_PORT_HP_DDIC;
4673 MISSING_CASE(port->port);
4677 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4681 * intel_digital_port_connected - is the specified port connected?
4682 * @dev_priv: i915 private structure
4683 * @port: the port to test
4685 * Return %true if @port is connected, %false otherwise.
/* Platform dispatch; order matters: IBX before the generic PCH check. */
4687 static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4688 struct intel_digital_port *port)
4690 if (HAS_PCH_IBX(dev_priv))
4691 return ibx_digital_port_connected(dev_priv, port);
4692 if (HAS_PCH_SPLIT(dev_priv))
4693 return cpt_digital_port_connected(dev_priv, port);
4694 else if (IS_BROXTON(dev_priv))
4695 return bxt_digital_port_connected(dev_priv, port);
4696 else if (IS_VALLEYVIEW(dev_priv))
4697 return vlv_digital_port_connected(dev_priv, port);
4699 return g4x_digital_port_connected(dev_priv, port);
4702 static enum drm_connector_status
/*
 * ironlake_dp_detect - PCH-split detect: bail early on hotplug live
 * status, otherwise fall through to the DPCD-based probe.
 */
4703 ironlake_dp_detect(struct intel_dp *intel_dp)
4705 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4706 struct drm_i915_private *dev_priv = dev->dev_private;
4707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4709 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4710 return connector_status_disconnected;
4712 return intel_dp_detect_dpcd(intel_dp);
4715 static enum drm_connector_status
/*
 * g4x_dp_detect - pre-PCH detect: eDP uses panel/lid detection, other
 * ports check live hotplug status before the DPCD probe.
 */
4716 g4x_dp_detect(struct intel_dp *intel_dp)
4718 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4721 /* Can't disconnect eDP, but you can close the lid... */
4722 if (is_edp(intel_dp)) {
4723 enum drm_connector_status status;
4725 status = intel_panel_detect(dev);
4726 if (status == connector_status_unknown)
4727 status = connector_status_connected;
4731 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4732 return connector_status_disconnected;
4734 return intel_dp_detect_dpcd(intel_dp);
4737 static struct edid *
/*
 * intel_dp_get_edid - return a fresh EDID for the connector: a duplicate
 * of the cached (e.g. VBT/fixed-panel) EDID when one exists, otherwise a
 * live DDC read over AUX.  IS_ERR cache entries mean "known broken".
 */
4738 intel_dp_get_edid(struct intel_dp *intel_dp)
4740 struct intel_connector *intel_connector = intel_dp->attached_connector;
4742 /* use cached edid if we have one */
4743 if (intel_connector->edid) {
4745 if (IS_ERR(intel_connector->edid))
4748 return drm_edid_duplicate(intel_connector->edid);
4750 return drm_get_edid(&intel_connector->base,
4751 &intel_dp->aux.ddc);
/*
 * intel_dp_set_edid - fetch and cache the EDID on the connector and
 * derive has_audio from it (unless the user forced audio on/off).
 */
4755 intel_dp_set_edid(struct intel_dp *intel_dp)
4757 struct intel_connector *intel_connector = intel_dp->attached_connector;
4760 edid = intel_dp_get_edid(intel_dp);
4761 intel_connector->detect_edid = edid;
4763 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4764 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4766 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached detect-time EDID and the audio capability with it. */
4770 intel_dp_unset_edid(struct intel_dp *intel_dp)
4772 struct intel_connector *intel_connector = intel_dp->attached_connector;
4774 kfree(intel_connector->detect_edid);
4775 intel_connector->detect_edid = NULL;
4777 intel_dp->has_audio = false;
/*
 * Grab the display power domain reference for this DP port and return the
 * domain so the caller can hand it back to intel_dp_power_put() later.
 */
4780 static enum intel_display_power_domain
4781 intel_dp_power_get(struct intel_dp *dp)
4783 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4784 enum intel_display_power_domain power_domain;
4786 power_domain = intel_display_port_power_domain(encoder);
4787 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4789 return power_domain;
/* Release a power domain reference taken by intel_dp_power_get(). */
4793 intel_dp_power_put(struct intel_dp *dp,
4794 enum intel_display_power_domain power_domain)
4796 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4797 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector_funcs.detect() implementation. Determines whether a sink is
 * present, caches its EDID, probes OUI/MST, and services any pending sink
 * IRQ (automated test requests). MST-managed ports report disconnected here
 * because their monitors are exposed via separate MST connectors.
 */
4800 static enum drm_connector_status
4801 intel_dp_detect(struct drm_connector *connector, bool force)
4803 struct intel_dp *intel_dp = intel_attached_dp(connector);
4804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4805 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4806 struct drm_device *dev = connector->dev;
4807 enum drm_connector_status status;
4808 enum intel_display_power_domain power_domain;
4812 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4813 connector->base.id, connector->name);
/* Invalidate any EDID cached by a previous detect cycle. */
4814 intel_dp_unset_edid(intel_dp);
4816 if (intel_dp->is_mst) {
4817 /* MST devices are disconnected from a monitor POV */
4818 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4819 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4820 return connector_status_disconnected;
4823 power_domain = intel_dp_power_get(intel_dp);
4825 /* Can't disconnect eDP, but you can close the lid... */
4826 if (is_edp(intel_dp))
4827 status = edp_detect(intel_dp);
4828 else if (HAS_PCH_SPLIT(dev))
4829 status = ironlake_dp_detect(intel_dp);
4831 status = g4x_dp_detect(intel_dp);
4832 if (status != connector_status_connected)
4835 intel_dp_probe_oui(intel_dp);
4837 ret = intel_dp_probe_mst(intel_dp);
4839 /* if we are in MST mode then this connector
4840 won't appear connected or have anything with EDID on it */
4841 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4842 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4843 status = connector_status_disconnected;
4847 intel_dp_set_edid(intel_dp);
4849 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4850 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4851 status = connector_status_connected;
4853 /* Try to read the source of the interrupt */
4854 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4855 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4856 /* Clear interrupt source */
4857 drm_dp_dpcd_writeb(&intel_dp->aux,
4858 DP_DEVICE_SERVICE_IRQ_VECTOR,
4861 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4862 intel_dp_handle_test_request(intel_dp);
4863 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4864 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4868 intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector_funcs.force() implementation: refresh the cached EDID for
 * a connector the user has forced to a given state, without running full
 * detection. Does nothing if the connector isn't marked connected.
 */
4873 intel_dp_force(struct drm_connector *connector)
4875 struct intel_dp *intel_dp = intel_attached_dp(connector);
4876 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4877 enum intel_display_power_domain power_domain;
4879 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4880 connector->base.id, connector->name);
4881 intel_dp_unset_edid(intel_dp);
4883 if (connector->status != connector_status_connected)
4886 power_domain = intel_dp_power_get(intel_dp);
4888 intel_dp_set_edid(intel_dp);
4890 intel_dp_power_put(intel_dp, power_domain);
4892 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4893 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes() implementation. Populates the
 * connector's mode list from the EDID cached at detect time; for eDP with
 * no EDID, falls back to the panel's fixed mode.
 */
4896 static int intel_dp_get_modes(struct drm_connector *connector)
4898 struct intel_connector *intel_connector = to_intel_connector(connector);
4901 edid = intel_connector->detect_edid;
4903 int ret = intel_connector_update_modes(connector, edid);
4908 /* if eDP has no EDID, fall back to fixed mode */
4909 if (is_edp(intel_attached_dp(connector)) &&
4910 intel_connector->panel.fixed_mode) {
4911 struct drm_display_mode *mode;
4913 mode = drm_mode_duplicate(connector->dev,
4914 intel_connector->panel.fixed_mode);
4916 drm_mode_probed_add(connector, mode);
/* Report whether the cached detect-time EDID advertises audio support. */
4925 intel_dp_detect_audio(struct drm_connector *connector)
4927 bool has_audio = false;
4930 edid = to_intel_connector(connector)->detect_edid;
4932 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property() implementation. Handles the
 * force-audio and broadcast-RGB properties, plus the eDP scaling-mode
 * property. When a change actually affects output state and a CRTC is
 * bound, the mode is restored so the change takes effect.
 */
4938 intel_dp_set_property(struct drm_connector *connector,
4939 struct drm_property *property,
4942 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4943 struct intel_connector *intel_connector = to_intel_connector(connector);
4944 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4945 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4948 ret = drm_object_property_set_value(&connector->base, property, val);
4952 if (property == dev_priv->force_audio_property) {
/* No-op if the requested force-audio state is already set. */
4956 if (i == intel_dp->force_audio)
4959 intel_dp->force_audio = i;
4961 if (i == HDMI_AUDIO_AUTO)
4962 has_audio = intel_dp_detect_audio(connector);
4964 has_audio = (i == HDMI_AUDIO_ON);
4966 if (has_audio == intel_dp->has_audio)
4969 intel_dp->has_audio = has_audio;
4973 if (property == dev_priv->broadcast_rgb_property) {
4974 bool old_auto = intel_dp->color_range_auto;
4975 bool old_range = intel_dp->limited_color_range;
4978 case INTEL_BROADCAST_RGB_AUTO:
4979 intel_dp->color_range_auto = true;
4981 case INTEL_BROADCAST_RGB_FULL:
4982 intel_dp->color_range_auto = false;
4983 intel_dp->limited_color_range = false;
4985 case INTEL_BROADCAST_RGB_LIMITED:
4986 intel_dp->color_range_auto = false;
4987 intel_dp->limited_color_range = true;
/* Skip the modeset if nothing effectively changed. */
4993 if (old_auto == intel_dp->color_range_auto &&
4994 old_range == intel_dp->limited_color_range)
5000 if (is_edp(intel_dp) &&
5001 property == connector->dev->mode_config.scaling_mode_property) {
5002 if (val == DRM_MODE_SCALE_NONE) {
5003 DRM_DEBUG_KMS("no scaling not supported\n")
;
5007 if (intel_connector->panel.fitting_mode == val) {
5008 /* the eDP scaling property is not changed */
5011 intel_connector->panel.fitting_mode = val;
5019 if (intel_encoder->base.crtc)
5020 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy() implementation: free cached EDIDs, tear
 * down eDP panel state (keyed off connector type, since the encoder may
 * already be gone), and run the generic connector cleanup.
 */
5026 intel_dp_connector_destroy(struct drm_connector *connector)
5028 struct intel_connector *intel_connector = to_intel_connector(connector);
5030 kfree(intel_connector->detect_edid);
/* The cached edid may be an ERR_PTR sentinel; only free a real EDID. */
5032 if (!IS_ERR_OR_NULL(intel_connector->edid))
5033 kfree(intel_connector->edid);
5035 /* Can't call is_edp() since the encoder may have been destroyed
5037 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5038 intel_panel_fini(&intel_connector->panel);
5040 drm_connector_cleanup(connector);
/*
 * Encoder teardown: unregister the AUX channel, clean up MST state, make
 * sure eDP VDD is really off (the delayed-off work may still be pending),
 * unregister the reboot notifier, and free the digital port.
 */
5044 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5046 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5047 struct intel_dp *intel_dp = &intel_dig_port->dp;
5049 drm_dp_aux_unregister(&intel_dp->aux);
5050 intel_dp_mst_encoder_cleanup(intel_dig_port);
5051 if (is_edp(intel_dp)) {
5052 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5054 * vdd might still be enabled due to the delayed vdd off.
5055 * Make sure vdd is actually turned off here.
5058 edp_panel_vdd_off_sync(intel_dp);
5059 pps_unlock(intel_dp);
5061 if (intel_dp->edp_notifier.notifier_call) {
5062 unregister_reboot_notifier(&intel_dp->edp_notifier);
5063 intel_dp->edp_notifier.notifier_call = NULL;
5066 drm_encoder_cleanup(encoder);
5067 kfree(intel_dig_port);
/*
 * Suspend hook: for eDP only, cancel the pending delayed VDD-off work and
 * force VDD off synchronously so the panel is powered down before suspend.
 */
5070 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5072 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5074 if (!is_edp(intel_dp))
5078 * vdd might still be enabled due to the delayed vdd off.
5079 * Make sure vdd is actually turned off here.
5081 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5083 edp_panel_vdd_off_sync(intel_dp);
5084 pps_unlock(intel_dp);
/*
 * Align driver state with hardware when the BIOS left panel VDD enabled at
 * boot/resume: take the matching power domain reference and schedule the
 * normal delayed VDD-off. Requires pps_mutex to be held.
 */
5087 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5090 struct drm_device *dev = intel_dig_port->base.base.dev;
5091 struct drm_i915_private *dev_priv = dev->dev_private;
5092 enum intel_display_power_domain power_domain;
5094 lockdep_assert_held(&dev_priv->pps_mutex);
5096 if (!edp_have_panel_vdd(intel_dp))
5100 * The VDD bit needs a power domain reference, so if the bit is
5101 * already enabled when we boot or resume, grab this reference and
5102 * schedule a vdd off, so we don't hold on to the reference
5105 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5106 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5107 intel_display_power_get(dev_priv, power_domain);
5109 edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset() implementation, eDP only: re-read the power
 * sequencer assignment the BIOS may have changed (VLV) and sanitize any
 * BIOS-enabled VDD state.
 */
5112 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5114 struct intel_dp *intel_dp;
5116 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5119 intel_dp = enc_to_intel_dp(encoder);
5124 * Read out the current power sequencer assignment,
5125 * in case the BIOS did something with it.
5127 if (IS_VALLEYVIEW(encoder->dev))
5128 vlv_initial_power_sequencer_setup(intel_dp);
5130 intel_edp_panel_vdd_sanitize(intel_dp);
5132 pps_unlock(intel_dp);
/* Connector vtable for DP/eDP; atomic helpers fill in the generic ops. */
5135 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5136 .dpms = drm_atomic_helper_connector_dpms,
5137 .detect = intel_dp_detect,
5138 .force = intel_dp_force,
5139 .fill_modes = drm_helper_probe_single_connector_modes,
5140 .set_property = intel_dp_set_property,
5141 .atomic_get_property = intel_connector_atomic_get_property,
5142 .destroy = intel_dp_connector_destroy,
5143 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5144 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe helpers: mode enumeration, validation and encoder selection. */
5147 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5148 .get_modes = intel_dp_get_modes,
5149 .mode_valid = intel_dp_mode_valid,
5150 .best_encoder = intel_best_encoder,
/* Encoder vtable: reset handles BIOS state, destroy tears the port down. */
5153 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5154 .reset = intel_dp_encoder_reset,
5155 .destroy = intel_dp_encoder_destroy,
/*
 * Hotplug IRQ handler for a DP digital port. Long pulses on eDP are
 * ignored (they can be self-induced by VDD-off and would loop forever).
 * Otherwise: re-read DPCD, re-probe OUI/MST, service MST ESI or, for
 * non-MST short pulses, check link status now rather than waiting for the
 * regular hotplug path. Returns an irqreturn_t.
 */
5159 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5161 struct intel_dp *intel_dp = &intel_dig_port->dp;
5162 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5163 struct drm_device *dev = intel_dig_port->base.base.dev;
5164 struct drm_i915_private *dev_priv = dev->dev_private;
5165 enum intel_display_power_domain power_domain;
5166 enum irqreturn ret = IRQ_NONE;
5168 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5169 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5171 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5173 * vdd off can generate a long pulse on eDP which
5174 * would require vdd on to handle it, and thus we
5175 * would end up in an endless cycle of
5176 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5178 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5179 port_name(intel_dig_port->port));
5183 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5184 port_name(intel_dig_port->port),
5185 long_hpd ? "long" : "short");
5187 power_domain = intel_display_port_power_domain(intel_encoder);
5188 intel_display_power_get(dev_priv, power_domain);
5191 /* indicate that we need to restart link training */
5192 intel_dp->train_set_valid = false;
5194 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5197 if (!intel_dp_get_dpcd(intel_dp)) {
5201 intel_dp_probe_oui(intel_dp);
5203 if (!intel_dp_probe_mst(intel_dp))
5207 if (intel_dp->is_mst) {
5208 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5212 if (!intel_dp->is_mst) {
5214 * we'll check the link status via the normal hot plug path later -
5215 * but for short hpds we should check it now
5217 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5218 intel_dp_check_link_status(intel_dp);
5219 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5227 /* if we were in MST mode, and device is not there get out of MST mode */
5228 if (intel_dp->is_mst) {
5229 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5230 intel_dp->is_mst = false;
5231 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5234 intel_display_power_put(dev_priv, power_domain);
5239 /* Return which DP Port should be selected for Transcoder DP control */
5241 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5243 struct drm_device *dev = crtc->dev;
5244 struct intel_encoder *intel_encoder;
5245 struct intel_dp *intel_dp;
/* Walk the encoders bound to this CRTC looking for a DP/eDP output. */
5247 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5248 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5250 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5251 intel_encoder->type == INTEL_OUTPUT_EDP)
5252 return intel_dp->output_reg;
5258 /* check the VBT to see whether the eDP is on DP-D port */
5259 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5261 struct drm_i915_private *dev_priv = dev->dev_private;
5262 union child_device_config *p_child;
/* Map DRM port enum to the VBT DVO-port encoding. */
5264 static const short port_mapping[] = {
5265 [PORT_B] = PORT_IDPB,
5266 [PORT_C] = PORT_IDPC,
5267 [PORT_D] = PORT_IDPD,
/* No VBT child devices at all -> nothing to match against. */
5273 if (!dev_priv->vbt.child_dev_num)
5276 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5277 p_child = dev_priv->vbt.child_dev + i;
5279 if (p_child->common.dvo_port == port_mapping[port] &&
5280 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5281 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the DP connector properties: force-audio, broadcast-RGB, and for
 * eDP the scaling-mode property (defaulting to aspect-preserving scaling).
 */
5288 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5290 struct intel_connector *intel_connector = to_intel_connector(connector);
5292 intel_attach_force_audio_property(connector);
5293 intel_attach_broadcast_rgb_property(connector);
5294 intel_dp->color_range_auto = true;
5296 if (is_edp(intel_dp)) {
5297 drm_mode_create_scaling_mode_property(connector->dev);
5298 drm_object_attach_property(
5300 connector->dev->mode_config.scaling_mode_property,
5301 DRM_MODE_SCALE_ASPECT);
5302 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/* Seed the panel power-sequencing timestamps to "now" at init time. */
5306 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5308 intel_dp->last_power_cycle = jiffies;
5309 intel_dp->last_power_on = jiffies;
5310 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power-sequencing delays (intel_dp->pps_delays) by
 * taking the max of the current hardware register values and the VBT
 * values, falling back to the eDP spec limits when both are zero. Also
 * derives the driver's ready-to-use delay fields (in units of ms via
 * get_delay). Requires pps_mutex; no-op if already initialized.
 */
5314 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5315 struct intel_dp *intel_dp)
5317 struct drm_i915_private *dev_priv = dev->dev_private;
5318 struct edp_power_seq cur, vbt, spec,
5319 *final = &intel_dp->pps_delays;
5320 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5321 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5323 lockdep_assert_held(&dev_priv->pps_mutex);
5325 /* already initialized? */
5326 if (final->t11_t12 != 0)
/* Select the per-platform PPS register set. */
5329 if (IS_BROXTON(dev)) {
5331 * TODO: BXT has 2 sets of PPS registers.
5332 * Correct Register for Broxton need to be identified
5333 * using VBT. hardcoding for now
5335 pp_ctrl_reg = BXT_PP_CONTROL(0);
5336 pp_on_reg = BXT_PP_ON_DELAYS(0);
5337 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5338 } else if (HAS_PCH_SPLIT(dev)) {
5339 pp_ctrl_reg = PCH_PP_CONTROL;
5340 pp_on_reg = PCH_PP_ON_DELAYS;
5341 pp_off_reg = PCH_PP_OFF_DELAYS;
5342 pp_div_reg = PCH_PP_DIVISOR;
5344 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5346 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5347 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5348 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5349 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5352 /* Workaround: Need to write PP_CONTROL with the unlock key as
5353 * the very first thing. */
5354 pp_ctl = ironlake_get_pp_control(intel_dp);
5356 pp_on = I915_READ(pp_on_reg);
5357 pp_off = I915_READ(pp_off_reg);
5358 if (!IS_BROXTON(dev)) {
5359 I915_WRITE(pp_ctrl_reg, pp_ctl);
5360 pp_div = I915_READ(pp_div_reg);
5363 /* Pull timing values out of registers */
5364 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5365 PANEL_POWER_UP_DELAY_SHIFT;
5367 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5368 PANEL_LIGHT_ON_DELAY_SHIFT;
5370 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5371 PANEL_LIGHT_OFF_DELAY_SHIFT;
5373 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5374 PANEL_POWER_DOWN_DELAY_SHIFT;
/* BXT keeps the power-cycle delay in PP_CONTROL, 1-based. */
5376 if (IS_BROXTON(dev)) {
5377 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5378 BXT_POWER_CYCLE_DELAY_SHIFT;
5380 cur.t11_t12 = (tmp - 1) * 1000;
5384 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5385 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5388 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5389 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5391 vbt = dev_priv->vbt.edp_pps;
5393 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5394 * our hw here, which are all in 100usec. */
5395 spec.t1_t3 = 210 * 10;
5396 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5397 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5398 spec.t10 = 500 * 10;
5399 /* This one is special and actually in units of 100ms, but zero
5400 * based in the hw (so we need to add 100 ms). But the sw vbt
5401 * table multiplies it with 1000 to make it in units of 100usec,
5403 spec.t11_t12 = (510 + 100) * 10;
5405 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5406 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5408 /* Use the max of the register settings and vbt. If both are
5409 * unset, fall back to the spec limits. */
5410 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5412 max(cur.field, vbt.field))
5413 assign_final(t1_t3);
5417 assign_final(t11_t12);
5420 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5421 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5422 intel_dp->backlight_on_delay = get_delay(t8);
5423 intel_dp->backlight_off_delay = get_delay(t9);
5424 intel_dp->panel_power_down_delay = get_delay(t10);
5425 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5428 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5429 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5430 intel_dp->panel_power_cycle_delay);
5432 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5433 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the computed PPS delays (intel_dp->pps_delays) into the
 * hardware power-sequencer registers for the current platform, including
 * the reference-clock divisor and (where applicable) the port-select
 * bits. Requires pps_mutex to be held.
 */
5437 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5438 struct intel_dp *intel_dp)
5440 struct drm_i915_private *dev_priv = dev->dev_private;
5441 u32 pp_on, pp_off, pp_div, port_sel = 0;
5442 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5443 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5444 enum port port = dp_to_dig_port(intel_dp)->port;
5445 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5447 lockdep_assert_held(&dev_priv->pps_mutex);
/* Select the per-platform PPS register set (mirrors the init path). */
5449 if (IS_BROXTON(dev)) {
5451 * TODO: BXT has 2 sets of PPS registers.
5452 * Correct Register for Broxton need to be identified
5453 * using VBT. hardcoding for now
5455 pp_ctrl_reg = BXT_PP_CONTROL(0);
5456 pp_on_reg = BXT_PP_ON_DELAYS(0);
5457 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5459 } else if (HAS_PCH_SPLIT(dev)) {
5460 pp_on_reg = PCH_PP_ON_DELAYS;
5461 pp_off_reg = PCH_PP_OFF_DELAYS;
5462 pp_div_reg = PCH_PP_DIVISOR;
5464 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5466 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5467 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5468 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5472 * And finally store the new values in the power sequencer. The
5473 * backlight delays are set to 1 because we do manual waits on them. For
5474 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5475 * we'll end up waiting for the backlight off delay twice: once when we
5476 * do the manual sleep, and once when we disable the panel and wait for
5477 * the PP_STATUS bit to become zero.
5479 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5480 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5481 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5482 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5483 /* Compute the divisor for the pp clock, simply match the Bspec
5485 if (IS_BROXTON(dev)) {
5486 pp_div = I915_READ(pp_ctrl_reg);
5487 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5488 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5489 << BXT_POWER_CYCLE_DELAY_SHIFT);
5491 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5492 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5493 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5496 /* Haswell doesn't have any port selection bits for the panel
5497 * power sequencer any more. */
5498 if (IS_VALLEYVIEW(dev)) {
5499 port_sel = PANEL_PORT_SELECT_VLV(port);
5500 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5502 port_sel = PANEL_PORT_SELECT_DPA;
5504 port_sel = PANEL_PORT_SELECT_DPD;
5509 I915_WRITE(pp_on_reg, pp_on);
5510 I915_WRITE(pp_off_reg, pp_off);
/* On BXT the power-cycle delay lives in PP_CONTROL, not a divisor reg. */
5511 if (IS_BROXTON(dev))
5512 I915_WRITE(pp_ctrl_reg, pp_div);
5514 I915_WRITE(pp_div_reg, pp_div);
5516 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5517 I915_READ(pp_on_reg),
5518 I915_READ(pp_off_reg),
5520 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5521 I915_READ(pp_div_reg));
5525 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5527 * @refresh_rate: RR to be programmed
5529 * This function gets called when refresh rate (RR) has to be changed from
5530 * one frequency to another. Switches can be between high and low RR
5531 * supported by the panel or to any other RR based on media playback (in
5532 * this case, RR value needs to be passed from user space).
5534 * The caller of this function needs to take a lock on dev_priv->drrs.
5536 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5538 struct drm_i915_private *dev_priv = dev->dev_private;
5539 struct intel_encoder *encoder;
5540 struct intel_digital_port *dig_port = NULL;
5541 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5542 struct intel_crtc_state *config = NULL;
5543 struct intel_crtc *intel_crtc = NULL;
5545 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5547 if (refresh_rate <= 0) {
5548 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5552 if (intel_dp == NULL) {
5553 DRM_DEBUG_KMS("DRRS not supported.\n");
5558 * FIXME: This needs proper synchronization with psr state for some
5559 * platforms that cannot have PSR and DRRS enabled at the same time.
5562 dig_port = dp_to_dig_port(intel_dp);
5563 encoder = &dig_port->base;
5564 intel_crtc = to_intel_crtc(encoder->base.crtc);
5567 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5571 config = intel_crtc->config;
5573 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5574 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requesting the panel's downclock rate selects the low-RR state. */
5578 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5580 index = DRRS_LOW_RR;
5582 if (index == dev_priv->drrs.refresh_rate_type) {
5584 "DRRS requested for previously set RR...ignoring\n");
5588 if (!intel_crtc->active) {
5589 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch via M/N values; Gen7+: via PIPECONF bit. */
5593 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5596 intel_dp_set_m_n(intel_crtc, M1_N1);
5599 intel_dp_set_m_n(intel_crtc, M2_N2);
5603 DRM_ERROR("Unsupported refreshrate type\n");
5605 } else if (INTEL_INFO(dev)->gen > 6) {
5606 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5607 val = I915_READ(reg);
5609 if (index > DRRS_HIGH_RR) {
5610 if (IS_VALLEYVIEW(dev))
5611 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5613 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5615 if (IS_VALLEYVIEW(dev))
5616 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5618 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5620 I915_WRITE(reg, val);
5623 dev_priv->drrs.refresh_rate_type = index;
5625 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5629 * intel_edp_drrs_enable - init drrs struct if supported
5630 * @intel_dp: DP struct
5632 * Initializes frontbuffer_bits and drrs.dp
5634 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5637 struct drm_i915_private *dev_priv = dev->dev_private;
5638 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5639 struct drm_crtc *crtc = dig_port->base.base.crtc;
5640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5642 if (!intel_crtc->config->has_drrs) {
5643 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5647 mutex_lock(&dev_priv->drrs.mutex);
/* drrs.dp non-NULL means DRRS was already enabled for some port. */
5648 if (WARN_ON(dev_priv->drrs.dp)) {
5649 DRM_ERROR("DRRS already enabled\n");
5653 dev_priv->drrs.busy_frontbuffer_bits = 0;
5655 dev_priv->drrs.dp = intel_dp;
5658 mutex_unlock(&dev_priv->drrs.mutex);
5662 * intel_edp_drrs_disable - Disable DRRS
5663 * @intel_dp: DP struct
5666 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5668 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5669 struct drm_i915_private *dev_priv = dev->dev_private;
5670 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5671 struct drm_crtc *crtc = dig_port->base.base.crtc;
5672 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5674 if (!intel_crtc->config->has_drrs)
5677 mutex_lock(&dev_priv->drrs.mutex);
5678 if (!dev_priv->drrs.dp) {
5679 mutex_unlock(&dev_priv->drrs.mutex);
/* Restore the high refresh rate before tearing DRRS state down. */
5683 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5684 intel_dp_set_drrs_state(dev_priv->dev,
5685 intel_dp->attached_connector->panel.
5686 fixed_mode->vrefresh);
5688 dev_priv->drrs.dp = NULL;
5689 mutex_unlock(&dev_priv->drrs.mutex);
5691 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed work that downclocks to the panel's low refresh rate once the
 * screen has been idle: runs only if no frontbuffer bits are still busy
 * and we are not already at the low rate.
 */
5694 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5696 struct drm_i915_private *dev_priv =
5697 container_of(work, typeof(*dev_priv), drrs.work.work);
5698 struct intel_dp *intel_dp;
5700 mutex_lock(&dev_priv->drrs.mutex);
5702 intel_dp = dev_priv->drrs.dp;
5708 * The delayed work can race with an invalidate hence we need to
5712 if (dev_priv->drrs.busy_frontbuffer_bits)
5715 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5716 intel_dp_set_drrs_state(dev_priv->dev,
5717 intel_dp->attached_connector->panel.
5718 downclock_mode->vrefresh);
5721 mutex_unlock(&dev_priv->drrs.mutex);
5725 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5727 * @frontbuffer_bits: frontbuffer plane tracking bits
5729 * This function gets called every time rendering on the given planes starts.
5730 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5732 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5734 void intel_edp_drrs_invalidate(struct drm_device *dev,
5735 unsigned frontbuffer_bits)
5737 struct drm_i915_private *dev_priv = dev->dev_private;
5738 struct drm_crtc *crtc;
5741 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5744 cancel_delayed_work(&dev_priv->drrs.work);
5746 mutex_lock(&dev_priv->drrs.mutex);
5747 if (!dev_priv->drrs.dp) {
5748 mutex_unlock(&dev_priv->drrs.mutex);
5752 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5753 pipe = to_intel_crtc(crtc)->pipe;
/* Only the DRRS pipe's frontbuffer bits are relevant here. */
5755 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5756 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5758 /* invalidate means busy screen hence upclock */
5759 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5760 intel_dp_set_drrs_state(dev_priv->dev,
5761 dev_priv->drrs.dp->attached_connector->panel.
5762 fixed_mode->vrefresh);
5764 mutex_unlock(&dev_priv->drrs.mutex);
5768 * intel_edp_drrs_flush - Restart Idleness DRRS
5770 * @frontbuffer_bits: frontbuffer plane tracking bits
5772 * This function gets called every time rendering on the given planes has
5773 * completed or flip on a crtc is completed. So DRRS should be upclocked
5774 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5775 * if no other planes are dirty.
5777 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5779 void intel_edp_drrs_flush(struct drm_device *dev,
5780 unsigned frontbuffer_bits)
5782 struct drm_i915_private *dev_priv = dev->dev_private;
5783 struct drm_crtc *crtc;
5786 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5789 cancel_delayed_work(&dev_priv->drrs.work);
5791 mutex_lock(&dev_priv->drrs.mutex);
5792 if (!dev_priv->drrs.dp) {
5793 mutex_unlock(&dev_priv->drrs.mutex);
5797 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5798 pipe = to_intel_crtc(crtc)->pipe;
/* Clear this pipe's bits from the busy set now that rendering is done. */
5800 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5801 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5803 /* flush means busy screen hence upclock */
5804 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5805 intel_dp_set_drrs_state(dev_priv->dev,
5806 dev_priv->drrs.dp->attached_connector->panel.
5807 fixed_mode->vrefresh);
5810 * flush also means no more activity hence schedule downclock, if all
5811 * other fbs are quiescent too
5813 if (!dev_priv->drrs.busy_frontbuffer_bits)
5814 schedule_delayed_work(&dev_priv->drrs.work,
5815 msecs_to_jiffies(1000));
5816 mutex_unlock(&dev_priv->drrs.mutex);
5820 * DOC: Display Refresh Rate Switching (DRRS)
5822 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5823 * which enables switching between low and high refresh rates,
5824 * dynamically, based on the usage scenario. This feature is applicable
5825 * for internal panels.
5827 * Indication that the panel supports DRRS is given by the panel EDID, which
5828 * would list multiple refresh rates for one resolution.
5830 * DRRS is of 2 types - static and seamless.
5831 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5832 * (may appear as a blink on screen) and is used in dock-undock scenario.
5833 * Seamless DRRS involves changing RR without any visual effect to the user
5834 * and can be used during normal system usage. This is done by programming
5835 * certain registers.
5837 * Support for static/seamless DRRS may be indicated in the VBT based on
5838 * inputs from the panel spec.
5840 * DRRS saves power by switching to low RR based on usage scenarios.
5843 * The implementation is based on frontbuffer tracking implementation.
5844 * When there is a disturbance on the screen triggered by user activity or a
5845 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5846 * When there is no movement on screen, after a timeout of 1 second, a switch
5847 * to low RR is made.
5848 * For integration with frontbuffer tracking code,
5849 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5851 * DRRS can be further extended to support other internal panels and also
5852 * the scenario of video playback wherein RR is set based on the rate
5853 * requested by userspace.
5857 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5858 * @intel_connector: eDP connector
5859 * @fixed_mode: preferred mode of panel
5861 * This function is called only once at driver load to initialize basic
5865 * Downclock mode if panel supports it, else return NULL.
5866 * DRRS support is determined by the presence of downclock mode (apart
5867 * from VBT setting).
5869 static struct drm_display_mode *
5870 intel_dp_drrs_init(struct intel_connector *intel_connector,
5871 struct drm_display_mode *fixed_mode)
5873 struct drm_connector *connector = &intel_connector->base;
5874 struct drm_device *dev = connector->dev;
5875 struct drm_i915_private *dev_priv = dev->dev_private;
5876 struct drm_display_mode *downclock_mode = NULL;
5878 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5879 mutex_init(&dev_priv->drrs.mutex);
/* DRRS requires Gen7+ hardware and a VBT that reports seamless support. */
5881 if (INTEL_INFO(dev)->gen <= 6) {
5882 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5886 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5887 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5891 downclock_mode = intel_find_panel_downclock
5892 (dev, fixed_mode, connector);
5894 if (!downclock_mode) {
5895 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5899 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5901 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5902 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5903 return downclock_mode;
/*
 * Finish connector setup for eDP panels: sanitize VDD state, cache the DPCD
 * and EDID, pick the fixed panel mode (EDID-preferred, then VBT fallback),
 * init DRRS, register a reboot notifier on VLV, and bring up backlight
 * handling on the right pipe.  A no-op for external DP.
 *
 * NOTE(review): this excerpt is missing lines (declarations of has_dpcd/edid,
 * braces, returns) — comments describe only the visible statements.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	struct drm_display_mode *scan;
	enum pipe pipe = INVALID_PIPE;

	/* External DP needs none of the panel-specific setup below. */
	if (!is_edp(intel_dp))

	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	/* DPCD 1.1+: honor the sink's "no AUX handshake" training hint. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake =
			intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/* if this fails, presume the device is a ghost */
	DRM_INFO("failed to retrieve link info, disabling eDP\n");

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/* Probe EDID under the mode_config lock; remember the result (or an
	 * ERR_PTR) so later probes don't have to hit the panel again. */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (drm_add_edid_modes(connector, edid)) {
		drm_mode_connector_update_edid_property(connector,
		drm_edid_to_eld(connector, edid);
	edid = ERR_PTR(-EINVAL);	/* EDID read but unparsable */
	edid = ERR_PTR(-ENOENT);	/* no EDID at all */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	mutex_unlock(&dev->mode_config.mutex);

	/* VLV panel power is lost on reboot; flush PPS state beforehand. */
	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

	/*
	 * Figure out the current pipe for the initial backlight setup.
	 * If the current pipe isn't valid, try the PPS pipe, and if that
	 * fails just assume pipe A.
	 */
	if (IS_CHERRYVIEW(dev))
		pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		pipe = PORT_TO_PIPE(intel_dp->DP);

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = intel_dp->pps_pipe;

	if (pipe != PIPE_A && pipe != PIPE_B)

	DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);
/*
 * Create and wire up the DRM connector for a DP/eDP digital port: pick the
 * platform AUX vfuncs, register the connector, choose the HPD pin, set up
 * panel power sequencing for eDP, init MST where supported, and tear
 * everything down again if eDP connector init fails.
 *
 * NOTE(review): excerpt is missing lines (return type line, braces, case
 * labels, returns) — comments describe only the visible statements.
 */
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	/* Pick the AUX clock divider routine for this platform generation. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* SKL+ programs AUX_CTL differently from older platforms. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred VDD-off, so back-to-back AUX transfers keep VDD up. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms read connector state through the DDI encoder. */
	intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	intel_encoder->hpd_pin = HPD_PORT_A;
	intel_encoder->hpd_pin = HPD_PORT_B;
	/* BXT A0/A1 has port B wired to the port A HPD pin (hw quirk). */
	if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
		intel_encoder->hpd_pin = HPD_PORT_A;
	intel_encoder->hpd_pin = HPD_PORT_C;
	intel_encoder->hpd_pin = HPD_PORT_D;

	if (is_edp(intel_dp)) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		/* VLV derives the initial PPS pipe from current hw state. */
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP init failed (e.g. ghost panel): unwind everything above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);

	i915_debugfs_connector_add(connector);
/*
 * Top-level entry point: allocate the digital port + connector for one
 * DP output register, install the platform-specific encoder vfuncs, and
 * hand off to intel_dp_init_connector().  Frees everything on failure.
 *
 * NOTE(review): excerpt is missing lines (return type, braces, returns,
 * else lines) — comments describe only the visible statements.
 */
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);	/* undo first allocation on OOM */

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* CHV and VLV need PHY-specific enable/disable sequences. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* crtc_mask: which pipes this encoder can be driven from. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->crtc_mask = 1 << 2;
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	intel_encoder->cloneable = 0;

	/* Route long/short HPD pulses for this port to the DP handler. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	/* Connector init failed: free everything allocated above. */
	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
/*
 * Suspend the MST topology manager on every DP port that is currently
 * running in MST mode.  Called from the driver's system-suspend path.
 *
 * NOTE(review): excerpt is missing lines (the `int i;` declaration,
 * `continue` statements, closing braces) — comments describe only the
 * visible statements.
 */
void intel_dp_mst_suspend(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Walk every hotplug-capable port; skip empty slots. */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)

		/* Only DP encoders can carry an MST topology. */
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)

			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6251 void intel_dp_mst_resume(struct drm_device *dev)
6253 struct drm_i915_private *dev_priv = dev->dev_private;
6256 for (i = 0; i < I915_MAX_PORTS; i++) {
6257 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6258 if (!intel_dig_port)
6260 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6263 if (!intel_dig_port->dp.can_mst)
6266 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6268 intel_dp_check_mst_status(&intel_dig_port->dp);