3a9c43d2c2c446f42220a11671df31b4f3811625
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
/* Link check timeout: 10 * 1000 — presumably milliseconds (10 s); confirm at call sites. */
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Pairs a DP link rate (kHz, e.g. 162000/270000) with the DPLL divider
 * values that produce it; used by the per-platform tables below.
 */
50 struct dp_link_dpll {
51         int clock;
52         struct dpll dpll;
53 };
54
/* DPLL settings per supported DP link rate — gen4 (i965-class) ports. */
55 static const struct dp_link_dpll gen4_dpll[] = {
56         { 162000,
57                 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58         { 270000,
59                 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60 };
61
/* DPLL settings for PCH-attached DP ports. */
62 static const struct dp_link_dpll pch_dpll[] = {
63         { 162000,
64                 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65         { 270000,
66                 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67 };
68
/* DPLL settings for Valleyview; also used as the kick PLL in vlv_power_sequencer_kick(). */
69 static const struct dp_link_dpll vlv_dpll[] = {
70         { 162000,
71                 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72         { 270000,
73                 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74 };
75
76 /*
77  * CHV supports eDP 1.4 that have  more link rates.
78  * Below only provides the fixed rate but exclude variable rate.
79  */
80 static const struct dp_link_dpll chv_dpll[] = {
81         /*
82          * CHV requires to program fractional division for m2.
83          * m2 is stored in fixed point format using formula below
84          * (m2_int << 22) | m2_fraction
85          */
86         { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
87                 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88         { 270000,       /* m2_int = 27, m2_fraction = 0 */
89                 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90         { 540000,       /* m2_int = 27, m2_fraction = 0 */
91                 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
93
/* Per-platform source-supported link rates, in kHz, ascending. */
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95                                   324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97                                   324000, 432000, 540000 };
98 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99                                  243000, 270000, 324000, 405000,
100                                  420000, 432000, 540000 };
/* Fallback when no platform-specific table applies. */
101 static const int default_rates[] = { 162000, 270000, 540000 };
102
103 /**
104  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105  * @intel_dp: DP struct
106  *
107  * If a CPU or PCH DP output is attached to an eDP panel, this function
108  * will return true, and false otherwise.
109  */
110 static bool is_edp(struct intel_dp *intel_dp)
111 {
112         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
115 }
116
117 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
118 {
119         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121         return intel_dig_port->base.base.dev;
122 }
123
/* Look up the intel_dp behind the encoder attached to @connector. */
124 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125 {
126         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
127 }
128
129 static void intel_dp_link_down(struct intel_dp *intel_dp);
130 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
131 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
132 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133 static void vlv_steal_power_sequencer(struct drm_device *dev,
134                                       enum pipe pipe);
135
/*
 * Build a 4-bit mask of the DP lanes NOT used by a link of @lane_count
 * lanes. Used lanes occupy the low bits, so e.g. lane_count == 2
 * yields 0xc (lanes 2 and 3 unused).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1u << lane_count) - 1;

	return 0xf & ~used_lanes;
}
140
/*
 * Read the sink's DP_MAX_LINK_RATE byte from the cached DPCD and
 * validate it, falling back (with a WARN) to 1.62 Gbps on any
 * unrecognized value.
 */
141 static int
142 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
143 {
144         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145
146         switch (max_link_bw) {
147         case DP_LINK_BW_1_62:
148         case DP_LINK_BW_2_7:
149         case DP_LINK_BW_5_4:
150                 break;
151         default:
152                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
153                      max_link_bw);
154                 max_link_bw = DP_LINK_BW_1_62;
155                 break;
156         }
157         return max_link_bw;
158 }
159
/*
 * Max usable lane count = min(source lanes, sink lanes). The source
 * side supports 4 lanes, except DDI port A without the DDI_A_4_LANES
 * strap, which only has 2.
 */
160 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
161 {
162         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
163         struct drm_device *dev = intel_dig_port->base.base.dev;
164         u8 source_max, sink_max;
165
166         source_max = 4;
167         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
168             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
169                 source_max = 2;
170
171         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
172
173         return min(source_max, sink_max);
174 }
175
176 /*
177  * The units on the numbers in the next two are... bizarre.  Examples will
178  * make it clearer; this one parallels an example in the eDP spec.
179  *
180  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181  *
182  *     270000 * 1 * 8 / 10 == 216000
183  *
184  * The actual data capacity of that configuration is 2.16Gbit/s, so the
185  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
186  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187  * 119000.  At 18bpp that's 2142000 kilobits per second.
188  *
189  * Thus the strange-looking division by 10 in intel_dp_link_required, to
190  * get the result in decakilobits instead of kilobits.
191  */
192
/*
 * Bandwidth needed by @pixel_clock (kHz) at @bpp, rounded up, in the
 * decakilobit/s units described in the comment block above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbps = pixel_clock * bpp;

	/* round up to the next decakilobit */
	return kbps / 10 + (kbps % 10 ? 1 : 0);
}
198
/*
 * Max payload capacity of @max_lanes lanes at @max_link_clock (kHz),
 * in decakilobits/s. The 8/10 factor accounts for 8b/10b channel
 * coding: only 8 of every 10 link bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
204
205 static enum drm_mode_status
206 intel_dp_mode_valid(struct drm_connector *connector,
207                     struct drm_display_mode *mode)
208 {
209         struct intel_dp *intel_dp = intel_attached_dp(connector);
210         struct intel_connector *intel_connector = to_intel_connector(connector);
211         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
212         int target_clock = mode->clock;
213         int max_rate, mode_rate, max_lanes, max_link_clock;
214
215         if (is_edp(intel_dp) && fixed_mode) {
216                 if (mode->hdisplay > fixed_mode->hdisplay)
217                         return MODE_PANEL;
218
219                 if (mode->vdisplay > fixed_mode->vdisplay)
220                         return MODE_PANEL;
221
222                 target_clock = fixed_mode->clock;
223         }
224
225         max_link_clock = intel_dp_max_link_rate(intel_dp);
226         max_lanes = intel_dp_max_lane_count(intel_dp);
227
228         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
229         mode_rate = intel_dp_link_required(target_clock, 18);
230
231         if (mode_rate > max_rate)
232                 return MODE_CLOCK_HIGH;
233
234         if (mode->clock < 10000)
235                 return MODE_CLOCK_LOW;
236
237         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
238                 return MODE_H_ILLEGAL;
239
240         return MODE_OK;
241 }
242
/*
 * Pack up to four bytes into a big-endian 32-bit word suitable for the
 * AUX channel data registers. Input bytes beyond the fourth are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t packed = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		packed |= (uint32_t)src[i] << (8 * (3 - i));

	return packed;
}
254
/*
 * Split a big-endian 32-bit AUX data word back into bytes; writes at
 * most four bytes into @dst.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
263
264 /* hrawclock is 1/4 the FSB frequency */
/* Returns the hrawclk frequency in MHz, decoded from the CLKCFG FSB field. */
265 static int
266 intel_hrawclk(struct drm_device *dev)
267 {
268         struct drm_i915_private *dev_priv = dev->dev_private;
269         uint32_t clkcfg;
270
271         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
272         if (IS_VALLEYVIEW(dev))
273                 return 200;
274
275         clkcfg = I915_READ(CLKCFG);
276         switch (clkcfg & CLKCFG_FSB_MASK) {
277         case CLKCFG_FSB_400:
278                 return 100;
279         case CLKCFG_FSB_533:
280                 return 133;
281         case CLKCFG_FSB_667:
282                 return 166;
283         case CLKCFG_FSB_800:
284                 return 200;
285         case CLKCFG_FSB_1067:
286                 return 266;
287         case CLKCFG_FSB_1333:
288                 return 333;
289         /* these two are just a guess; one of them might be right */
290         case CLKCFG_FSB_1600:
291         case CLKCFG_FSB_1600_ALT:
292                 return 400;
293         default:
294                 return 133;
295         }
296 }
297
298 static void
299 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
300                                     struct intel_dp *intel_dp);
301 static void
302 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
303                                               struct intel_dp *intel_dp);
304
/*
 * Take the panel power sequencer mutex. A power domain reference is
 * grabbed first — the get/put must happen outside pps_mutex (see the
 * ordering note in vlv_power_sequencer_reset()).
 */
305 static void pps_lock(struct intel_dp *intel_dp)
306 {
307         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
308         struct intel_encoder *encoder = &intel_dig_port->base;
309         struct drm_device *dev = encoder->base.dev;
310         struct drm_i915_private *dev_priv = dev->dev_private;
311         enum intel_display_power_domain power_domain;
312
313         /*
314          * See vlv_power_sequencer_reset() why we need
315          * a power domain reference here.
316          */
317         power_domain = intel_display_port_power_domain(encoder);
318         intel_display_power_get(dev_priv, power_domain);
319
320         mutex_lock(&dev_priv->pps_mutex);
321 }
322
/*
 * Release pps_mutex, then drop the power domain reference taken in
 * pps_lock() — in that order, mirroring pps_lock().
 */
323 static void pps_unlock(struct intel_dp *intel_dp)
324 {
325         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
326         struct intel_encoder *encoder = &intel_dig_port->base;
327         struct drm_device *dev = encoder->base.dev;
328         struct drm_i915_private *dev_priv = dev->dev_private;
329         enum intel_display_power_domain power_domain;
330
331         mutex_unlock(&dev_priv->pps_mutex);
332
333         power_domain = intel_display_port_power_domain(encoder);
334         intel_display_power_put(dev_priv, power_domain);
335 }
336
337 static void
338 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
339 {
340         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
341         struct drm_device *dev = intel_dig_port->base.base.dev;
342         struct drm_i915_private *dev_priv = dev->dev_private;
343         enum pipe pipe = intel_dp->pps_pipe;
344         bool pll_enabled, release_cl_override = false;
345         enum dpio_phy phy = DPIO_PHY(pipe);
346         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
347         uint32_t DP;
348
349         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
350                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
351                  pipe_name(pipe), port_name(intel_dig_port->port)))
352                 return;
353
354         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
355                       pipe_name(pipe), port_name(intel_dig_port->port));
356
357         /* Preserve the BIOS-computed detected bit. This is
358          * supposed to be read-only.
359          */
360         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
361         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
362         DP |= DP_PORT_WIDTH(1);
363         DP |= DP_LINK_TRAIN_PAT_1;
364
365         if (IS_CHERRYVIEW(dev))
366                 DP |= DP_PIPE_SELECT_CHV(pipe);
367         else if (pipe == PIPE_B)
368                 DP |= DP_PIPEB_SELECT;
369
370         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
371
372         /*
373          * The DPLL for the pipe must be enabled for this to work.
374          * So enable temporarily it if it's not already enabled.
375          */
376         if (!pll_enabled) {
377                 release_cl_override = IS_CHERRYVIEW(dev) &&
378                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
379
380                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
381                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
382         }
383
384         /*
385          * Similar magic as in intel_dp_enable_port().
386          * We _must_ do this port enable + disable trick
387          * to make this power seqeuencer lock onto the port.
388          * Otherwise even VDD force bit won't work.
389          */
390         I915_WRITE(intel_dp->output_reg, DP);
391         POSTING_READ(intel_dp->output_reg);
392
393         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
394         POSTING_READ(intel_dp->output_reg);
395
396         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
397         POSTING_READ(intel_dp->output_reg);
398
399         if (!pll_enabled) {
400                 vlv_force_pll_off(dev, pipe);
401
402                 if (release_cl_override)
403                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
404         }
405 }
406
/*
 * Return the pipe whose power sequencer drives this eDP port. If none
 * is assigned yet, pick a pipe not claimed by another eDP port, steal
 * it if necessary, program it, and kick it so it locks onto the port.
 * Caller must hold pps_mutex.
 */
407 static enum pipe
408 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
409 {
410         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
411         struct drm_device *dev = intel_dig_port->base.base.dev;
412         struct drm_i915_private *dev_priv = dev->dev_private;
413         struct intel_encoder *encoder;
414         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
415         enum pipe pipe;
416
417         lockdep_assert_held(&dev_priv->pps_mutex);
418
419         /* We should never land here with regular DP ports */
420         WARN_ON(!is_edp(intel_dp));
421
422         if (intel_dp->pps_pipe != INVALID_PIPE)
423                 return intel_dp->pps_pipe;
424
425         /*
426          * We don't have power sequencer currently.
427          * Pick one that's not used by other ports.
428          */
429         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
430                             base.head) {
431                 struct intel_dp *tmp;
432
433                 if (encoder->type != INTEL_OUTPUT_EDP)
434                         continue;
435
436                 tmp = enc_to_intel_dp(&encoder->base);
437
438                 if (tmp->pps_pipe != INVALID_PIPE)
439                         pipes &= ~(1 << tmp->pps_pipe);
440         }
441
442         /*
443          * Didn't find one. This should not happen since there
444          * are two power sequencers and up to two eDP ports.
445          */
446         if (WARN_ON(pipes == 0))
447                 pipe = PIPE_A;
448         else
449                 pipe = ffs(pipes) - 1;
450
451         vlv_steal_power_sequencer(dev, pipe);
452         intel_dp->pps_pipe = pipe;
453
454         DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
455                       pipe_name(intel_dp->pps_pipe),
456                       port_name(intel_dig_port->port));
457
458         /* init power sequencer on this pipe and port */
459         intel_dp_init_panel_power_sequencer(dev, intel_dp);
460         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
461
462         /*
463          * Even vdd force doesn't work until we've made
464          * the power sequencer lock in on the port.
465          */
466         vlv_power_sequencer_kick(intel_dp);
467
468         return intel_dp->pps_pipe;
469 }
470
/* Predicate type used by vlv_initial_pps_pipe() to filter candidate pipes. */
471 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
472                                enum pipe pipe);
473
/* True when @pipe's power sequencer reports panel power on. */
474 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
475                                enum pipe pipe)
476 {
477         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
478 }
479
/* True when @pipe's power sequencer has VDD force enabled. */
480 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
481                                 enum pipe pipe)
482 {
483         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
484 }
485
/* Accept any pipe — last-resort filter. */
486 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
487                          enum pipe pipe)
488 {
489         return true;
490 }
491
/*
 * Scan pipes A/B for one whose PPS port-select field matches @port and
 * that also satisfies @pipe_check; returns INVALID_PIPE if none does.
 */
492 static enum pipe
493 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
494                      enum port port,
495                      vlv_pipe_check pipe_check)
496 {
497         enum pipe pipe;
498
499         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
500                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
501                         PANEL_PORT_SELECT_MASK;
502
503                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
504                         continue;
505
506                 if (!pipe_check(dev_priv, pipe))
507                         continue;
508
509                 return pipe;
510         }
511
512         return INVALID_PIPE;
513 }
514
/*
 * At init, determine which pipe's power sequencer (if any) is already
 * attached to this eDP port, preferring a pipe that is actively
 * powering the panel, then one with VDD on, then any matching pipe.
 * Caller must hold pps_mutex.
 */
515 static void
516 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
517 {
518         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
519         struct drm_device *dev = intel_dig_port->base.base.dev;
520         struct drm_i915_private *dev_priv = dev->dev_private;
521         enum port port = intel_dig_port->port;
522
523         lockdep_assert_held(&dev_priv->pps_mutex);
524
525         /* try to find a pipe with this port selected */
526         /* first pick one where the panel is on */
527         intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
528                                                   vlv_pipe_has_pp_on);
529         /* didn't find one? pick one where vdd is on */
530         if (intel_dp->pps_pipe == INVALID_PIPE)
531                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
532                                                           vlv_pipe_has_vdd_on);
533         /* didn't find one? pick one with just the correct port */
534         if (intel_dp->pps_pipe == INVALID_PIPE)
535                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
536                                                           vlv_pipe_any);
537
538         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
539         if (intel_dp->pps_pipe == INVALID_PIPE) {
540                 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
541                               port_name(port));
542                 return;
543         }
544
545         DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
546                       port_name(port), pipe_name(intel_dp->pps_pipe));
547
548         intel_dp_init_panel_power_sequencer(dev, intel_dp);
549         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
550 }
551
/* Invalidate the cached pps_pipe of every eDP encoder (VLV only). */
552 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
553 {
554         struct drm_device *dev = dev_priv->dev;
555         struct intel_encoder *encoder;
556
557         if (WARN_ON(!IS_VALLEYVIEW(dev)))
558                 return;
559
560         /*
561          * We can't grab pps_mutex here due to deadlock with power_domain
562          * mutex when power_domain functions are called while holding pps_mutex.
563          * That also means that in order to use pps_pipe the code needs to
564          * hold both a power domain reference and pps_mutex, and the power domain
565          * reference get/put must be done while _not_ holding pps_mutex.
566          * pps_{lock,unlock}() do these steps in the correct order, so one
567          * should use them always.
568          */
569
570         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
571                 struct intel_dp *intel_dp;
572
573                 if (encoder->type != INTEL_OUTPUT_EDP)
574                         continue;
575
576                 intel_dp = enc_to_intel_dp(&encoder->base);
577                 intel_dp->pps_pipe = INVALID_PIPE;
578         }
579 }
580
/* Select the panel power control register for the current platform. */
581 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
582 {
583         struct drm_device *dev = intel_dp_to_dev(intel_dp);
584
585         if (IS_BROXTON(dev))
586                 return BXT_PP_CONTROL(0);
587         else if (HAS_PCH_SPLIT(dev))
588                 return PCH_PP_CONTROL;
589         else
/* VLV/CHV: per-pipe PPS; may assign a power sequencer as a side effect. */
590                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
591 }
592
/* Select the panel power status register for the current platform. */
593 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
594 {
595         struct drm_device *dev = intel_dp_to_dev(intel_dp);
596
597         if (IS_BROXTON(dev))
598                 return BXT_PP_STATUS(0);
599         else if (HAS_PCH_SPLIT(dev))
600                 return PCH_PP_STATUS;
601         else
/* VLV/CHV: per-pipe PPS; may assign a power sequencer as a side effect. */
602                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
603 }
604
605 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
606    This function only applicable when panel PM state is not to be tracked */
607 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
608                               void *unused)
609 {
610         struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
611                                                  edp_notifier);
612         struct drm_device *dev = intel_dp_to_dev(intel_dp);
613         struct drm_i915_private *dev_priv = dev->dev_private;
614         u32 pp_div;
615         u32 pp_ctrl_reg, pp_div_reg;
616
/* Only eDP panels care, and only on an actual restart. */
617         if (!is_edp(intel_dp) || code != SYS_RESTART)
618                 return 0;
619
620         pps_lock(intel_dp);
621
622         if (IS_VALLEYVIEW(dev)) {
623                 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
624
625                 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
626                 pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
627                 pp_div = I915_READ(pp_div_reg);
628                 pp_div &= PP_REFERENCE_DIVIDER_MASK;
629
630                 /* 0x1F write to PP_DIV_REG sets max cycle delay */
631                 I915_WRITE(pp_div_reg, pp_div | 0x1F);
632                 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
/* Wait out the power-cycle delay so the panel really powers down before reboot. */
633                 msleep(intel_dp->panel_power_cycle_delay);
634         }
635
636         pps_unlock(intel_dp);
637
638         return 0;
639 }
640
/* True when the panel power sequencer reports PP_ON. Caller must hold pps_mutex. */
641 static bool edp_have_panel_power(struct intel_dp *intel_dp)
642 {
643         struct drm_device *dev = intel_dp_to_dev(intel_dp);
644         struct drm_i915_private *dev_priv = dev->dev_private;
645
646         lockdep_assert_held(&dev_priv->pps_mutex);
647
/* No power sequencer assigned on VLV means the panel can't be powered. */
648         if (IS_VALLEYVIEW(dev) &&
649             intel_dp->pps_pipe == INVALID_PIPE)
650                 return false;
651
652         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
653 }
654
/* True when VDD force is set in the PPS control register. Caller must hold pps_mutex. */
655 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
656 {
657         struct drm_device *dev = intel_dp_to_dev(intel_dp);
658         struct drm_i915_private *dev_priv = dev->dev_private;
659
660         lockdep_assert_held(&dev_priv->pps_mutex);
661
/* No power sequencer assigned on VLV means VDD can't be forced on. */
662         if (IS_VALLEYVIEW(dev) &&
663             intel_dp->pps_pipe == INVALID_PIPE)
664                 return false;
665
666         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
667 }
668
/*
 * Complain loudly if AUX traffic is attempted on an eDP panel that has
 * neither panel power nor VDD force enabled — the transaction can't work.
 */
669 static void
670 intel_dp_check_edp(struct intel_dp *intel_dp)
671 {
672         struct drm_device *dev = intel_dp_to_dev(intel_dp);
673         struct drm_i915_private *dev_priv = dev->dev_private;
674
675         if (!is_edp(intel_dp))
676                 return;
677
678         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
679                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
680                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
681                               I915_READ(_pp_stat_reg(intel_dp)),
682                               I915_READ(_pp_ctrl_reg(intel_dp)));
683         }
684 }
685
/*
 * Wait for an AUX transaction to finish (SEND_BUSY to clear), either by
 * sleeping on the AUX interrupt (10 ms cap) or by atomic polling, and
 * return the final AUX_CH_CTL status value.
 */
686 static uint32_t
687 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
688 {
689         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690         struct drm_device *dev = intel_dig_port->base.base.dev;
691         struct drm_i915_private *dev_priv = dev->dev_private;
692         uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
693         uint32_t status;
694         bool done;
695
/* C both re-reads the status register and tests the busy bit in one expression. */
696 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
697         if (has_aux_irq)
698                 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
699                                           msecs_to_jiffies_timeout(10));
700         else
701                 done = wait_for_atomic(C, 10) == 0;
702         if (!done)
703                 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
704                           has_aux_irq);
705 #undef C
706
707         return status;
708 }
709
/* Single AUX clock divider (index 0 only) for pre-ILK parts. */
710 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711 {
712         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713         struct drm_device *dev = intel_dig_port->base.base.dev;
714
715         /*
716          * The clock divider is based off the hrawclk, and would like to run at
717          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
718          */
719         return index ? 0 : intel_hrawclk(dev) / 2;
720 }
721
/*
 * ILK+: AUX on port A (eDP) is clocked from cdclk, other ports from the
 * PCH rawclk; divider aims for a 2 MHz AUX clock (see i9xx comment above).
 * Only index 0 is valid.
 */
722 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
723 {
724         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
725         struct drm_device *dev = intel_dig_port->base.base.dev;
726         struct drm_i915_private *dev_priv = dev->dev_private;
727
728         if (index)
729                 return 0;
730
731         if (intel_dig_port->port == PORT_A) {
732                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
733
734         } else {
735                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
736         }
737 }
738
/*
 * HSW/BDW AUX clock dividers: port A derives from cdclk; LPT:H PCH uses
 * two fixed workaround values (indices 0 and 1); otherwise rawclk / 2.
 */
739 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
740 {
741         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
742         struct drm_device *dev = intel_dig_port->base.base.dev;
743         struct drm_i915_private *dev_priv = dev->dev_private;
744
745         if (intel_dig_port->port == PORT_A) {
746                 if (index)
747                         return 0;
748                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
749         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
750                 /* Workaround for non-ULT HSW */
751                 switch (index) {
752                 case 0: return 63;
753                 case 1: return 72;
754                 default: return 0;
755                 }
756         } else  {
757                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
758         }
759 }
760
/* VLV uses a single fixed AUX clock divider of 100; no alternate settings. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
765
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware
	 * derives the clock from CDCLK automatically). Return a dummy
	 * non-zero value for index 0 so the shared AUX code paths still
	 * work; any other index means "no more dividers".
	 */
	return index == 0 ? 1 : 0;
}
775
/*
 * Assemble the AUX_CH_CTL value that starts a transaction on pre-SKL
 * hardware: busy/done/error/interrupt bits, timeout (600us for BDW eDP,
 * else 400us), message size, precharge time (gen6: 3, later: 5) and the
 * AUX bit-clock divider.
 */
776 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
777                                       bool has_aux_irq,
778                                       int send_bytes,
779                                       uint32_t aux_clock_divider)
780 {
781         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
782         struct drm_device *dev = intel_dig_port->base.base.dev;
783         uint32_t precharge, timeout;
784
785         if (IS_GEN6(dev))
786                 precharge = 3;
787         else
788                 precharge = 5;
789
790         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
791                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
792         else
793                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
794
795         return DP_AUX_CH_CTL_SEND_BUSY |
796                DP_AUX_CH_CTL_DONE |
797                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
798                DP_AUX_CH_CTL_TIME_OUT_ERROR |
799                timeout |
800                DP_AUX_CH_CTL_RECEIVE_ERROR |
801                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
802                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
803                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
804 }
805
/*
 * SKL+ variant of the AUX_CH_CTL value: fixed 1600us timeout and a
 * 32-pulse sync; no clock divider field (the divider argument is unused).
 */
806 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
807                                       bool has_aux_irq,
808                                       int send_bytes,
809                                       uint32_t unused)
810 {
811         return DP_AUX_CH_CTL_SEND_BUSY |
812                DP_AUX_CH_CTL_DONE |
813                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
814                DP_AUX_CH_CTL_TIME_OUT_ERROR |
815                DP_AUX_CH_CTL_TIME_OUT_1600us |
816                DP_AUX_CH_CTL_RECEIVE_ERROR |
817                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
818                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
819 }
820
/*
 * Perform one raw transaction on this DP's AUX channel: transmit
 * @send_bytes bytes from @send, then read up to @recv_size bytes back
 * into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 *  -EBUSY	channel never went idle / transaction never completed
 *  -E2BIG	message larger than the 5 data registers (20 bytes) allow
 *  -EIO	receive error reported by the hardware
 *  -ETIMEDOUT	sink did not respond (typically nothing connected)
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN: only fire again when the stuck
		 * status value changes.
		 */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transaction at each available AUX clock divider
	 * (get_aux_clock_divider() returns 0 when the list is exhausted).
	 */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
975
976 #define BARE_ADDRESS_SIZE       3
977 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: pack @msg into the raw AUX wire format,
 * run it through intel_dp_aux_ch() and decode the sink's reply.
 *
 * Returns the payload size on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request, 20-bit address, length-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction:
		 * only the first 3 header bytes go on the wire.
		 */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* High nibble of the first reply byte is the ack. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* +1 for the leading reply byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1046
/*
 * One-time AUX setup for a DP port: select the AUX_CTL register for this
 * port, register the drm_dp_aux helper, and create the sysfs link from
 * the connector to its DDC i2c adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			/* Unknown VBT value: fall back to AUX A. */
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		/* Roll back the aux registration on failure. */
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1138
1139 static void
1140 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1141 {
1142         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1143
1144         if (!intel_connector->mst_port)
1145                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1146                                   intel_dp->aux.ddc.dev.kobj.name);
1147         intel_connector_unregister(intel_connector);
1148 }
1149
1150 static void
1151 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1152 {
1153         u32 ctrl1;
1154
1155         memset(&pipe_config->dpll_hw_state, 0,
1156                sizeof(pipe_config->dpll_hw_state));
1157
1158         pipe_config->ddi_pll_sel = SKL_DPLL0;
1159         pipe_config->dpll_hw_state.cfgcr1 = 0;
1160         pipe_config->dpll_hw_state.cfgcr2 = 0;
1161
1162         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1163         switch (pipe_config->port_clock / 2) {
1164         case 81000:
1165                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1166                                               SKL_DPLL0);
1167                 break;
1168         case 135000:
1169                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1170                                               SKL_DPLL0);
1171                 break;
1172         case 270000:
1173                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1174                                               SKL_DPLL0);
1175                 break;
1176         case 162000:
1177                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1178                                               SKL_DPLL0);
1179                 break;
1180         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1181         results in CDCLK change. Need to handle the change of CDCLK by
1182         disabling pipes and re-enabling them */
1183         case 108000:
1184                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1185                                               SKL_DPLL0);
1186                 break;
1187         case 216000:
1188                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1189                                               SKL_DPLL0);
1190                 break;
1191
1192         }
1193         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1194 }
1195
1196 static void
1197 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1198 {
1199         memset(&pipe_config->dpll_hw_state, 0,
1200                sizeof(pipe_config->dpll_hw_state));
1201
1202         switch (pipe_config->port_clock / 2) {
1203         case 81000:
1204                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1205                 break;
1206         case 135000:
1207                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1208                 break;
1209         case 270000:
1210                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1211                 break;
1212         }
1213 }
1214
1215 static int
1216 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1217 {
1218         if (intel_dp->num_sink_rates) {
1219                 *sink_rates = intel_dp->sink_rates;
1220                 return intel_dp->num_sink_rates;
1221         }
1222
1223         *sink_rates = default_rates;
1224
1225         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1226 }
1227
/*
 * Report the link rates this source (platform) supports: stores the
 * rate table in *source_rates and returns its length.
 */
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/* NOTE(review): the IS_SKYLAKE() check below is unreachable --
	 * the IS_SKYLAKE() branch above already returned skl_rates
	 * (which includes HBR2), so the WaDisableHBR2 clamp for pre-B0
	 * SKL never applies.  Verify whether the workaround should
	 * instead be handled in the SKYLAKE branch above.
	 */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
1253
1254 static void
1255 intel_dp_set_clock(struct intel_encoder *encoder,
1256                    struct intel_crtc_state *pipe_config)
1257 {
1258         struct drm_device *dev = encoder->base.dev;
1259         const struct dp_link_dpll *divisor = NULL;
1260         int i, count = 0;
1261
1262         if (IS_G4X(dev)) {
1263                 divisor = gen4_dpll;
1264                 count = ARRAY_SIZE(gen4_dpll);
1265         } else if (HAS_PCH_SPLIT(dev)) {
1266                 divisor = pch_dpll;
1267                 count = ARRAY_SIZE(pch_dpll);
1268         } else if (IS_CHERRYVIEW(dev)) {
1269                 divisor = chv_dpll;
1270                 count = ARRAY_SIZE(chv_dpll);
1271         } else if (IS_VALLEYVIEW(dev)) {
1272                 divisor = vlv_dpll;
1273                 count = ARRAY_SIZE(vlv_dpll);
1274         }
1275
1276         if (divisor && count) {
1277                 for (i = 0; i < count; i++) {
1278                         if (pipe_config->port_clock == divisor[i].clock) {
1279                                 pipe_config->dpll = divisor[i].dpll;
1280                                 pipe_config->clock_set = true;
1281                                 break;
1282                         }
1283                 }
1284         }
1285 }
1286
1287 static int intersect_rates(const int *source_rates, int source_len,
1288                            const int *sink_rates, int sink_len,
1289                            int *common_rates)
1290 {
1291         int i = 0, j = 0, k = 0;
1292
1293         while (i < source_len && j < sink_len) {
1294                 if (source_rates[i] == sink_rates[j]) {
1295                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1296                                 return k;
1297                         common_rates[k] = source_rates[i];
1298                         ++k;
1299                         ++i;
1300                         ++j;
1301                 } else if (source_rates[i] < sink_rates[j]) {
1302                         ++i;
1303                 } else {
1304                         ++j;
1305                 }
1306         }
1307         return k;
1308 }
1309
/* Fill common_rates[] with the rates both source and sink support;
 * returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src, *snk;
	int nsrc, nsnk;

	nsnk = intel_dp_sink_rates(intel_dp, &snk);
	nsrc = intel_dp_source_rates(dev, &src);

	return intersect_rates(src, nsrc, snk, nsnk, common_rates);
}
1324
/* Format nelem ints into str as "a, b, c"; silently stops when the
 * buffer is exhausted (or on a snprintf error, whose negative return
 * compares >= the unsigned len).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		const char *sep = (i == 0) ? "" : ", ";
		int written = snprintf(str, len, "%s%d", sep, array[i]);

		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1340
1341 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1342 {
1343         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1344         const int *source_rates, *sink_rates;
1345         int source_len, sink_len, common_len;
1346         int common_rates[DP_MAX_SUPPORTED_RATES];
1347         char str[128]; /* FIXME: too big for stack? */
1348
1349         if ((drm_debug & DRM_UT_KMS) == 0)
1350                 return;
1351
1352         source_len = intel_dp_source_rates(dev, &source_rates);
1353         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1354         DRM_DEBUG_KMS("source rates: %s\n", str);
1355
1356         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1357         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1358         DRM_DEBUG_KMS("sink rates: %s\n", str);
1359
1360         common_len = intel_dp_common_rates(intel_dp, common_rates);
1361         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1362         DRM_DEBUG_KMS("common rates: %s\n", str);
1363 }
1364
1365 static int rate_to_index(int find, const int *rates)
1366 {
1367         int i = 0;
1368
1369         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1370                 if (find == rates[i])
1371                         break;
1372
1373         return i;
1374 }
1375
1376 int
1377 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1378 {
1379         int rates[DP_MAX_SUPPORTED_RATES] = {};
1380         int len;
1381
1382         len = intel_dp_common_rates(intel_dp, rates);
1383         if (WARN_ON(len <= 0))
1384                 return 162000;
1385
1386         return rates[rate_to_index(0, rates) - 1];
1387 }
1388
/* Map @rate to its index in the sink's rate table; returns
 * DP_MAX_SUPPORTED_RATES when the rate is not in the table.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1393
1394 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1395                                   uint8_t *link_bw, uint8_t *rate_select)
1396 {
1397         if (intel_dp->num_sink_rates) {
1398                 *link_bw = 0;
1399                 *rate_select =
1400                         intel_dp_rate_select(intel_dp, port_clock);
1401         } else {
1402                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1403                 *rate_select = 0;
1404         }
1405 }
1406
1407 bool
1408 intel_dp_compute_config(struct intel_encoder *encoder,
1409                         struct intel_crtc_state *pipe_config)
1410 {
1411         struct drm_device *dev = encoder->base.dev;
1412         struct drm_i915_private *dev_priv = dev->dev_private;
1413         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1414         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1415         enum port port = dp_to_dig_port(intel_dp)->port;
1416         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1417         struct intel_connector *intel_connector = intel_dp->attached_connector;
1418         int lane_count, clock;
1419         int min_lane_count = 1;
1420         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1421         /* Conveniently, the link BW constants become indices with a shift...*/
1422         int min_clock = 0;
1423         int max_clock;
1424         int bpp, mode_rate;
1425         int link_avail, link_clock;
1426         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1427         int common_len;
1428         uint8_t link_bw, rate_select;
1429
1430         common_len = intel_dp_common_rates(intel_dp, common_rates);
1431
1432         /* No common link rates between source and sink */
1433         WARN_ON(common_len <= 0);
1434
1435         max_clock = common_len - 1;
1436
1437         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1438                 pipe_config->has_pch_encoder = true;
1439
1440         pipe_config->has_dp_encoder = true;
1441         pipe_config->has_drrs = false;
1442         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1443
1444         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1445                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1446                                        adjusted_mode);
1447
1448                 if (INTEL_INFO(dev)->gen >= 9) {
1449                         int ret;
1450                         ret = skl_update_scaler_crtc(pipe_config);
1451                         if (ret)
1452                                 return ret;
1453                 }
1454
1455                 if (!HAS_PCH_SPLIT(dev))
1456                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1457                                                  intel_connector->panel.fitting_mode);
1458                 else
1459                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1460                                                 intel_connector->panel.fitting_mode);
1461         }
1462
1463         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1464                 return false;
1465
1466         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1467                       "max bw %d pixel clock %iKHz\n",
1468                       max_lane_count, common_rates[max_clock],
1469                       adjusted_mode->crtc_clock);
1470
1471         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1472          * bpc in between. */
1473         bpp = pipe_config->pipe_bpp;
1474         if (is_edp(intel_dp)) {
1475
1476                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1477                 if (intel_connector->base.display_info.bpc == 0 &&
1478                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1479                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1480                                       dev_priv->vbt.edp_bpp);
1481                         bpp = dev_priv->vbt.edp_bpp;
1482                 }
1483
1484                 /*
1485                  * Use the maximum clock and number of lanes the eDP panel
1486                  * advertizes being capable of. The panels are generally
1487                  * designed to support only a single clock and lane
1488                  * configuration, and typically these values correspond to the
1489                  * native resolution of the panel.
1490                  */
1491                 min_lane_count = max_lane_count;
1492                 min_clock = max_clock;
1493         }
1494
1495         for (; bpp >= 6*3; bpp -= 2*3) {
1496                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1497                                                    bpp);
1498
1499                 for (clock = min_clock; clock <= max_clock; clock++) {
1500                         for (lane_count = min_lane_count;
1501                                 lane_count <= max_lane_count;
1502                                 lane_count <<= 1) {
1503
1504                                 link_clock = common_rates[clock];
1505                                 link_avail = intel_dp_max_data_rate(link_clock,
1506                                                                     lane_count);
1507
1508                                 if (mode_rate <= link_avail) {
1509                                         goto found;
1510                                 }
1511                         }
1512                 }
1513         }
1514
1515         return false;
1516
1517 found:
1518         if (intel_dp->color_range_auto) {
1519                 /*
1520                  * See:
1521                  * CEA-861-E - 5.1 Default Encoding Parameters
1522                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1523                  */
1524                 pipe_config->limited_color_range =
1525                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1526         } else {
1527                 pipe_config->limited_color_range =
1528                         intel_dp->limited_color_range;
1529         }
1530
1531         pipe_config->lane_count = lane_count;
1532
1533         pipe_config->pipe_bpp = bpp;
1534         pipe_config->port_clock = common_rates[clock];
1535
1536         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1537                               &link_bw, &rate_select);
1538
1539         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1540                       link_bw, rate_select, pipe_config->lane_count,
1541                       pipe_config->port_clock, bpp);
1542         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1543                       mode_rate, link_avail);
1544
1545         intel_link_compute_m_n(bpp, lane_count,
1546                                adjusted_mode->crtc_clock,
1547                                pipe_config->port_clock,
1548                                &pipe_config->dp_m_n);
1549
1550         if (intel_connector->panel.downclock_mode != NULL &&
1551                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1552                         pipe_config->has_drrs = true;
1553                         intel_link_compute_m_n(bpp, lane_count,
1554                                 intel_connector->panel.downclock_mode->clock,
1555                                 pipe_config->port_clock,
1556                                 &pipe_config->dp_m2_n2);
1557         }
1558
1559         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1560                 skl_edp_set_pll_config(pipe_config);
1561         else if (IS_BROXTON(dev))
1562                 /* handled in ddi */;
1563         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1564                 hsw_dp_set_ddi_pll_sel(pipe_config);
1565         else
1566                 intel_dp_set_clock(encoder, pipe_config);
1567
1568         return true;
1569 }
1570
/*
 * Program the eDP PLL frequency-select bits in DP_A for the CPU eDP
 * port: 160MHz setting for a 162000 port clock, 270MHz otherwise.
 * The chosen bits are mirrored into intel_dp->DP for later writes.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Let the PLL settle before anything else touches the port. */
	udelay(500);
}
1601
/* Cache the link rate and lane count chosen in @pipe_config on the
 * intel_dp itself.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1608
/*
 * Build the value of the DP port control register for the upcoming
 * modeset and cache it in intel_dp->DP; nothing is written to the port
 * register itself here.  On CPT PCH the enhanced-framing bit lives in
 * TRANS_DP_CTL instead, and that register *is* updated here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 CPU eDP: sync polarity and pipe select in the
		 * port register, CPT-style link training field. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT enhanced framing is carried in TRANS_DP_CTL,
		 * so update that register right away. */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU style register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1693
/*
 * PP_STATUS mask/value pairs for the three panel power transitions we
 * wait on in wait_panel_status().  The literal 0 terms are column
 * placeholders so the three definitions line up field by field.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1702
1703 static void wait_panel_status(struct intel_dp *intel_dp,
1704                                        u32 mask,
1705                                        u32 value)
1706 {
1707         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1708         struct drm_i915_private *dev_priv = dev->dev_private;
1709         u32 pp_stat_reg, pp_ctrl_reg;
1710
1711         lockdep_assert_held(&dev_priv->pps_mutex);
1712
1713         pp_stat_reg = _pp_stat_reg(intel_dp);
1714         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1715
1716         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1717                         mask, value,
1718                         I915_READ(pp_stat_reg),
1719                         I915_READ(pp_ctrl_reg));
1720
1721         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1722                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1723                                 I915_READ(pp_stat_reg),
1724                                 I915_READ(pp_ctrl_reg));
1725         }
1726
1727         DRM_DEBUG_KMS("Wait complete\n");
1728 }
1729
/* Block until the PP sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1735
/* Block until the PP sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1741
/*
 * Block until the panel's mandatory power-cycle delay has elapsed:
 * first the software-tracked delay since the last power off, then
 * whatever the hardware sequencer still reports as in progress.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1753
/* Honour the panel power-on -> backlight-on delay requirement. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1759
/* Honour the backlight-off -> panel-power-change delay requirement. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1765
1766 /* Read the current pp_control value, unlocking the register if it
1767  * is locked
1768  */
1769
1770 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1771 {
1772         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1773         struct drm_i915_private *dev_priv = dev->dev_private;
1774         u32 control;
1775
1776         lockdep_assert_held(&dev_priv->pps_mutex);
1777
1778         control = I915_READ(_pp_ctrl_reg(intel_dp));
1779         if (!IS_BROXTON(dev)) {
1780                 control &= ~PANEL_UNLOCK_MASK;
1781                 control |= PANEL_UNLOCK_REGS;
1782         }
1783         return control;
1784 }
1785
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true if VDD was not already requested, i.e. the caller is now
 * responsible for dropping it again; returns false for non-eDP ports
 * and for nested requests.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* A pending delayed VDD-off must not fire while VDD is wanted. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already up in hardware: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Hold a power domain reference for as long as VDD is forced on;
	 * released again in edp_panel_vdd_off_sync(). */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* Respect the panel's power-cycle delay if it was just turned off. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1843
1844 /*
1845  * Must be paired with intel_edp_panel_vdd_off() or
1846  * intel_edp_panel_off().
1847  * Nested calls to these functions are not allowed since
1848  * we drop the lock. Caller must use some higher level
1849  * locking to prevent nested calls from other threads.
1850  */
1851 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1852 {
1853         bool vdd;
1854
1855         if (!is_edp(intel_dp))
1856                 return;
1857
1858         pps_lock(intel_dp);
1859         vdd = edp_panel_vdd_on(intel_dp);
1860         pps_unlock(intel_dp);
1861
1862         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1863              port_name(dp_to_dig_port(intel_dp)->port));
1864 }
1865
/*
 * Actually drop the VDD force: clear EDP_FORCE_VDD in the PP control
 * register and release the power domain reference taken when VDD was
 * forced on.  Caller must hold pps_mutex, and want_panel_vdd must
 * already be false (warned on below).  No-op if VDD is not currently
 * forced in hardware.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* If panel power is also off, dropping VDD starts a full power
	 * cycle; record when it began. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1906
1907 static void edp_panel_vdd_work(struct work_struct *__work)
1908 {
1909         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1910                                                  struct intel_dp, panel_vdd_work);
1911
1912         pps_lock(intel_dp);
1913         if (!intel_dp->want_panel_vdd)
1914                 edp_panel_vdd_off_sync(intel_dp);
1915         pps_unlock(intel_dp);
1916 }
1917
1918 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1919 {
1920         unsigned long delay;
1921
1922         /*
1923          * Queue the timer to fire a long time from now (relative to the power
1924          * down delay) to keep the panel power up across a sequence of
1925          * operations.
1926          */
1927         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1928         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1929 }
1930
1931 /*
1932  * Must be paired with edp_panel_vdd_on().
1933  * Must hold pps_mutex around the whole on/off sequence.
1934  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1935  */
1936 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1937 {
1938         struct drm_i915_private *dev_priv =
1939                 intel_dp_to_dev(intel_dp)->dev_private;
1940
1941         lockdep_assert_held(&dev_priv->pps_mutex);
1942
1943         if (!is_edp(intel_dp))
1944                 return;
1945
1946         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1947              port_name(dp_to_dig_port(intel_dp)->port));
1948
1949         intel_dp->want_panel_vdd = false;
1950
1951         if (sync)
1952                 edp_panel_vdd_off_sync(intel_dp);
1953         else
1954                 edp_panel_vdd_schedule_off(intel_dp);
1955 }
1956
/*
 * Turn the eDP panel power on via the PP control register and wait for
 * the power sequencer to report the panel on.  Caller must hold
 * pps_mutex.  Warns and bails if the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the mandatory off -> on power-cycle delay first. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2004
/*
 * Public wrapper: turn the eDP panel power on while holding pps_mutex.
 * No-op for non-eDP ports.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2014
2015
/*
 * Turn the eDP panel power off (and drop the VDD force) via a single PP
 * control write, then wait for the power-off sequence to complete.
 * Caller must hold pps_mutex and must have VDD forced on (warned on
 * below), since the power domain reference that was taken for VDD is
 * released here.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is being dropped as part of this write, so stop tracking it. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the power-cycle delay (see wait_panel_power_cycle()). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2057
/*
 * Public wrapper: turn the eDP panel power off while holding pps_mutex.
 * No-op for non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2067
2068 /* Enable backlight in the panel power control. */
2069 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2070 {
2071         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2072         struct drm_device *dev = intel_dig_port->base.base.dev;
2073         struct drm_i915_private *dev_priv = dev->dev_private;
2074         u32 pp;
2075         u32 pp_ctrl_reg;
2076
2077         /*
2078          * If we enable the backlight right away following a panel power
2079          * on, we may see slight flicker as the panel syncs with the eDP
2080          * link.  So delay a bit to make sure the image is solid before
2081          * allowing it to appear.
2082          */
2083         wait_backlight_on(intel_dp);
2084
2085         pps_lock(intel_dp);
2086
2087         pp = ironlake_get_pp_control(intel_dp);
2088         pp |= EDP_BLC_ENABLE;
2089
2090         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2091
2092         I915_WRITE(pp_ctrl_reg, pp);
2093         POSTING_READ(pp_ctrl_reg);
2094
2095         pps_unlock(intel_dp);
2096 }
2097
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control enable. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2109
2110 /* Disable backlight in the panel power control. */
2111 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2112 {
2113         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2114         struct drm_i915_private *dev_priv = dev->dev_private;
2115         u32 pp;
2116         u32 pp_ctrl_reg;
2117
2118         if (!is_edp(intel_dp))
2119                 return;
2120
2121         pps_lock(intel_dp);
2122
2123         pp = ironlake_get_pp_control(intel_dp);
2124         pp &= ~EDP_BLC_ENABLE;
2125
2126         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2127
2128         I915_WRITE(pp_ctrl_reg, pp);
2129         POSTING_READ(pp_ctrl_reg);
2130
2131         pps_unlock(intel_dp);
2132
2133         intel_dp->last_backlight_off = jiffies;
2134         edp_wait_backlight_off(intel_dp);
2135 }
2136
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Mirror image of intel_edp_backlight_on(): PP control first,
	 * then the PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2148
2149 /*
2150  * Hook for controlling the panel power control backlight through the bl_power
2151  * sysfs attribute. Take care to handle multiple calls.
2152  */
2153 static void intel_edp_backlight_power(struct intel_connector *connector,
2154                                       bool enable)
2155 {
2156         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2157         bool is_enabled;
2158
2159         pps_lock(intel_dp);
2160         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2161         pps_unlock(intel_dp);
2162
2163         if (is_enabled == enable)
2164                 return;
2165
2166         DRM_DEBUG_KMS("panel power control backlight %s\n",
2167                       enable ? "enable" : "disable");
2168
2169         if (enable)
2170                 _intel_edp_backlight_on(intel_dp);
2171         else
2172                 _intel_edp_backlight_off(intel_dp);
2173 }
2174
/*
 * Enable the CPU eDP PLL via DP_A.  The pipe must already be disabled
 * (asserted below); warns if the PLL or the port is unexpectedly on.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* NOTE(review): 200us spin-up delay — presumably PLL lock time per
	 * hardware spec; confirm against bspec. */
	udelay(200);
}
2200
/*
 * Disable the CPU eDP PLL via DP_A.  The pipe must already be disabled
 * and the port off (both checked below); warns if the PLL is already
 * disabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* NOTE(review): 200us settle delay — presumably per hardware spec;
	 * confirm against bspec. */
	udelay(200);
}
2225
2226 /* If the sink supports it, try to set the power state appropriately */
2227 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2228 {
2229         int ret, i;
2230
2231         /* Should have a valid DPCD by this point */
2232         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2233                 return;
2234
2235         if (mode != DRM_MODE_DPMS_ON) {
2236                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2237                                          DP_SET_POWER_D3);
2238         } else {
2239                 /*
2240                  * When turning on, we need to retry for 1ms to give the sink
2241                  * time to wake up.
2242                  */
2243                 for (i = 0; i < 3; i++) {
2244                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2245                                                  DP_SET_POWER_D0);
2246                         if (ret == 1)
2247                                 break;
2248                         msleep(1);
2249                 }
2250         }
2251
2252         if (ret != 1)
2253                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2254                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2255 }
2256
2257 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2258                                   enum pipe *pipe)
2259 {
2260         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2261         enum port port = dp_to_dig_port(intel_dp)->port;
2262         struct drm_device *dev = encoder->base.dev;
2263         struct drm_i915_private *dev_priv = dev->dev_private;
2264         enum intel_display_power_domain power_domain;
2265         u32 tmp;
2266
2267         power_domain = intel_display_port_power_domain(encoder);
2268         if (!intel_display_power_is_enabled(dev_priv, power_domain))
2269                 return false;
2270
2271         tmp = I915_READ(intel_dp->output_reg);
2272
2273         if (!(tmp & DP_PORT_EN))
2274                 return false;
2275
2276         if (IS_GEN7(dev) && port == PORT_A) {
2277                 *pipe = PORT_TO_PIPE_CPT(tmp);
2278         } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2279                 enum pipe p;
2280
2281                 for_each_pipe(dev_priv, p) {
2282                         u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2283                         if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2284                                 *pipe = p;
2285                                 return true;
2286                         }
2287                 }
2288
2289                 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2290                               intel_dp->output_reg);
2291         } else if (IS_CHERRYVIEW(dev)) {
2292                 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2293         } else {
2294                 *pipe = PORT_TO_PIPE(tmp);
2295         }
2296
2297         return true;
2298 }
2299
/*
 * Read back the current DP port/transcoder state into @pipe_config —
 * the state-readout counterpart of intel_dp_prepare(): sync polarity
 * flags, audio enable, limited color range, lane count, m/n values and
 * (for port A) the eDP port clock, plus the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarities come from TRANS_DP_CTL on CPT PCH ports, from
	 * the port register everywhere else. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A (CPU eDP): recover the link clock from the PLL frequency
	 * select bits in DP_A. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2387
/*
 * Encoder disable hook: shut down audio, PSR (where applicable),
 * backlight, sink power and panel power in the required order.  The
 * sequence matters — backlight and panel delays are enforced by the
 * called helpers.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Tear down the audio codec before the port goes away. */
	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* Skipped on DDI platforms — presumably PSR disable is handled by
	 * the DDI code there; confirm. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2411
2412 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2413 {
2414         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2415         enum port port = dp_to_dig_port(intel_dp)->port;
2416
2417         intel_dp_link_down(intel_dp);
2418         if (port == PORT_A)
2419                 ironlake_edp_pll_off(intel_dp);
2420 }
2421
2422 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2423 {
2424         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2425
2426         intel_dp_link_down(intel_dp);
2427 }
2428
/*
 * Assert (reset == true) or deassert (reset == false) the CHV PHY data
 * lane soft reset for this encoder's DPIO channel. Lanes 0/1 (PCS01) are
 * always programmed; lanes 2/3 (PCS23) only when more than two lanes are
 * in use. Caller must hold sb_lock (all accesses go through vlv_dpio_*).
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                                     bool reset)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = crtc->pipe;
        uint32_t val;

        /* TX lane reset: clearing the *_RESET bits asserts the reset. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        if (reset)
                val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        else
                val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
                if (reset)
                        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
                else
                        val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
        }

        /* PCS clock soft reset: clearing the bit asserts; soft reset
         * requests are always enabled while we do this. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        if (reset)
                val &= ~DPIO_PCS_CLK_SOFT_RESET;
        else
                val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
                val |= CHV_PCS_REQ_SOFTRESET_EN;
                if (reset)
                        val &= ~DPIO_PCS_CLK_SOFT_RESET;
                else
                        val |= DPIO_PCS_CLK_SOFT_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
        }
}
2472
/*
 * CHV post-disable: take the link down, then park the PHY data lanes in
 * reset. sb_lock serializes the sideband (DPIO) access done by
 * chv_data_lane_soft_reset().
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        mutex_unlock(&dev_priv->sb_lock);
}
2488
/*
 * Translate a DPCD-style training pattern (dp_train_pat) into the source
 * side register bits. Three hardware layouts exist:
 *  - DDI (HSW+): written directly to DP_TP_CTL here;
 *  - CPT PCH ports / gen7 port A: *_CPT bits merged into *DP;
 *  - everything else (g4x/ilk/VLV/CHV): plain bits merged into *DP.
 * In the non-DDI cases the caller is responsible for writing *DP to the
 * port register; only the DDI path writes hardware itself.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                /* Scrambling is disabled during training patterns 1-3. */
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 has no CPT encoding; fall back to 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV can encode pattern 3 here. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2572
/*
 * Enable the DP port with training pattern 1 pre-loaded, using the
 * mandatory two-step register write (program first, then set DP_PORT_EN).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2596
/*
 * Common DP enable path: enable the port (training pattern 1), run the
 * eDP panel power-up sequence under the pps lock, wait for the VLV/CHV
 * PHY to come ready, then train the link and optionally start audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        /* The port must not already be live when we get here. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* Note: IS_CHERRYVIEW() is nested under IS_VALLEYVIEW() below,
         * so this covers CHV as well. */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* eDP needs vdd held while the panel powers up. */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                unsigned int lane_mask = 0x0;

                /* CHV only waits for the lanes actually in use. */
                if (IS_CHERRYVIEW(dev))
                        lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2642
2643 static void g4x_enable_dp(struct intel_encoder *encoder)
2644 {
2645         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2646
2647         intel_enable_dp(encoder);
2648         intel_edp_backlight_on(intel_dp);
2649 }
2650
2651 static void vlv_enable_dp(struct intel_encoder *encoder)
2652 {
2653         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2654
2655         intel_edp_backlight_on(intel_dp);
2656         intel_psr_enable(intel_dp);
2657 }
2658
/*
 * g4x/ilk pre-enable: program the port register and, for eDP on port A,
 * configure and turn on the CPU eDP PLL before the pipe starts.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

        intel_dp_prepare(encoder);

        /* Only ilk+ has port A */
        if (dport->port == PORT_A) {
                ironlake_set_pll_cpu_edp(intel_dp);
                ironlake_edp_pll_on(intel_dp);
        }
}
2672
/*
 * Logically disconnect the power sequencer currently bound to this port:
 * sync off vdd, clear the port select in the PPS registers, and mark the
 * pps pipe invalid. Caller must already hold the pps lock (vdd_off_sync
 * and pps_pipe manipulation happen under it elsewhere in this file).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2698
/*
 * Take the given pipe's power sequencer away from whichever eDP port is
 * currently using it, so the caller can bind it to a new port. Must be
 * called with pps_mutex held; only pipes A and B have PPS on VLV/CHV.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        /* Scan every eDP encoder for one bound to this pipe's PPS. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* Stealing from an active port indicates a driver bug. */
                WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2735
/*
 * Bind the power sequencer of this port's current pipe to the port:
 * detach any PPS previously used by this port, steal the target pipe's
 * PPS from any other port, then (re)initialize the PPS registers.
 * No-op for non-eDP or when the binding is already correct.
 * Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2776
/*
 * VLV pre-enable: program the PCS DPIO registers for this port's channel
 * (under sb_lock), then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * "val = 0" below, making the &= a no-op as well. The read may
         * only matter for sideband side effects (if any) - confirm before
         * cleaning this up.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        /* Magic PCS values; meaning not documented here. */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2805
/*
 * VLV pre-PLL-enable: program the port register, then reset the Tx lanes
 * and apply the inter-pair skew workaround via DPIO (under sb_lock).
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2835
/*
 * CHV pre-enable: program per-lane DPIO settings (TX FIFO reset source,
 * upar bits, data lane stagger), release the data lane soft reset, run
 * the common DP enable sequence, and finally drop any temporary hold on
 * the second common lane taken in chv_dp_pre_pll_enable().
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
                if (intel_crtc->config->lane_count == 1)
                        data = 0x0;
                else
                        data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming: value scales with port clock. */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val |= DPIO_TX2_STAGGER_MASK(0x1f);
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        if (intel_crtc->config->lane_count > 2) {
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                               DPIO_LANESTAGGER_STRAP(stagger) |
                               DPIO_LANESTAGGER_STRAP_OVRD |
                               DPIO_TX1_STAGGER_MASK(0x1f) |
                               DPIO_TX1_STAGGER_MULT(7) |
                               DPIO_TX2_STAGGER_MULT(5));
        }

        /* Deassert data lane reset */
        chv_data_lane_soft_reset(encoder, false);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);

        /* Second common lane will stay alive on its own now */
        if (dport->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
                dport->release_cl2_override = false;
        }
}
2924
/*
 * CHV pre-PLL-enable: power up the PHY lanes/common lanes needed by this
 * port, hold the data lanes in reset, and program the left/right clock
 * distribution and clock channel usage so the PLL can be brought up.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;

        intel_dp_prepare(encoder);

        /*
         * Must trick the second common lane into life.
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
                dport->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

        chv_phy_powergate_lanes(encoder, true, lane_mask);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
                val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
                if (pipe != PIPE_B)
                        val &= ~CHV_PCS_USEDCLKCHANNEL;
                else
                        val |= CHV_PCS_USEDCLKCHANNEL;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
        }

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
3007
/*
 * CHV post-PLL-disable: tear down the left/right clock distribution that
 * chv_dp_pre_pll_enable() forced on, then drop the lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Leave the power down bit cleared for at least one
         * lane so that chv_powergate_phy_ch() will power
         * on something when the channel is otherwise unused.
         * When the port is off and the override is removed
         * the lanes power down anyway, so otherwise it doesn't
         * really matter what the state of power down bits is
         * after this.
         */
        chv_phy_powergate_lanes(encoder, false, 0x0);
}
3040
3041 /*
3042  * Native read with retry for link status and receiver capability reads for
3043  * cases where the sink may still be asleep.
3044  *
3045  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3046  * supposed to retry 3 times per the spec.
3047  */
3048 static ssize_t
3049 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3050                         void *buffer, size_t size)
3051 {
3052         ssize_t ret;
3053         int i;
3054
3055         /*
3056          * Sometime we just get the same incorrect byte repeated
3057          * over the entire buffer. Doing just one throw away read
3058          * initially seems to "solve" it.
3059          */
3060         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3061
3062         for (i = 0; i < 3; i++) {
3063                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3064                 if (ret == size)
3065                         return ret;
3066                 msleep(1);
3067         }
3068
3069         return ret;
3070 }
3071
3072 /*
3073  * Fetch AUX CH registers 0x202 - 0x207 which contain
3074  * link status information
3075  */
3076 static bool
3077 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3078 {
3079         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3080                                        DP_LANE0_1_STATUS,
3081                                        link_status,
3082                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3083 }
3084
3085 /* These are source-specific values. */
3086 static uint8_t
3087 intel_dp_voltage_max(struct intel_dp *intel_dp)
3088 {
3089         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3090         struct drm_i915_private *dev_priv = dev->dev_private;
3091         enum port port = dp_to_dig_port(intel_dp)->port;
3092
3093         if (IS_BROXTON(dev))
3094                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3095         else if (INTEL_INFO(dev)->gen >= 9) {
3096                 if (dev_priv->edp_low_vswing && port == PORT_A)
3097                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3098                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3099         } else if (IS_VALLEYVIEW(dev))
3100                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3101         else if (IS_GEN7(dev) && port == PORT_A)
3102                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3103         else if (HAS_PCH_CPT(dev) && port != PORT_A)
3104                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3105         else
3106                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3107 }
3108
/*
 * Return the maximum pre-emphasis level the source supports for the given
 * voltage swing, per platform. Higher swing generally allows less
 * pre-emphasis headroom.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
3176
/*
 * vlv_signal_levels - apply DP voltage swing / pre-emphasis on Valleyview
 *
 * Translates the swing/pre-emphasis request in intel_dp->train_set[0]
 * into VLV PHY de-emphasis, pre-emphasis and unique-transition-scale
 * register values and writes them through the DPIO sideband.
 *
 * The magic register values are presumably taken from the VLV PHY
 * programming tables (NOTE: review — not derivable from this file).
 *
 * Always returns 0: the levels are programmed directly via DPIO, so the
 * caller (intel_dp_set_signal_levels()) merges no bits into the DP port
 * register for this path (its mask stays 0). An unsupported
 * swing/pre-emphasis combination also returns 0 without touching the
 * hardware.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Outer switch: requested pre-emphasis; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;	/* unsupported combination: leave HW untouched */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Program the PHY under the sideband lock. TX_DW5 is written 0
	 * first and 0x80000000 last, bracketing the other writes
	 * (presumably disabling/re-enabling the lane while values are
	 * updated — NOTE: review, exact TX_DW5 semantics not visible here).
	 */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3276
3277 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3278 {
3279         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3280                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3281 }
3282
/*
 * chv_signal_levels - apply DP voltage swing / pre-emphasis on Cherryview
 *
 * Maps the swing/pre-emphasis request in intel_dp->train_set[0] to CHV
 * PHY de-emphasis and margin values, programs them per-lane through the
 * DPIO sideband, then starts the PHY's swing calculation.
 *
 * Always returns 0: the levels are applied directly via DPIO, so the
 * caller merges no bits into the DP port register for this path. An
 * unsupported swing/pre-emphasis combination also returns 0 without
 * touching the hardware.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up de-emphasis/margin for the requested level pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;	/* unsupported combination: leave HW untouched */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The second PCS group is only programmed when >2 lanes are active. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Reset the PCS margin fields to 000 before programming swing. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3455
3456 static void
3457 intel_get_adjust_train(struct intel_dp *intel_dp,
3458                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3459 {
3460         uint8_t v = 0;
3461         uint8_t p = 0;
3462         int lane;
3463         uint8_t voltage_max;
3464         uint8_t preemph_max;
3465
3466         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3467                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3468                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3469
3470                 if (this_v > v)
3471                         v = this_v;
3472                 if (this_p > p)
3473                         p = this_p;
3474         }
3475
3476         voltage_max = intel_dp_voltage_max(intel_dp);
3477         if (v >= voltage_max)
3478                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3479
3480         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3481         if (p >= preemph_max)
3482                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3483
3484         for (lane = 0; lane < 4; lane++)
3485                 intel_dp->train_set[lane] = v | p;
3486 }
3487
3488 static uint32_t
3489 gen4_signal_levels(uint8_t train_set)
3490 {
3491         uint32_t        signal_levels = 0;
3492
3493         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3494         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3495         default:
3496                 signal_levels |= DP_VOLTAGE_0_4;
3497                 break;
3498         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3499                 signal_levels |= DP_VOLTAGE_0_6;
3500                 break;
3501         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3502                 signal_levels |= DP_VOLTAGE_0_8;
3503                 break;
3504         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3505                 signal_levels |= DP_VOLTAGE_1_2;
3506                 break;
3507         }
3508         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3509         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3510         default:
3511                 signal_levels |= DP_PRE_EMPHASIS_0;
3512                 break;
3513         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3514                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3515                 break;
3516         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3517                 signal_levels |= DP_PRE_EMPHASIS_6;
3518                 break;
3519         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3520                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3521                 break;
3522         }
3523         return signal_levels;
3524 }
3525
3526 /* Gen6's DP voltage swing and pre-emphasis control */
3527 static uint32_t
3528 gen6_edp_signal_levels(uint8_t train_set)
3529 {
3530         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3531                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3532         switch (signal_levels) {
3533         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3534         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3535                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3536         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3537                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3538         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3539         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3540                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3541         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3542         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3543                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3544         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3545         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3546                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3547         default:
3548                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3549                               "0x%x\n", signal_levels);
3550                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3551         }
3552 }
3553
3554 /* Gen7's DP voltage swing and pre-emphasis control */
3555 static uint32_t
3556 gen7_edp_signal_levels(uint8_t train_set)
3557 {
3558         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3559                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3560         switch (signal_levels) {
3561         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3562                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3563         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3564                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3565         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3566                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3567
3568         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3569                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3570         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3571                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3572
3573         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3574                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3575         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3576                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3577
3578         default:
3579                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3580                               "0x%x\n", signal_levels);
3581                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3582         }
3583 }
3584
3585 /* Properly updates "DP" with the correct signal levels. */
3586 static void
3587 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3588 {
3589         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3590         enum port port = intel_dig_port->port;
3591         struct drm_device *dev = intel_dig_port->base.base.dev;
3592         uint32_t signal_levels, mask = 0;
3593         uint8_t train_set = intel_dp->train_set[0];
3594
3595         if (HAS_DDI(dev)) {
3596                 signal_levels = ddi_signal_levels(intel_dp);
3597
3598                 if (IS_BROXTON(dev))
3599                         signal_levels = 0;
3600                 else
3601                         mask = DDI_BUF_EMP_MASK;
3602         } else if (IS_CHERRYVIEW(dev)) {
3603                 signal_levels = chv_signal_levels(intel_dp);
3604         } else if (IS_VALLEYVIEW(dev)) {
3605                 signal_levels = vlv_signal_levels(intel_dp);
3606         } else if (IS_GEN7(dev) && port == PORT_A) {
3607                 signal_levels = gen7_edp_signal_levels(train_set);
3608                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3609         } else if (IS_GEN6(dev) && port == PORT_A) {
3610                 signal_levels = gen6_edp_signal_levels(train_set);
3611                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3612         } else {
3613                 signal_levels = gen4_signal_levels(train_set);
3614                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3615         }
3616
3617         if (mask)
3618                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3619
3620         DRM_DEBUG_KMS("Using vswing level %d\n",
3621                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3622         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3623                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3624                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3625
3626         *DP = (*DP & ~mask) | signal_levels;
3627 }
3628
3629 static bool
3630 intel_dp_set_link_train(struct intel_dp *intel_dp,
3631                         uint32_t *DP,
3632                         uint8_t dp_train_pat)
3633 {
3634         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3635         struct drm_i915_private *dev_priv =
3636                 to_i915(intel_dig_port->base.base.dev);
3637         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3638         int ret, len;
3639
3640         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3641
3642         I915_WRITE(intel_dp->output_reg, *DP);
3643         POSTING_READ(intel_dp->output_reg);
3644
3645         buf[0] = dp_train_pat;
3646         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3647             DP_TRAINING_PATTERN_DISABLE) {
3648                 /* don't write DP_TRAINING_LANEx_SET on disable */
3649                 len = 1;
3650         } else {
3651                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3652                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3653                 len = intel_dp->lane_count + 1;
3654         }
3655
3656         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3657                                 buf, len);
3658
3659         return ret == len;
3660 }
3661
3662 static bool
3663 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3664                         uint8_t dp_train_pat)
3665 {
3666         if (!intel_dp->train_set_valid)
3667                 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3668         intel_dp_set_signal_levels(intel_dp, DP);
3669         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3670 }
3671
3672 static bool
3673 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3674                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3675 {
3676         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3677         struct drm_i915_private *dev_priv =
3678                 to_i915(intel_dig_port->base.base.dev);
3679         int ret;
3680
3681         intel_get_adjust_train(intel_dp, link_status);
3682         intel_dp_set_signal_levels(intel_dp, DP);
3683
3684         I915_WRITE(intel_dp->output_reg, *DP);
3685         POSTING_READ(intel_dp->output_reg);
3686
3687         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3688                                 intel_dp->train_set, intel_dp->lane_count);
3689
3690         return ret == intel_dp->lane_count;
3691 }
3692
3693 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3694 {
3695         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3696         struct drm_device *dev = intel_dig_port->base.base.dev;
3697         struct drm_i915_private *dev_priv = dev->dev_private;
3698         enum port port = intel_dig_port->port;
3699         uint32_t val;
3700
3701         if (!HAS_DDI(dev))
3702                 return;
3703
3704         val = I915_READ(DP_TP_CTL(port));
3705         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3706         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3707         I915_WRITE(DP_TP_CTL(port), val);
3708
3709         /*
3710          * On PORT_A we can have only eDP in SST mode. There the only reason
3711          * we need to set idle transmission mode is to work around a HW issue
3712          * where we enable the pipe while not in idle link-training mode.
3713          * In this case there is requirement to wait for a minimum number of
3714          * idle patterns to be sent.
3715          */
3716         if (port == PORT_A)
3717                 return;
3718
3719         if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3720                      1))
3721                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3722 }
3723
/*
 * Enable corresponding port and start training pattern 1.
 *
 * Writes the link configuration (rate, lane count, 8b/10b, downspread)
 * to the sink, then runs the clock-recovery phase: repeatedly check
 * link status and adjust vswing/pre-emphasis as the sink requests,
 * giving up after 5 retries at max swing (full resets) or 5 tries at
 * the same voltage. The (possibly modified) port value is stored back
 * into intel_dp->DP either way.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Map intel_dp->link_rate to a DPCD link_bw / rate_select pair. */
	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising explicit rates take DP_LINK_RATE_SET as well. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* No downspread, ANSI 8b/10b channel coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: matches no real swing level */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		/* All lanes at max swing: restart from scratch (up to 5x). */
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3838
/*
 * Channel-equalization phase of DisplayPort link training (the second
 * phase, run after intel_dp_start_link_train() has done clock recovery).
 *
 * Polls the sink's DPCD link status and applies the drive-setting
 * adjustments it requests.  If clock recovery is lost, or equalization
 * still fails after more than 5 consecutive tries, the clock-recovery
 * phase is redone from scratch; after more than 5 such restarts the
 * training is abandoned.  On success, train_set_valid is set so that a
 * later retrain may reuse the negotiated settings.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
	if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after more than 5 clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		/* Delay per the sink's DPCD before polling status. */
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			/* Clock recovery lost: redo phase 1, then resume
			 * equalization with the freshly trained settings. */
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3923
3924 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3925 {
3926         intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3927                                 DP_TRAINING_PATTERN_DISABLE);
3928 }
3929
/*
 * Power the DP port down and take the link out of training.  Only used
 * on non-DDI platforms (DDI platforms WARN and bail out), and only when
 * the port is currently enabled.  Ends with the IBX hardware workaround
 * that briefly re-enables the port on transcoder A so the matching HDMI
 * port can later be enabled there.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First switch to the idle training pattern; the mask and idle
	 * pattern bits differ between CPT/gen7-A and the other ports. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then turn the port and audio output off. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3985
/*
 * Read and cache the sink's DPCD receiver capabilities, plus the
 * optional capability blocks derived from them: eDP PSR/PSR2 support,
 * Training Pattern 3 support, the eDP 1.4 supported-link-rate table
 * and, for branch devices, the per-port downstream info.
 *
 * Returns false if the base DPCD read fails, the DPCD looks absent
 * (revision 0), or the downstream-port info fetch fails.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 needs gen9+ hardware on the source side. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (drm_dp_tps3_supported(intel_dp->dpcd) &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4079
4080 static void
4081 intel_dp_probe_oui(struct intel_dp *intel_dp)
4082 {
4083         u8 buf[3];
4084
4085         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4086                 return;
4087
4088         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4089                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4090                               buf[0], buf[1], buf[2]);
4091
4092         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4093                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4094                               buf[0], buf[1], buf[2]);
4095 }
4096
4097 static bool
4098 intel_dp_probe_mst(struct intel_dp *intel_dp)
4099 {
4100         u8 buf[1];
4101
4102         if (!intel_dp->can_mst)
4103                 return false;
4104
4105         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4106                 return false;
4107
4108         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4109                 if (buf[0] & DP_MST_CAP) {
4110                         DRM_DEBUG_KMS("Sink is MST capable\n");
4111                         intel_dp->is_mst = true;
4112                 } else {
4113                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4114                         intel_dp->is_mst = false;
4115                 }
4116         }
4117
4118         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4119         return intel_dp->is_mst;
4120 }
4121
4122 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4123 {
4124         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4125         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4126         u8 buf;
4127         int ret = 0;
4128
4129         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4130                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4131                 ret = -EIO;
4132                 goto out;
4133         }
4134
4135         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4136                                buf & ~DP_TEST_SINK_START) < 0) {
4137                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4138                 ret = -EIO;
4139                 goto out;
4140         }
4141
4142         intel_dp->sink_crc.started = false;
4143  out:
4144         hsw_enable_ips(intel_crtc);
4145         return ret;
4146 }
4147
/*
 * Arm the sink's CRC test mode (DP_TEST_SINK_START) so that
 * intel_dp_sink_crc() can sample CRCs.  Disables IPS for the duration;
 * intel_dp_sink_crc_stop() re-enables it.  Returns 0 on success, -EIO
 * on AUX failures, -ENOTTY if the sink doesn't advertise CRC support.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	/* Restart cleanly if a previous CRC run is still active. */
	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Remember the current CRC count so new CRCs can be detected. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* IPS stays off while the sink CRC runs; stop() re-enables it. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
4183
/*
 * Read a fresh 6-byte CRC from the sink into @crc.
 *
 * Waits up to 6 vblanks for the sink's CRC count to advance and the CRC
 * bytes to change relative to the previously sampled values.  Returns 0
 * on success, -EIO on AUX failures, -ETIMEDOUT if the panel never
 * produced any CRC.  If a CRC exists but never changes, the value is
 * accepted with a debug warning (unreliable counter).
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		/* Each attempt waits one vblank for a new CRC to land. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* A CRC is "new" if either the count or the bytes moved. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	/* Cache this sample for the next call's comparison. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			/* A CRC exists but never changed: accept it, warn. */
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4243
4244 static bool
4245 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4246 {
4247         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4248                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4249                                        sink_irq_vector, 1) == 1;
4250 }
4251
4252 static bool
4253 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4254 {
4255         int ret;
4256
4257         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4258                                              DP_SINK_COUNT_ESI,
4259                                              sink_irq_vector, 14);
4260         if (ret != 14)
4261                 return false;
4262
4263         return true;
4264 }
4265
4266 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4267 {
4268         uint8_t test_result = DP_TEST_ACK;
4269         return test_result;
4270 }
4271
4272 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4273 {
4274         uint8_t test_result = DP_TEST_NAK;
4275         return test_result;
4276 }
4277
4278 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4279 {
4280         uint8_t test_result = DP_TEST_NAK;
4281         struct intel_connector *intel_connector = intel_dp->attached_connector;
4282         struct drm_connector *connector = &intel_connector->base;
4283
4284         if (intel_connector->detect_edid == NULL ||
4285             connector->edid_corrupt ||
4286             intel_dp->aux.i2c_defer_count > 6) {
4287                 /* Check EDID read for NACKs, DEFERs and corruption
4288                  * (DP CTS 1.2 Core r1.1)
4289                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4290                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4291                  *    4.2.2.6 : EDID corruption detected
4292                  * Use failsafe mode for all cases
4293                  */
4294                 if (intel_dp->aux.i2c_nack_count > 0 ||
4295                         intel_dp->aux.i2c_defer_count > 0)
4296                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4297                                       intel_dp->aux.i2c_nack_count,
4298                                       intel_dp->aux.i2c_defer_count);
4299                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4300         } else {
4301                 struct edid *block = intel_connector->detect_edid;
4302
4303                 /* We have to write the checksum
4304                  * of the last block read
4305                  */
4306                 block += intel_connector->detect_edid->extensions;
4307
4308                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4309                                         DP_TEST_EDID_CHECKSUM,
4310                                         &block->checksum,
4311                                         1))
4312                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4313
4314                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4315                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4316         }
4317
4318         /* Set test active flag here so userspace doesn't interrupt things */
4319         intel_dp->compliance_test_active = 1;
4320
4321         return test_result;
4322 }
4323
4324 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4325 {
4326         uint8_t test_result = DP_TEST_NAK;
4327         return test_result;
4328 }
4329
4330 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4331 {
4332         uint8_t response = DP_TEST_NAK;
4333         uint8_t rxdata = 0;
4334         int status = 0;
4335
4336         intel_dp->compliance_test_active = 0;
4337         intel_dp->compliance_test_type = 0;
4338         intel_dp->compliance_test_data = 0;
4339
4340         intel_dp->aux.i2c_nack_count = 0;
4341         intel_dp->aux.i2c_defer_count = 0;
4342
4343         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4344         if (status <= 0) {
4345                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4346                 goto update_status;
4347         }
4348
4349         switch (rxdata) {
4350         case DP_TEST_LINK_TRAINING:
4351                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4352                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4353                 response = intel_dp_autotest_link_training(intel_dp);
4354                 break;
4355         case DP_TEST_LINK_VIDEO_PATTERN:
4356                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4357                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4358                 response = intel_dp_autotest_video_pattern(intel_dp);
4359                 break;
4360         case DP_TEST_LINK_EDID_READ:
4361                 DRM_DEBUG_KMS("EDID test requested\n");
4362                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4363                 response = intel_dp_autotest_edid(intel_dp);
4364                 break;
4365         case DP_TEST_LINK_PHY_TEST_PATTERN:
4366                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4367                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4368                 response = intel_dp_autotest_phy_pattern(intel_dp);
4369                 break;
4370         default:
4371                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4372                 break;
4373         }
4374
4375 update_status:
4376         status = drm_dp_dpcd_write(&intel_dp->aux,
4377                                    DP_TEST_RESPONSE,
4378                                    &response, 1);
4379         if (status <= 0)
4380                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4381 }
4382
/*
 * Service an MST sink interrupt: read the ESI (Event Status Indicator)
 * block, retrain the link if channel equalization was lost, then hand
 * the event to the MST topology manager and ack the handled bits back
 * to the sink, looping (via go_again) while new events keep arriving.
 *
 * Returns the topology manager's result (0 when nothing was handled),
 * or -EINVAL when not in MST mode or when the ESI read fails - in the
 * latter case MST is torn down and a hotplug event is generated.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits back to the
				 * sink, retrying the 3-byte write a few
				 * times on partial transfers. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have queued up meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4440
4441 /*
4442  * According to DP spec
4443  * 5.1.2:
4444  *  1. Read DPCD
4445  *  2. Configure link according to Receiver Capabilities
4446  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4447  *  4. Check link status on receipt of hot-plug interrupt
4448  */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex. */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* No CRTC means the port isn't driving anything; nothing to check. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if the link has lost channel equalization. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4497
4498 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status via DPCD, taking branch devices
 * (downstream ports) into account: SINK_COUNT when HPD-capable, a DDC
 * probe otherwise, falling back to "unknown" for port types that can't
 * be probed reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: detailed per-port downstream descriptors */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only a coarse type in DOWNSTREAMPORT_PRESENT */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4547
4548 static enum drm_connector_status
4549 edp_detect(struct intel_dp *intel_dp)
4550 {
4551         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4552         enum drm_connector_status status;
4553
4554         status = intel_panel_detect(dev);
4555         if (status == connector_status_unknown)
4556                 status = connector_status_connected;
4557
4558         return status;
4559 }
4560
4561 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4562                                        struct intel_digital_port *port)
4563 {
4564         u32 bit;
4565
4566         switch (port->port) {
4567         case PORT_A:
4568                 return true;
4569         case PORT_B:
4570                 bit = SDE_PORTB_HOTPLUG;
4571                 break;
4572         case PORT_C:
4573                 bit = SDE_PORTC_HOTPLUG;
4574                 break;
4575         case PORT_D:
4576                 bit = SDE_PORTD_HOTPLUG;
4577                 break;
4578         default:
4579                 MISSING_CASE(port->port);
4580                 return false;
4581         }
4582
4583         return I915_READ(SDEISR) & bit;
4584 }
4585
4586 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4587                                        struct intel_digital_port *port)
4588 {
4589         u32 bit;
4590
4591         switch (port->port) {
4592         case PORT_A:
4593                 return true;
4594         case PORT_B:
4595                 bit = SDE_PORTB_HOTPLUG_CPT;
4596                 break;
4597         case PORT_C:
4598                 bit = SDE_PORTC_HOTPLUG_CPT;
4599                 break;
4600         case PORT_D:
4601                 bit = SDE_PORTD_HOTPLUG_CPT;
4602                 break;
4603         default:
4604                 MISSING_CASE(port->port);
4605                 return false;
4606         }
4607
4608         return I915_READ(SDEISR) & bit;
4609 }
4610
4611 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4612                                        struct intel_digital_port *port)
4613 {
4614         u32 bit;
4615
4616         switch (port->port) {
4617         case PORT_B:
4618                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4619                 break;
4620         case PORT_C:
4621                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4622                 break;
4623         case PORT_D:
4624                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4625                 break;
4626         default:
4627                 MISSING_CASE(port->port);
4628                 return false;
4629         }
4630
4631         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4632 }
4633
4634 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4635                                        struct intel_digital_port *port)
4636 {
4637         u32 bit;
4638
4639         switch (port->port) {
4640         case PORT_B:
4641                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4642                 break;
4643         case PORT_C:
4644                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4645                 break;
4646         case PORT_D:
4647                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4648                 break;
4649         default:
4650                 MISSING_CASE(port->port);
4651                 return false;
4652         }
4653
4654         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4655 }
4656
4657 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4658                                        struct intel_digital_port *port)
4659 {
4660         u32 bit;
4661
4662         switch (port->port) {
4663         case PORT_A:
4664                 bit = BXT_DE_PORT_HP_DDIA;
4665                 break;
4666         case PORT_B:
4667                 bit = BXT_DE_PORT_HP_DDIB;
4668                 break;
4669         case PORT_C:
4670                 bit = BXT_DE_PORT_HP_DDIC;
4671                 break;
4672         default:
4673                 MISSING_CASE(port->port);
4674                 return false;
4675         }
4676
4677         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4678 }
4679
/**
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Return %true if @port is connected, %false otherwise.
 */
static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
					 struct intel_digital_port *port)
{
	/*
	 * NOTE(review): IBX is tested before the generic PCH split check —
	 * presumably IBX platforms also satisfy HAS_PCH_SPLIT(), so this
	 * ordering is load-bearing; confirm before reordering.
	 */
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_VALLEYVIEW(dev_priv))
		return vlv_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4701
4702 static enum drm_connector_status
4703 ironlake_dp_detect(struct intel_dp *intel_dp)
4704 {
4705         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4706         struct drm_i915_private *dev_priv = dev->dev_private;
4707         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4708
4709         if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4710                 return connector_status_disconnected;
4711
4712         return intel_dp_detect_dpcd(intel_dp);
4713 }
4714
4715 static enum drm_connector_status
4716 g4x_dp_detect(struct intel_dp *intel_dp)
4717 {
4718         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4719         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4720
4721         /* Can't disconnect eDP, but you can close the lid... */
4722         if (is_edp(intel_dp)) {
4723                 enum drm_connector_status status;
4724
4725                 status = intel_panel_detect(dev);
4726                 if (status == connector_status_unknown)
4727                         status = connector_status_connected;
4728                 return status;
4729         }
4730
4731         if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4732                 return connector_status_disconnected;
4733
4734         return intel_dp_detect_dpcd(intel_dp);
4735 }
4736
4737 static struct edid *
4738 intel_dp_get_edid(struct intel_dp *intel_dp)
4739 {
4740         struct intel_connector *intel_connector = intel_dp->attached_connector;
4741
4742         /* use cached edid if we have one */
4743         if (intel_connector->edid) {
4744                 /* invalid edid */
4745                 if (IS_ERR(intel_connector->edid))
4746                         return NULL;
4747
4748                 return drm_edid_duplicate(intel_connector->edid);
4749         } else
4750                 return drm_get_edid(&intel_connector->base,
4751                                     &intel_dp->aux.ddc);
4752 }
4753
4754 static void
4755 intel_dp_set_edid(struct intel_dp *intel_dp)
4756 {
4757         struct intel_connector *intel_connector = intel_dp->attached_connector;
4758         struct edid *edid;
4759
4760         edid = intel_dp_get_edid(intel_dp);
4761         intel_connector->detect_edid = edid;
4762
4763         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4764                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4765         else
4766                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4767 }
4768
4769 static void
4770 intel_dp_unset_edid(struct intel_dp *intel_dp)
4771 {
4772         struct intel_connector *intel_connector = intel_dp->attached_connector;
4773
4774         kfree(intel_connector->detect_edid);
4775         intel_connector->detect_edid = NULL;
4776
4777         intel_dp->has_audio = false;
4778 }
4779
4780 static enum intel_display_power_domain
4781 intel_dp_power_get(struct intel_dp *dp)
4782 {
4783         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4784         enum intel_display_power_domain power_domain;
4785
4786         power_domain = intel_display_port_power_domain(encoder);
4787         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4788
4789         return power_domain;
4790 }
4791
/*
 * Release a display power domain reference previously taken with
 * intel_dp_power_get().
 */
static void
intel_dp_power_put(struct intel_dp *dp,
		   enum intel_display_power_domain power_domain)
{
	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
}
4799
/*
 * Connector ->detect hook: probe the link, (re)read the EDID, run MST
 * discovery and service any pending sink IRQs. Holds the port's power
 * domain for the duration of the probe.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate the EDID from the previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Always balance the power reference taken above. */
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4871
/*
 * Connector ->force hook: refresh the cached EDID for a connector whose
 * status was forced by the user, without running a live detect cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Only re-read the EDID when the (forced) status is connected. */
	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4895
4896 static int intel_dp_get_modes(struct drm_connector *connector)
4897 {
4898         struct intel_connector *intel_connector = to_intel_connector(connector);
4899         struct edid *edid;
4900
4901         edid = intel_connector->detect_edid;
4902         if (edid) {
4903                 int ret = intel_connector_update_modes(connector, edid);
4904                 if (ret)
4905                         return ret;
4906         }
4907
4908         /* if eDP has no EDID, fall back to fixed mode */
4909         if (is_edp(intel_attached_dp(connector)) &&
4910             intel_connector->panel.fixed_mode) {
4911                 struct drm_display_mode *mode;
4912
4913                 mode = drm_mode_duplicate(connector->dev,
4914                                           intel_connector->panel.fixed_mode);
4915                 if (mode) {
4916                         drm_mode_probed_add(connector, mode);
4917                         return 1;
4918                 }
4919         }
4920
4921         return 0;
4922 }
4923
4924 static bool
4925 intel_dp_detect_audio(struct drm_connector *connector)
4926 {
4927         bool has_audio = false;
4928         struct edid *edid;
4929
4930         edid = to_intel_connector(connector)->detect_edid;
4931         if (edid)
4932                 has_audio = drm_detect_monitor_audio(edid);
4933
4934         return has_audio;
4935 }
4936
/*
 * Connector ->set_property hook: handle the force_audio, broadcast RGB
 * and (eDP-only) scaling mode properties. When a property change takes
 * effect and the encoder has an active CRTC, the mode is restored so
 * the change is applied to the hardware.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the DRM object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		/* Narrowing is fine: valid values are small HDMI_AUDIO_*. */
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change: skip the modeset restore. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
5024
/*
 * Connector ->destroy hook: free the cached EDIDs, tear down eDP panel
 * state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The fixed EDID may be NULL or an ERR_PTR; only free real ones. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
5043
/*
 * Encoder ->destroy hook: unregister the AUX channel, clean up MST
 * state, synchronously turn off any lingering eDP VDD, and free the
 * digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		/* Flush the delayed work before forcing VDD off below. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5069
/*
 * Encoder suspend hook: make sure eDP VDD is really off before the
 * system suspends; non-eDP encoders need nothing here.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5086
/*
 * Adopt a VDD enable left behind by the BIOS: take the matching power
 * domain reference our state tracking expects and schedule the normal
 * delayed VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to adopt if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5111
/*
 * Encoder ->reset hook: re-sync eDP power sequencer state with whatever
 * the BIOS left behind. Non-eDP encoders need no fixup.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5134
/* DRM connector hooks shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5146
/* Probe/mode helper hooks for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5152
/* DRM encoder hooks for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5157
/*
 * Handle a hotplug pulse on a DP digital port. Long pulses trigger a
 * full re-detect (live status, DPCD, MST probe); short pulses service
 * MST events or check link status. On MST failure the topology manager
 * is torn down. A power domain reference is held across the handling.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	/* Balance the power reference taken at the top. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5238
5239 /* Return which DP Port should be selected for Transcoder DP control */
5240 int
5241 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5242 {
5243         struct drm_device *dev = crtc->dev;
5244         struct intel_encoder *intel_encoder;
5245         struct intel_dp *intel_dp;
5246
5247         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5248                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5249
5250                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5251                     intel_encoder->type == INTEL_OUTPUT_EDP)
5252                         return intel_dp->output_reg;
5253         }
5254
5255         return -1;
5256 }
5257
5258 /* check the VBT to see whether the eDP is on DP-D port */
5259 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5260 {
5261         struct drm_i915_private *dev_priv = dev->dev_private;
5262         union child_device_config *p_child;
5263         int i;
5264         static const short port_mapping[] = {
5265                 [PORT_B] = PORT_IDPB,
5266                 [PORT_C] = PORT_IDPC,
5267                 [PORT_D] = PORT_IDPD,
5268         };
5269
5270         if (port == PORT_A)
5271                 return true;
5272
5273         if (!dev_priv->vbt.child_dev_num)
5274                 return false;
5275
5276         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5277                 p_child = dev_priv->vbt.child_dev + i;
5278
5279                 if (p_child->common.dvo_port == port_mapping[port] &&
5280                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5281                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5282                         return true;
5283         }
5284         return false;
5285 }
5286
5287 void
5288 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5289 {
5290         struct intel_connector *intel_connector = to_intel_connector(connector);
5291
5292         intel_attach_force_audio_property(connector);
5293         intel_attach_broadcast_rgb_property(connector);
5294         intel_dp->color_range_auto = true;
5295
5296         if (is_edp(intel_dp)) {
5297                 drm_mode_create_scaling_mode_property(connector->dev);
5298                 drm_object_attach_property(
5299                         &connector->base,
5300                         connector->dev->mode_config.scaling_mode_property,
5301                         DRM_MODE_SCALE_ASPECT);
5302                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5303         }
5304 }
5305
5306 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5307 {
5308         intel_dp->last_power_cycle = jiffies;
5309         intel_dp->last_power_on = jiffies;
5310         intel_dp->last_backlight_off = jiffies;
5311 }
5312
/*
 * Read the panel power sequencer delays from the hardware registers and
 * the VBT, combine them (taking the max of the two, falling back to the
 * eDP spec limits when both are zero) into intel_dp->pps_delays, and
 * cache the per-stage delays in hardware 100us units rounded up to ms.
 * Caller must hold pps_mutex; this is a no-op once t11_t12 is set.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in PP_CONTROL, 1-based. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5435
/*
 * Program the previously computed panel power sequencer delays
 * (intel_dp->pps_delays) into the platform's PPS hardware registers.
 *
 * Must be called with dev_priv->pps_mutex held (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* rawclk feeding the PP reference divider computed below (non-BXT) */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register addresses. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV/CHV have one PPS instance per pipe. */
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in the control register;
		 * read-modify-write to preserve the other control bits. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	/* Read back what actually landed in the registers for the log. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5523
5524 /**
5525  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5526  * @dev: DRM device
5527  * @refresh_rate: RR to be programmed
5528  *
5529  * This function gets called when refresh rate (RR) has to be changed from
5530  * one frequency to another. Switches can be between high and low RR
5531  * supported by the panel or to any other RR based on media playback (in
5532  * this case, RR value needs to be passed from user space).
5533  *
5534  * The caller of this function needs to take a lock on dev_priv->drrs.
5535  */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	/* Anything other than the panel's downclock rate is treated as high RR. */
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* NOTE(review): assumes panel.downclock_mode is non-NULL here; drrs.dp
	 * is only set when DRRS init found a downclock mode — confirm. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV): switch RR by selecting between the M1/N1 and
	 * M2/N2 link parameter sets. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7/CHV: toggle the RR via a PIPECONF mode-switch bit. */
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5627
5628 /**
5629  * intel_edp_drrs_enable - init drrs struct if supported
5630  * @intel_dp: DP struct
5631  *
5632  * Initializes frontbuffer_bits and drrs.dp
5633  */
5634 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5635 {
5636         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5637         struct drm_i915_private *dev_priv = dev->dev_private;
5638         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5639         struct drm_crtc *crtc = dig_port->base.base.crtc;
5640         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5641
5642         if (!intel_crtc->config->has_drrs) {
5643                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5644                 return;
5645         }
5646
5647         mutex_lock(&dev_priv->drrs.mutex);
5648         if (WARN_ON(dev_priv->drrs.dp)) {
5649                 DRM_ERROR("DRRS already enabled\n");
5650                 goto unlock;
5651         }
5652
5653         dev_priv->drrs.busy_frontbuffer_bits = 0;
5654
5655         dev_priv->drrs.dp = intel_dp;
5656
5657 unlock:
5658         mutex_unlock(&dev_priv->drrs.mutex);
5659 }
5660
5661 /**
5662  * intel_edp_drrs_disable - Disable DRRS
5663  * @intel_dp: DP struct
5664  *
5665  */
5666 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5667 {
5668         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5669         struct drm_i915_private *dev_priv = dev->dev_private;
5670         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5671         struct drm_crtc *crtc = dig_port->base.base.crtc;
5672         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5673
5674         if (!intel_crtc->config->has_drrs)
5675                 return;
5676
5677         mutex_lock(&dev_priv->drrs.mutex);
5678         if (!dev_priv->drrs.dp) {
5679                 mutex_unlock(&dev_priv->drrs.mutex);
5680                 return;
5681         }
5682
5683         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5684                 intel_dp_set_drrs_state(dev_priv->dev,
5685                         intel_dp->attached_connector->panel.
5686                         fixed_mode->vrefresh);
5687
5688         dev_priv->drrs.dp = NULL;
5689         mutex_unlock(&dev_priv->drrs.mutex);
5690
5691         cancel_delayed_work_sync(&dev_priv->drrs.work);
5692 }
5693
5694 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5695 {
5696         struct drm_i915_private *dev_priv =
5697                 container_of(work, typeof(*dev_priv), drrs.work.work);
5698         struct intel_dp *intel_dp;
5699
5700         mutex_lock(&dev_priv->drrs.mutex);
5701
5702         intel_dp = dev_priv->drrs.dp;
5703
5704         if (!intel_dp)
5705                 goto unlock;
5706
5707         /*
5708          * The delayed work can race with an invalidate hence we need to
5709          * recheck.
5710          */
5711
5712         if (dev_priv->drrs.busy_frontbuffer_bits)
5713                 goto unlock;
5714
5715         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5716                 intel_dp_set_drrs_state(dev_priv->dev,
5717                         intel_dp->attached_connector->panel.
5718                         downclock_mode->vrefresh);
5719
5720 unlock:
5721         mutex_unlock(&dev_priv->drrs.mutex);
5722 }
5723
5724 /**
5725  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5726  * @dev: DRM device
5727  * @frontbuffer_bits: frontbuffer plane tracking bits
5728  *
 * This function gets called every time rendering on the given planes starts.
5730  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5731  *
5732  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5733  */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* The screen is about to get busy, so a pending downclock is stale. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not enabled — nothing to track. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffers on the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5766
5767 /**
5768  * intel_edp_drrs_flush - Restart Idleness DRRS
5769  * @dev: DRM device
5770  * @frontbuffer_bits: frontbuffer plane tracking bits
5771  *
5772  * This function gets called every time rendering on the given planes has
5773  * completed or flip on a crtc is completed. So DRRS should be upclocked
5774  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5775  * if no other planes are dirty.
5776  *
5777  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5778  */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart idleness detection from scratch below. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not enabled — nothing to track. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffers on the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means new content just hit the screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5818
5819 /**
5820  * DOC: Display Refresh Rate Switching (DRRS)
5821  *
5822  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5824  * dynamically, based on the usage scenario. This feature is applicable
5825  * for internal panels.
5826  *
5827  * Indication that the panel supports DRRS is given by the panel EDID, which
5828  * would list multiple refresh rates for one resolution.
5829  *
5830  * DRRS is of 2 types - static and seamless.
5831  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5832  * (may appear as a blink on screen) and is used in dock-undock scenario.
5833  * Seamless DRRS involves changing RR without any visual effect to the user
5834  * and can be used during normal system usage. This is done by programming
5835  * certain registers.
5836  *
5837  * Support for static/seamless DRRS may be indicated in the VBT based on
5838  * inputs from the panel spec.
5839  *
5840  * DRRS saves power by switching to low RR based on usage scenarios.
5841  *
5842  * eDP DRRS:-
5843  *        The implementation is based on frontbuffer tracking implementation.
5844  * When there is a disturbance on the screen triggered by user activity or a
5845  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5846  * When there is no movement on screen, after a timeout of 1 second, a switch
5847  * to low RR is made.
5848  *        For integration with frontbuffer tracking code,
5849  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5850  *
5851  * DRRS can be further extended to support other internal panels and also
5852  * the scenario of video playback wherein RR is set based on the rate
5853  * requested by userspace.
5854  */
5855
5856 /**
5857  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5858  * @intel_connector: eDP connector
5859  * @fixed_mode: preferred mode of panel
5860  *
5861  * This function is  called only once at driver load to initialize basic
5862  * DRRS stuff.
5863  *
5864  * Returns:
5865  * Downclock mode if panel supports it, else return NULL.
5866  * DRRS support is determined by the presence of downclock mode (apart
5867  * from VBT setting).
5868  */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work/mutex are set up unconditionally so the rest of the DRRS code
	 * can rely on them even when DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* Look for a lower-refresh-rate variant of the fixed mode. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panels come up at the high refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5905
/*
 * eDP-specific connector setup: sanitize VDD state, probe DPCD/EDID to
 * rule out a ghost panel, program the PPS registers, determine the fixed
 * (and optional downclock) panel mode, and initialize panel/backlight.
 *
 * Returns false if the panel looks like a ghost (no DPCD), true otherwise.
 * For non-eDP connectors this is a no-op that returns true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/* Cache the EDID; failures are encoded as ERR_PTR so later code can
	 * distinguish "no EDID" (-ENOENT) from "invalid EDID" (-EINVAL). */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
6012
/*
 * Initialize a DP/eDP connector on the given digital port: set up the
 * AUX vfuncs, DRM connector object, hotplug pin, panel power sequencer
 * (eDP), AUX channel, and MST. Returns false and tears everything back
 * down if the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick per-platform AUX clock divider ... */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* ... and AUX send-control computation. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping workaround: port B uses port A's HPD pin. */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* PPS delays/registers must be in place before any AUX transfers. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* Ghost eDP panel: unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
6158
6159 void
6160 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6161 {
6162         struct drm_i915_private *dev_priv = dev->dev_private;
6163         struct intel_digital_port *intel_dig_port;
6164         struct intel_encoder *intel_encoder;
6165         struct drm_encoder *encoder;
6166         struct intel_connector *intel_connector;
6167
6168         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6169         if (!intel_dig_port)
6170                 return;
6171
6172         intel_connector = intel_connector_alloc();
6173         if (!intel_connector) {
6174                 kfree(intel_dig_port);
6175                 return;
6176         }
6177
6178         intel_encoder = &intel_dig_port->base;
6179         encoder = &intel_encoder->base;
6180
6181         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6182                          DRM_MODE_ENCODER_TMDS);
6183
6184         intel_encoder->compute_config = intel_dp_compute_config;
6185         intel_encoder->disable = intel_disable_dp;
6186         intel_encoder->get_hw_state = intel_dp_get_hw_state;
6187         intel_encoder->get_config = intel_dp_get_config;
6188         intel_encoder->suspend = intel_dp_encoder_suspend;
6189         if (IS_CHERRYVIEW(dev)) {
6190                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6191                 intel_encoder->pre_enable = chv_pre_enable_dp;
6192                 intel_encoder->enable = vlv_enable_dp;
6193                 intel_encoder->post_disable = chv_post_disable_dp;
6194                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6195         } else if (IS_VALLEYVIEW(dev)) {
6196                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6197                 intel_encoder->pre_enable = vlv_pre_enable_dp;
6198                 intel_encoder->enable = vlv_enable_dp;
6199                 intel_encoder->post_disable = vlv_post_disable_dp;
6200         } else {
6201                 intel_encoder->pre_enable = g4x_pre_enable_dp;
6202                 intel_encoder->enable = g4x_enable_dp;
6203                 if (INTEL_INFO(dev)->gen >= 5)
6204                         intel_encoder->post_disable = ilk_post_disable_dp;
6205         }
6206
6207         intel_dig_port->port = port;
6208         intel_dig_port->dp.output_reg = output_reg;
6209
6210         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6211         if (IS_CHERRYVIEW(dev)) {
6212                 if (port == PORT_D)
6213                         intel_encoder->crtc_mask = 1 << 2;
6214                 else
6215                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6216         } else {
6217                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6218         }
6219         intel_encoder->cloneable = 0;
6220
6221         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6222         dev_priv->hotplug.irq_port[port] = intel_dig_port;
6223
6224         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6225                 drm_encoder_cleanup(encoder);
6226                 kfree(intel_dig_port);
6227                 kfree(intel_connector);
6228         }
6229 }
6230
6231 void intel_dp_mst_suspend(struct drm_device *dev)
6232 {
6233         struct drm_i915_private *dev_priv = dev->dev_private;
6234         int i;
6235
6236         /* disable MST */
6237         for (i = 0; i < I915_MAX_PORTS; i++) {
6238                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6239                 if (!intel_dig_port)
6240                         continue;
6241
6242                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6243                         if (!intel_dig_port->dp.can_mst)
6244                                 continue;
6245                         if (intel_dig_port->dp.is_mst)
6246                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6247                 }
6248         }
6249 }
6250
6251 void intel_dp_mst_resume(struct drm_device *dev)
6252 {
6253         struct drm_i915_private *dev_priv = dev->dev_private;
6254         int i;
6255
6256         for (i = 0; i < I915_MAX_PORTS; i++) {
6257                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6258                 if (!intel_dig_port)
6259                         continue;
6260                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6261                         int ret;
6262
6263                         if (!intel_dig_port->dp.can_mst)
6264                                 continue;
6265
6266                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6267                         if (ret != 0) {
6268                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6269                         }
6270                 }
6271         }
6272 }