2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include <subdev/bios.h>
28 #include <subdev/bios/disp.h>
29 #include <subdev/bios/init.h>
30 #include <subdev/bios/pll.h>
31 #include <subdev/devinit.h>
/* Arm vblank event delivery for @head: set bit 0 of the per-head
 * interrupt-enable register at 0x6100c0 + head*0x800.
 * NOTE(review): surrounding lines (return type, braces) are missing
 * from this view of the file. */
34 gf119_disp_vblank_init(struct nvkm_event *event, int type, int head)
36 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
37 struct nvkm_device *device = disp->engine.subdev.device;
38 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
/* Disarm vblank event delivery for @head: clear bit 0 of the same
 * per-head register written by gf119_disp_vblank_init(). */
42 gf119_disp_vblank_fini(struct nvkm_event *event, int type, int head)
44 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
45 struct nvkm_device *device = disp->engine.subdev.device;
46 nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
/* Event hooks the common disp code uses to construct and (dis)arm
 * per-head vblank notifications on GF119-class hardware. */
49 const struct nvkm_event_func
50 gf119_disp_vblank_func = {
51 .ctor = nvkm_disp_vblank_ctor,
52 .init = gf119_disp_vblank_init,
53 .fini = gf119_disp_vblank_fini,
/* Find the nvkm_output that matches the (head, or, ctrl) triple and look up
 * its VBIOS output table entry via nvbios_outp_match().
 *
 * The switch decodes the SOR "mc" protocol field (ctrl & 0xf00) into a DCB
 * output type plus a sublink mask; DAC paths appear to use the analog
 * default set before the switch (intervening lines are missing from this
 * view — TODO confirm against the full file).
 */
56 static struct nvkm_output *
57 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
58 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
59 struct nvbios_outp *info)
61 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
62 struct nvkm_bios *bios = subdev->device->bios;
63 struct nvkm_output *outp;
67 type = DCB_OUTPUT_ANALOG;
/* Decode SOR protocol -> DCB type and sublink selection. */
71 switch (ctrl & 0x00000f00) {
72 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
73 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
74 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
75 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
76 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
77 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
79 nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
/* Move the sublink mask into the DCB hashm link-bit positions... */
84 mask = 0x00c0 & (mask << 6);
/* ...and require the head bit as well. */
86 mask |= 0x0100 << head;
/* Walk the registered outputs for one whose DCB hash matches. */
88 list_for_each_entry(outp, &disp->base.outp, head) {
89 if ((outp->info.hasht & 0xff) == type &&
90 (outp->info.hashm & mask) == mask) {
91 *data = nvbios_outp_match(bios, outp->info.hasht,
93 ver, hdr, cnt, len, info);
/* Execute VBIOS output script @id for whichever OR currently owns @head.
 *
 * Scans the per-OR control registers at 0x640180 + or*0x20 until one has
 * the head bit set, resolves the owning output via exec_lookup(), then
 * runs the matching info.script[id] through the VBIOS init interpreter.
 */
103 static struct nvkm_output *
104 exec_script(struct nv50_disp *disp, int head, int id)
106 struct nvkm_device *device = disp->base.engine.subdev.device;
107 struct nvkm_bios *bios = device->bios;
108 struct nvkm_output *outp;
109 struct nvbios_outp info;
110 u8 ver, hdr, cnt, len;
/* Find the OR (0..7) driving this head. */
114 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
115 ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
116 if (ctrl & (1 << head))
123 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
125 struct nvbios_init init = {
126 .subdev = nv_subdev(disp),
128 .offset = info.script[id],
/* Execute the VBIOS clock-comparison script @id for @head at pixel clock
 * @pclk, returning the output-specific configuration value in *conf.
 *
 * Like exec_script(), but scans the 0x660180 (armed/assembly state)
 * per-OR control registers, and additionally matches an output
 * configuration (ocfg) table entry and its clkcmp sub-table.
 */
140 static struct nvkm_output *
141 exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
143 struct nvkm_device *device = disp->base.engine.subdev.device;
144 struct nvkm_bios *bios = device->bios;
145 struct nvkm_output *outp;
146 struct nvbios_outp info1;
147 struct nvbios_ocfg info2;
148 u8 ver, hdr, cnt, len;
/* Find the OR (0..7) driving this head. */
152 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
153 ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
154 if (ctrl & (1 << head))
161 outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
/* *conf depends on the output type: protocol field for TMDS,
 * cached LVDS config for LVDS panels. */
165 switch (outp->info.type) {
166 case DCB_OUTPUT_TMDS:
167 *conf = (ctrl & 0x00000f00) >> 8;
171 case DCB_OUTPUT_LVDS:
172 *conf = disp->sor.lvdsconf;
175 *conf = (ctrl & 0x00000f00) >> 8;
177 case DCB_OUTPUT_ANALOG:
/* id == 0xff is used by callers as "look up only, don't run". */
183 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
184 if (data && id < 0xff) {
185 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
187 struct nvbios_init init = {
188 .subdev = nv_subdev(disp),
/* Supervisor 1.0: run VBIOS output script 1 for @head. */
204 gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
206 exec_script(disp, head, 1);
/* Supervisor 2.0: run VBIOS output script 2 for @head, and for DP
 * outputs additionally run script[4] and mark link training as not
 * done (lt.done = 0) so the link is retrained before re-attach. */
210 gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
212 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
213 struct nvkm_output *outp = exec_script(disp, head, 2);
215 /* see note in nv50_disp_intr_unk20_0() */
216 if (outp && outp->info.type == DCB_OUTPUT_DP) {
217 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
218 struct nvbios_init init = {
220 .bios = subdev->device->bios,
223 .offset = outpdp->info.script[4],
228 atomic_set(&outpdp->lt.done, 0);
/* Supervisor 2.1: program the head's video PLL (VPLL0 + head) to the
 * requested pixel clock (register 0x660450 holds it in Hz; the divide
 * by 1000 yields kHz — TODO confirm units), then clear 0x612200. */
233 gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
235 struct nvkm_device *device = disp->base.engine.subdev.device;
236 struct nvkm_devinit *devinit = device->devinit;
237 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
239 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
240 nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
/* Supervisor 2.2 helper: configure DisplayPort watermarks for @head.
 *
 * Computes the number of link symbols available per horizontal and
 * vertical blanking period (algorithm credited to comments in the
 * Tegra driver) and the transfer-unit (TU) activesym value from the
 * ratio of stream data rate to total link bandwidth.
 */
244 gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
245 struct dcb_output *outp)
247 struct nvkm_device *device = disp->base.engine.subdev.device;
248 const int or = ffs(outp->or) - 1;
249 const u32 ctrl = nvkm_rd32(device, 0x660200 + (or * 0x020));
250 const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
/* Raw CRTC timing fields, low 16 bits of each register. */
251 const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
252 const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
253 const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
254 const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
/* ctrl protocol 0x800 selects link A (0), everything else link B. */
255 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
256 const u32 hoff = (head * 0x800);
257 const u32 soff = ( or * 0x800);
258 const u32 loff = (link * 0x080) + soff;
259 const u32 symbol = 100000;
261 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
262 u32 clksor = nvkm_rd32(device, 0x612300 + soff);
263 u32 datarate, link_nr, link_bw, bits;
/* Lane count from the enabled-lane bitmask; link rate field from
 * the SOR clock control register. */
266 link_nr = hweight32(dpctrl & 0x000f0000);
267 link_bw = (clksor & 0x007c0000) >> 18;
270 /* symbols/hblank - algorithm taken from comments in tegra driver */
271 value = vblanke + vactive - vblanks - 7;
272 value = value * link_bw;
/* Subtract 3 symbols when enhanced framing (bit 14) is on. */
274 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
275 nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
277 /* symbols/vblank - algorithm taken from comments in tegra driver */
278 value = vblanks - vblanke - 25;
279 value = value * link_bw;
281 value = value - ((36 / link_nr) + 3) - 1;
282 nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
/* Bits per pixel from the 0x660404 colour depth field
 * (0x180 -> 30bpp, 0x140 -> 24bpp); stream rate in bytes. */
285 if ((conf & 0x3c0) == 0x180) bits = 30;
286 else if ((conf & 0x3c0) == 0x140) bits = 24;
288 datarate = (pclk * bits) / 8;
/* TU activesym = TU * (1 - datarate / (link_nr * link_bw)),
 * computed in fixed point with 'symbol' (100000) as the scale. */
292 do_div(ratio, link_nr * link_bw);
294 value = (symbol - ratio) * TU;
296 do_div(value, symbol);
297 do_div(value, symbol);
302 nvkm_wr32(device, 0x616610 + hoff, value);
/* Supervisor 2.2: train/attach the output for @head.
 *
 * Looks up the output (exec_clkcmp with id 0xff = lookup only), scales
 * the DP link clock for 30/24bpp modes, trains the DP link, runs the
 * real clkcmp script 0, then programs the DAC/SOR clock control
 * register (0x612280 for analog, 0x612300 for digital ORs).
 */
306 gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
308 struct nvkm_device *device = disp->base.engine.subdev.device;
309 struct nvkm_output *outp;
310 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
311 u32 conf, addr, data;
313 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
317 /* see note in nv50_disp_intr_unk20_2() */
318 if (outp->info.type == DCB_OUTPUT_DP) {
/* Scale pclk by bits-per-pixel per the head's depth field. */
319 u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
320 switch ((sync & 0x000003c0) >> 6) {
321 case 6: pclk = pclk * 30; break;
322 case 5: pclk = pclk * 24; break;
329 if (nvkm_output_dp_train(outp, pclk, true))
330 OUTP_ERR(outp, "link not trained before attach");
/* GPU-specific SOR magic hook (set by the chip-level ctor). */
333 disp->sor.magic(outp);
336 exec_clkcmp(disp, head, 0, pclk, &conf);
338 if (outp->info.type == DCB_OUTPUT_ANALOG) {
339 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
342 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
343 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
344 switch (outp->info.type) {
345 case DCB_OUTPUT_TMDS:
346 nvkm_mask(device, addr, 0x007c0000, 0x00280000);
/* DP also needs the transfer-unit/watermark setup. */
349 gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
356 nvkm_mask(device, addr, 0x00000707, data);
/* Supervisor 3.0: run clkcmp script 1 for @head at its pixel clock. */
360 gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
362 struct nvkm_device *device = disp->base.engine.subdev.device;
363 u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
366 exec_clkcmp(disp, head, 1, pclk, &conf);
/* Workqueue handler for display supervisor interrupts (scheduled from
 * gf119_disp_intr()). Reads each head's status mask from 0x6101d4,
 * dispatches the per-head stage handlers according to which supervisor
 * stage bits are set in disp->super, then acknowledges by clearing the
 * per-head status registers and writing 0x80000000 to 0x6101d0. */
370 gf119_disp_intr_supervisor(struct work_struct *work)
372 struct nv50_disp *disp =
373 container_of(work, struct nv50_disp, supervisor);
374 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
375 struct nvkm_device *device = subdev->device;
/* Snapshot the per-head supervisor status words. */
379 nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
380 for (head = 0; head < disp->head.nr; head++) {
381 mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
382 nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
/* Stage 1: disconnect. */
385 if (disp->super & 0x00000001) {
386 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
387 for (head = 0; head < disp->head.nr; head++) {
388 if (!(mask[head] & 0x00001000))
390 nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
391 gf119_disp_intr_unk1_0(disp, head);
/* Stage 2: reconfigure (scripts, PLLs, attach). */
394 if (disp->super & 0x00000002) {
395 for (head = 0; head < disp->head.nr; head++) {
396 if (!(mask[head] & 0x00001000))
398 nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
399 gf119_disp_intr_unk2_0(disp, head);
401 for (head = 0; head < disp->head.nr; head++) {
/* 2.1 keys off a different status bit (0x00010000). */
402 if (!(mask[head] & 0x00010000))
404 nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
405 gf119_disp_intr_unk2_1(disp, head);
407 for (head = 0; head < disp->head.nr; head++) {
408 if (!(mask[head] & 0x00001000))
410 nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
411 gf119_disp_intr_unk2_2(disp, head);
/* Stage 3: post-update. */
414 if (disp->super & 0x00000004) {
415 for (head = 0; head < disp->head.nr; head++) {
416 if (!(mask[head] & 0x00001000))
418 nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
419 gf119_disp_intr_unk4_0(disp, head);
/* Acknowledge: clear per-head status, then signal completion. */
423 for (head = 0; head < disp->head.nr; head++)
424 nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
425 nvkm_wr32(device, 0x6101d0, 0x80000000);
/* Handle a display channel exception on @chid: log the offending method
 * and data (12 bytes of error state per channel at 0x6101f0), dump the
 * channel's method state for known methods, then acknowledge the error
 * (0x61009c) and reset the channel's error slot. */
429 gf119_disp_intr_error(struct nv50_disp *disp, int chid)
431 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
432 struct nvkm_device *device = subdev->device;
433 u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
434 u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
435 u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
437 nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
438 chid, (mthd & 0x0000ffc), data, mthd, unkn);
/* Guard: chid comes from hardware, bound it before indexing chan[]. */
440 if (chid < ARRAY_SIZE(disp->chan)) {
441 switch (mthd & 0xffc) {
443 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
/* Ack the error interrupt and rearm the channel's error slot. */
450 nvkm_wr32(device, 0x61009c, (1 << chid));
451 nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
/* Top-level PDISP interrupt handler. Dispatches on the master interrupt
 * register 0x610088: bit 0 = channel completion uevents, bit 1 = channel
 * errors, bit 20 = supervisor request, plus per-head vblank bits. */
455 gf119_disp_intr(struct nvkm_subdev *subdev)
457 struct nv50_disp *disp = (void *)subdev;
458 struct nvkm_device *device = subdev->device;
459 u32 intr = nvkm_rd32(device, 0x610088);
/* Channel completion: send a uevent and ack each pending chid. */
462 if (intr & 0x00000001) {
463 u32 stat = nvkm_rd32(device, 0x61008c);
465 int chid = __ffs(stat); stat &= ~(1 << chid);
466 nv50_disp_chan_uevent_send(disp, chid);
467 nvkm_wr32(device, 0x61008c, 1 << chid);
/* Channel exception: decode and report via gf119_disp_intr_error(). */
472 if (intr & 0x00000002) {
473 u32 stat = nvkm_rd32(device, 0x61009c);
474 int chid = ffs(stat) - 1;
476 gf119_disp_intr_error(disp, chid);
/* Supervisor request: record the stage bits and defer the heavy
 * work to the supervisor workqueue. */
480 if (intr & 0x00100000) {
481 u32 stat = nvkm_rd32(device, 0x6100ac);
482 if (stat & 0x00000007) {
483 disp->super = (stat & 0x00000007);
484 schedule_work(&disp->supervisor);
485 nvkm_wr32(device, 0x6100ac, disp->super);
/* Anything else in 0x6100ac is unexpected: warn and ack. */
490 nvkm_warn(subdev, "intr24 %08x\n", stat);
491 nvkm_wr32(device, 0x6100ac, stat);
/* Per-head vblank: forward to the common disp vblank handling. */
497 for (i = 0; i < disp->head.nr; i++) {
498 u32 mask = 0x01000000 << i;
500 u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
501 if (stat & 0x00000001)
502 nvkm_disp_vblank(&disp->base, i);
/* Ack/flush: the mask write and read-back appear to clear and
 * post the per-head interrupt state — TODO confirm intent. */
503 nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
504 nvkm_rd32(device, 0x6100c0 + (i * 0x800));
/* Engine-level function table; the root object class implements the
 * user-visible display interface. (Initializer is truncated in this
 * view of the file.) */
509 static const struct nvkm_disp_func
511 .root = &gf119_disp_root_oclass,
/* Constructor for the GF119 display engine object.
 *
 * Reads the head count from register 0x022448, creates the base disp
 * engine, sets up the channel-completion event source, installs the
 * interrupt handler and supervisor work item, and fills in the
 * DAC/SOR hardware method pointers shared with the NV50 code.
 */
515 gf119_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
516 struct nvkm_oclass *oclass, void *data, u32 size,
517 struct nvkm_object **pobject)
519 struct nvkm_device *device = (void *)parent;
520 struct nv50_disp *disp;
/* Number of CRTCs/heads is exposed by the hardware itself. */
521 int heads = nvkm_rd32(device, 0x022448);
524 ret = nvkm_disp_create(parent, engine, oclass, heads,
525 "PDISP", "display", &disp);
526 *pobject = nv_object(disp);
530 disp->base.func = &gf119_disp;
/* 17 channel-completion event slots — TODO confirm this matches the
 * channel count for this generation. */
532 ret = nvkm_event_init(&gf119_disp_chan_uevent, 1, 17, &disp->uevent);
536 nv_subdev(disp)->intr = gf119_disp_intr;
537 INIT_WORK(&disp->supervisor, gf119_disp_intr_supervisor);
538 disp->head.nr = heads;
/* Hardware method pointers; DAC/SOR power paths are shared with NV50. */
541 disp->dac.power = nv50_dac_power;
542 disp->dac.sense = nv50_dac_sense;
543 disp->sor.power = nv50_sor_power;
544 disp->sor.hda_eld = gf119_hda_eld;
545 disp->sor.hdmi = gf119_hdmi_ctrl;
550 gf110_disp_oclass = &(struct nv50_disp_impl) {
551 .base.base.handle = NV_ENGINE(DISP, 0x90),
552 .base.base.ofuncs = &(struct nvkm_ofuncs) {
553 .ctor = gf119_disp_ctor,
554 .dtor = _nvkm_disp_dtor,
555 .init = _nvkm_disp_init,
556 .fini = _nvkm_disp_fini,
558 .base.outp.internal.crt = nv50_dac_output_new,
559 .base.outp.internal.tmds = nv50_sor_output_new,
560 .base.outp.internal.lvds = nv50_sor_output_new,
561 .base.outp.internal.dp = gf119_sor_dp_new,
562 .base.vblank = &gf119_disp_vblank_func,
563 .head.scanoutpos = gf119_disp_root_scanoutpos,