2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/client.h>
27 #include <core/parent.h>
28 #include <core/handle.h>
29 #include <nvif/unpack.h>
30 #include <nvif/class.h>
32 #include <engine/disp.h>
34 #include <subdev/bios.h>
35 #include <subdev/bios/dcb.h>
36 #include <subdev/bios/disp.h>
37 #include <subdev/bios/init.h>
38 #include <subdev/bios/pll.h>
39 #include <subdev/devinit.h>
40 #include <subdev/fb.h>
41 #include <subdev/timer.h>
45 /*******************************************************************************
46 * EVO DMA channel base class
47 ******************************************************************************/
/* Attach a DMA object to an EVO DMA channel: insert a handle into the
 * display engine's RAMHT so the channel can reference the object by name.
 * Handle data packs the channel id (bits 31:27), the object's instance
 * node offset (shifted left 9) and a valid bit (bit 0).
 * NOTE(review): the return-type line and braces are missing from this
 * view of the file (lossy extraction) — code left byte-identical.
 */
50 nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
51 struct nouveau_object *object, u32 name)
53 struct nv50_disp_base *base = (void *)parent->parent;
54 struct nv50_disp_chan *chan = (void *)parent;
55 u32 addr = nv_gpuobj(object)->node->offset;
56 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
57 return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
/* Detach a previously attached DMA object: remove its RAMHT entry using
 * the cookie returned by nouveau_ramht_insert() at attach time. */
61 nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
63 struct nv50_disp_base *base = (void *)parent->parent;
64 nouveau_ramht_remove(base->ramht, cookie);
/* Bring up an EVO DMA channel: run the common nv50 channel init, unmask
 * this channel's error/completion interrupt bits, program the pushbuffer
 * and control registers, kick the channel, then wait for the busy bit
 * (0x80000000 in 0x610490 + chid*0x10) to clear.
 * NOTE(review): blank/brace/`if (ret) return` lines are missing from
 * this view of the file — code left byte-identical.
 */
68 nvd0_disp_dmac_init(struct nouveau_object *object)
70 struct nv50_disp_priv *priv = (void *)object->engine;
71 struct nv50_disp_dmac *dmac = (void *)object;
72 int chid = dmac->base.chid;
75 ret = nv50_disp_chan_init(&dmac->base);
79 /* enable error reporting */
80 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
81 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
83 /* initialise channel for dma command submission */
84 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
85 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
86 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
87 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
88 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
89 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
91 /* wait for it to go inactive */
92 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
93 nv_error(dmac, "init: 0x%08x\n",
94 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down an EVO DMA channel: deactivate it, wait for the state field
 * (0x001e0000) to report idle, mask its error interrupts, then run the
 * common nv50 channel fini.
 * NOTE(review): brace/return lines are missing from this view of the
 * file — code left byte-identical.
 */
102 nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
104 struct nv50_disp_priv *priv = (void *)object->engine;
105 struct nv50_disp_dmac *dmac = (void *)object;
106 int chid = dmac->base.chid;
108 /* deactivate channel */
109 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
110 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
111 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
112 nv_error(dmac, "fini: 0x%08x\n",
113 nv_rd32(priv, 0x610490 + (chid * 0x10)));
118 /* disable error reporting */
119 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
120 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
122 return nv50_disp_chan_fini(&dmac->base, suspend);
125 /*******************************************************************************
126 * EVO master channel object
127 ******************************************************************************/
/* Core (master) channel: global method -> debug-mirror register map
 * (EVO methods mirrored at 0x660000 for supervisor/error dumps).
 * NOTE(review): struct header fields and terminator lines are missing
 * from this view of the file — entries left byte-identical.
 */
129 const struct nv50_disp_mthd_list
130 nvd0_disp_mast_mthd_base = {
134 { 0x0080, 0x660080 },
135 { 0x0084, 0x660084 },
136 { 0x0088, 0x660088 },
137 { 0x008c, 0x000000 },
/* Core channel: per-DAC method -> debug-mirror register map. */
142 const struct nv50_disp_mthd_list
143 nvd0_disp_mast_mthd_dac = {
147 { 0x0180, 0x660180 },
148 { 0x0184, 0x660184 },
149 { 0x0188, 0x660188 },
150 { 0x0190, 0x660190 },
/* Core channel: per-SOR method -> debug-mirror register map. */
155 const struct nv50_disp_mthd_list
156 nvd0_disp_mast_mthd_sor = {
160 { 0x0200, 0x660200 },
161 { 0x0204, 0x660204 },
162 { 0x0208, 0x660208 },
163 { 0x0210, 0x660210 },
/* Core channel: per-PIOR method -> debug-mirror register map. */
168 const struct nv50_disp_mthd_list
169 nvd0_disp_mast_mthd_pior = {
173 { 0x0300, 0x660300 },
174 { 0x0304, 0x660304 },
175 { 0x0308, 0x660308 },
176 { 0x0310, 0x660310 },
/* Core channel: per-head method -> debug-mirror register map (the bulk
 * of the mode-set state: timings, scaling, surface config, etc. —
 * exact per-method semantics not derivable from this table alone). */
181 static const struct nv50_disp_mthd_list
182 nvd0_disp_mast_mthd_head = {
186 { 0x0400, 0x660400 },
187 { 0x0404, 0x660404 },
188 { 0x0408, 0x660408 },
189 { 0x040c, 0x66040c },
190 { 0x0410, 0x660410 },
191 { 0x0414, 0x660414 },
192 { 0x0418, 0x660418 },
193 { 0x041c, 0x66041c },
194 { 0x0420, 0x660420 },
195 { 0x0424, 0x660424 },
196 { 0x0428, 0x660428 },
197 { 0x042c, 0x66042c },
198 { 0x0430, 0x660430 },
199 { 0x0434, 0x660434 },
200 { 0x0438, 0x660438 },
201 { 0x0440, 0x660440 },
202 { 0x0444, 0x660444 },
203 { 0x0448, 0x660448 },
204 { 0x044c, 0x66044c },
205 { 0x0450, 0x660450 },
206 { 0x0454, 0x660454 },
207 { 0x0458, 0x660458 },
208 { 0x045c, 0x66045c },
209 { 0x0460, 0x660460 },
210 { 0x0468, 0x660468 },
211 { 0x046c, 0x66046c },
212 { 0x0470, 0x660470 },
213 { 0x0474, 0x660474 },
214 { 0x0480, 0x660480 },
215 { 0x0484, 0x660484 },
216 { 0x048c, 0x66048c },
217 { 0x0490, 0x660490 },
218 { 0x0494, 0x660494 },
219 { 0x0498, 0x660498 },
220 { 0x04b0, 0x6604b0 },
221 { 0x04b8, 0x6604b8 },
222 { 0x04bc, 0x6604bc },
223 { 0x04c0, 0x6604c0 },
224 { 0x04c4, 0x6604c4 },
225 { 0x04c8, 0x6604c8 },
226 { 0x04d0, 0x6604d0 },
227 { 0x04d4, 0x6604d4 },
228 { 0x04e0, 0x6604e0 },
229 { 0x04e4, 0x6604e4 },
230 { 0x04e8, 0x6604e8 },
231 { 0x04ec, 0x6604ec },
232 { 0x04f0, 0x6604f0 },
233 { 0x04f4, 0x6604f4 },
234 { 0x04f8, 0x6604f8 },
235 { 0x04fc, 0x6604fc },
236 { 0x0500, 0x660500 },
237 { 0x0504, 0x660504 },
238 { 0x0508, 0x660508 },
239 { 0x050c, 0x66050c },
240 { 0x0510, 0x660510 },
241 { 0x0514, 0x660514 },
242 { 0x0518, 0x660518 },
243 { 0x051c, 0x66051c },
244 { 0x052c, 0x66052c },
245 { 0x0530, 0x660530 },
246 { 0x054c, 0x66054c },
247 { 0x0550, 0x660550 },
248 { 0x0554, 0x660554 },
249 { 0x0558, 0x660558 },
250 { 0x055c, 0x66055c },
/* Core channel method-dump descriptor: groups the per-unit lists above
 * with their instance counts (1 global, 3 DACs, 8 SORs, 4 PIORs,
 * 4 heads) for supervisor/error-path state dumps. */
255 static const struct nv50_disp_mthd_chan
256 nvd0_disp_mast_mthd_chan = {
260 { "Global", 1, &nvd0_disp_mast_mthd_base },
261 { "DAC", 3, &nvd0_disp_mast_mthd_dac },
262 { "SOR", 8, &nvd0_disp_mast_mthd_sor },
263 { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
264 { "HEAD", 4, &nvd0_disp_mast_mthd_head },
/* Bring up the core (master) EVO channel — same sequence as
 * nvd0_disp_dmac_init but for fixed channel 0, and the kick value
 * (0x01000013) differs from the generic DMA-channel path.
 * NOTE(review): blank/brace/`if (ret)` lines are missing from this view
 * of the file — code left byte-identical.
 */
270 nvd0_disp_mast_init(struct nouveau_object *object)
272 struct nv50_disp_priv *priv = (void *)object->engine;
273 struct nv50_disp_dmac *mast = (void *)object;
276 ret = nv50_disp_chan_init(&mast->base);
280 /* enable error reporting */
281 nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
282 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
284 /* initialise channel for dma command submission */
285 nv_wr32(priv, 0x610494, mast->push);
286 nv_wr32(priv, 0x610498, 0x00010000);
287 nv_wr32(priv, 0x61049c, 0x00000001);
288 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
289 nv_wr32(priv, 0x640000, 0x00000000);
290 nv_wr32(priv, 0x610490, 0x01000013);
292 /* wait for it to go inactive */
293 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
294 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
/* Tear down the core (master) EVO channel: deactivate, wait for the
 * state field to go idle, mask channel-0 error interrupts, then run the
 * common nv50 channel fini. */
302 nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
304 struct nv50_disp_priv *priv = (void *)object->engine;
305 struct nv50_disp_dmac *mast = (void *)object;
307 /* deactivate channel */
308 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
309 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
310 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
311 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
316 /* disable error reporting */
317 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
318 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
320 return nv50_disp_chan_fini(&mast->base, suspend);
/* Object functions for the core channel: nv50-generation ctor/dtor and
 * map/rd32/wr32 are reused; init/fini and RAMHT attach/detach are the
 * nvd0-specific implementations above. */
323 struct nv50_disp_chan_impl
324 nvd0_disp_mast_ofuncs = {
325 .base.ctor = nv50_disp_mast_ctor,
326 .base.dtor = nv50_disp_dmac_dtor,
327 .base.init = nvd0_disp_mast_init,
328 .base.fini = nvd0_disp_mast_fini,
329 .base.map = nv50_disp_chan_map,
330 .base.rd32 = nv50_disp_chan_rd32,
331 .base.wr32 = nv50_disp_chan_wr32,
333 .attach = nvd0_disp_dmac_object_attach,
334 .detach = nvd0_disp_dmac_object_detach,
337 /*******************************************************************************
338 * EVO sync channel objects
339 ******************************************************************************/
/* Base (sync) channel: global method -> debug-mirror register map
 * (mirrors at 0x661000). */
341 static const struct nv50_disp_mthd_list
342 nvd0_disp_sync_mthd_base = {
346 { 0x0080, 0x661080 },
347 { 0x0084, 0x661084 },
348 { 0x0088, 0x661088 },
349 { 0x008c, 0x66108c },
350 { 0x0090, 0x661090 },
351 { 0x0094, 0x661094 },
352 { 0x00a0, 0x6610a0 },
353 { 0x00a4, 0x6610a4 },
354 { 0x00c0, 0x6610c0 },
355 { 0x00c4, 0x6610c4 },
356 { 0x00c8, 0x6610c8 },
357 { 0x00cc, 0x6610cc },
358 { 0x00e0, 0x6610e0 },
359 { 0x00e4, 0x6610e4 },
360 { 0x00e8, 0x6610e8 },
361 { 0x00ec, 0x6610ec },
362 { 0x00fc, 0x6610fc },
363 { 0x0100, 0x661100 },
364 { 0x0104, 0x661104 },
365 { 0x0108, 0x661108 },
366 { 0x010c, 0x66110c },
367 { 0x0110, 0x661110 },
368 { 0x0114, 0x661114 },
369 { 0x0118, 0x661118 },
370 { 0x011c, 0x66111c },
371 { 0x0130, 0x661130 },
372 { 0x0134, 0x661134 },
373 { 0x0138, 0x661138 },
374 { 0x013c, 0x66113c },
375 { 0x0140, 0x661140 },
376 { 0x0144, 0x661144 },
377 { 0x0148, 0x661148 },
378 { 0x014c, 0x66114c },
379 { 0x0150, 0x661150 },
380 { 0x0154, 0x661154 },
381 { 0x0158, 0x661158 },
382 { 0x015c, 0x66115c },
383 { 0x0160, 0x661160 },
384 { 0x0164, 0x661164 },
385 { 0x0168, 0x661168 },
386 { 0x016c, 0x66116c },
/* Base (sync) channel: per-image (surface) method -> debug-mirror map. */
391 static const struct nv50_disp_mthd_list
392 nvd0_disp_sync_mthd_image = {
396 { 0x0400, 0x661400 },
397 { 0x0404, 0x661404 },
398 { 0x0408, 0x661408 },
399 { 0x040c, 0x66140c },
400 { 0x0410, 0x661410 },
/* Base (sync) channel method-dump descriptor: 1 global group, 2 image
 * slots per channel. */
405 const struct nv50_disp_mthd_chan
406 nvd0_disp_sync_mthd_chan = {
410 { "Global", 1, &nvd0_disp_sync_mthd_base },
411 { "Image", 2, &nvd0_disp_sync_mthd_image },
/* Object functions for base (sync) channels: generic nvd0 DMA-channel
 * init/fini/attach/detach with the nv50 sync constructor. */
416 struct nv50_disp_chan_impl
417 nvd0_disp_sync_ofuncs = {
418 .base.ctor = nv50_disp_sync_ctor,
419 .base.dtor = nv50_disp_dmac_dtor,
420 .base.init = nvd0_disp_dmac_init,
421 .base.fini = nvd0_disp_dmac_fini,
422 .base.map = nv50_disp_chan_map,
423 .base.rd32 = nv50_disp_chan_rd32,
424 .base.wr32 = nv50_disp_chan_wr32,
426 .attach = nvd0_disp_dmac_object_attach,
427 .detach = nvd0_disp_dmac_object_detach,
430 /*******************************************************************************
431 * EVO overlay channel objects
432 ******************************************************************************/
/* Overlay channel: global method -> debug-mirror register map
 * (mirrors at 0x665000). */
434 static const struct nv50_disp_mthd_list
435 nvd0_disp_ovly_mthd_base = {
438 { 0x0080, 0x665080 },
439 { 0x0084, 0x665084 },
440 { 0x0088, 0x665088 },
441 { 0x008c, 0x66508c },
442 { 0x0090, 0x665090 },
443 { 0x0094, 0x665094 },
444 { 0x00a0, 0x6650a0 },
445 { 0x00a4, 0x6650a4 },
446 { 0x00b0, 0x6650b0 },
447 { 0x00b4, 0x6650b4 },
448 { 0x00b8, 0x6650b8 },
449 { 0x00c0, 0x6650c0 },
450 { 0x00e0, 0x6650e0 },
451 { 0x00e4, 0x6650e4 },
452 { 0x00e8, 0x6650e8 },
453 { 0x0100, 0x665100 },
454 { 0x0104, 0x665104 },
455 { 0x0108, 0x665108 },
456 { 0x010c, 0x66510c },
457 { 0x0110, 0x665110 },
458 { 0x0118, 0x665118 },
459 { 0x011c, 0x66511c },
460 { 0x0120, 0x665120 },
461 { 0x0124, 0x665124 },
462 { 0x0130, 0x665130 },
463 { 0x0134, 0x665134 },
464 { 0x0138, 0x665138 },
465 { 0x013c, 0x66513c },
466 { 0x0140, 0x665140 },
467 { 0x0144, 0x665144 },
468 { 0x0148, 0x665148 },
469 { 0x014c, 0x66514c },
470 { 0x0150, 0x665150 },
471 { 0x0154, 0x665154 },
472 { 0x0158, 0x665158 },
473 { 0x015c, 0x66515c },
474 { 0x0160, 0x665160 },
475 { 0x0164, 0x665164 },
476 { 0x0168, 0x665168 },
477 { 0x016c, 0x66516c },
478 { 0x0400, 0x665400 },
479 { 0x0408, 0x665408 },
480 { 0x040c, 0x66540c },
481 { 0x0410, 0x665410 },
/* Overlay channel method-dump descriptor: single global group. */
486 static const struct nv50_disp_mthd_chan
487 nvd0_disp_ovly_mthd_chan = {
491 { "Global", 1, &nvd0_disp_ovly_mthd_base },
/* Object functions for overlay channels: generic nvd0 DMA-channel
 * init/fini/attach/detach with the nv50 overlay constructor. */
496 struct nv50_disp_chan_impl
497 nvd0_disp_ovly_ofuncs = {
498 .base.ctor = nv50_disp_ovly_ctor,
499 .base.dtor = nv50_disp_dmac_dtor,
500 .base.init = nvd0_disp_dmac_init,
501 .base.fini = nvd0_disp_dmac_fini,
502 .base.map = nv50_disp_chan_map,
503 .base.rd32 = nv50_disp_chan_rd32,
504 .base.wr32 = nv50_disp_chan_wr32,
506 .attach = nvd0_disp_dmac_object_attach,
507 .detach = nvd0_disp_dmac_object_detach,
510 /*******************************************************************************
511 * EVO PIO channel base class
512 ******************************************************************************/
/* Bring up an EVO PIO channel: common init, unmask the channel's error
 * interrupts, write the activate bit and wait for the state field
 * (0x00030000) to report active (0x00010000).
 * NOTE(review): blank/brace/`if (ret)` lines are missing from this view
 * of the file — code left byte-identical.
 */
515 nvd0_disp_pioc_init(struct nouveau_object *object)
517 struct nv50_disp_priv *priv = (void *)object->engine;
518 struct nv50_disp_pioc *pioc = (void *)object;
519 int chid = pioc->base.chid;
522 ret = nv50_disp_chan_init(&pioc->base);
526 /* enable error reporting */
527 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
528 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
530 /* activate channel */
531 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
532 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
533 nv_error(pioc, "init: 0x%08x\n",
534 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down an EVO PIO channel: clear the activate bit, wait for the
 * state field to report idle, mask error interrupts, then run the
 * common channel fini. */
542 nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
544 struct nv50_disp_priv *priv = (void *)object->engine;
545 struct nv50_disp_pioc *pioc = (void *)object;
546 int chid = pioc->base.chid;
548 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
549 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
550 nv_error(pioc, "timeout: 0x%08x\n",
551 nv_rd32(priv, 0x610490 + (chid * 0x10)));
556 /* disable error reporting */
557 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
558 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
560 return nv50_disp_chan_fini(&pioc->base, suspend);
563 /*******************************************************************************
564 * EVO immediate overlay channel objects
565 ******************************************************************************/
/* Object functions for immediate-overlay (PIO) channels; no RAMHT
 * attach/detach — PIO channels don't reference DMA objects here. */
567 struct nv50_disp_chan_impl
568 nvd0_disp_oimm_ofuncs = {
569 .base.ctor = nv50_disp_oimm_ctor,
570 .base.dtor = nv50_disp_pioc_dtor,
571 .base.init = nvd0_disp_pioc_init,
572 .base.fini = nvd0_disp_pioc_fini,
573 .base.map = nv50_disp_chan_map,
574 .base.rd32 = nv50_disp_chan_rd32,
575 .base.wr32 = nv50_disp_chan_wr32,
579 /*******************************************************************************
580 * EVO cursor channel objects
581 ******************************************************************************/
/* Object functions for cursor (PIO) channels — same shape as the
 * immediate-overlay channel, different constructor. */
583 struct nv50_disp_chan_impl
584 nvd0_disp_curs_ofuncs = {
585 .base.ctor = nv50_disp_curs_ctor,
586 .base.dtor = nv50_disp_pioc_dtor,
587 .base.init = nvd0_disp_pioc_init,
588 .base.fini = nvd0_disp_pioc_fini,
589 .base.map = nv50_disp_chan_map,
590 .base.rd32 = nv50_disp_chan_rd32,
591 .base.wr32 = nv50_disp_chan_wr32,
595 /*******************************************************************************
596 * Base display object
597 ******************************************************************************/
/* NV04_DISP scanoutpos method: report CRTC timing (h/v blank start/end,
 * totals, packed 16:16 in the hardware registers) and a timestamped
 * vline/hline sample for the given head.  The vline read at 0x616340 is
 * bracketed by two ktime samples so callers can bound the read latency.
 * NOTE(review): args-declaration/return lines are missing from this
 * view of the file — code left byte-identical.
 */
600 nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
602 const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
603 const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
604 const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
606 struct nv04_disp_scanoutpos_v0 v0;
610 nv_ioctl(object, "disp scanoutpos size %d\n", size);
611 if (nvif_unpack(args->v0, 0, 0, false)) {
612 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
613 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
614 args->v0.hblanke = (blanke & 0x0000ffff);
615 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
616 args->v0.hblanks = (blanks & 0x0000ffff);
617 args->v0.vtotal = ( total & 0xffff0000) >> 16;
618 args->v0.htotal = ( total & 0x0000ffff);
619 args->v0.time[0] = ktime_to_ns(ktime_get());
620 args->v0.vline = /* vline read locks hline */
621 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
622 args->v0.time[1] = ktime_to_ns(ktime_get());
624 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
/* Initialise the display engine's base object: copy per-head/DAC/SOR
 * capability registers into the EVO-visible mirrors, take display
 * ownership from the VBIOS, point the engine at its hash-table/object
 * memory, and configure interrupts.
 * NOTE(review): several lines (declarations, `if (ret)` checks, braces,
 * and the tail of the capability comment) are missing from this view of
 * the file — code left byte-identical.
 */
632 nvd0_disp_base_init(struct nouveau_object *object)
634 struct nv50_disp_priv *priv = (void *)object->engine;
635 struct nv50_disp_base *base = (void *)object;
639 ret = nouveau_parent_init(&base->base);
643 /* The below segments of code copying values from one register to
644 * another appear to inform EVO of the display capabilities or
649 for (i = 0; i < priv->head.nr; i++) {
650 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
651 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
652 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
653 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
654 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
655 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
659 for (i = 0; i < priv->dac.nr; i++) {
660 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
661 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
665 for (i = 0; i < priv->sor.nr; i++) {
666 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
667 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
670 /* steal display away from vbios, or something like that */
671 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
672 nv_wr32(priv, 0x6100ac, 0x00000100);
673 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
674 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
675 nv_error(priv, "timeout acquiring display\n");
680 /* point at display engine memory area (hash table, objects) */
681 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
683 /* enable supervisor interrupts, disable everything else */
684 nv_wr32(priv, 0x610090, 0x00000000);
685 nv_wr32(priv, 0x6100a0, 0x00000000);
686 nv_wr32(priv, 0x6100b0, 0x00000307);
688 /* disable underflow reporting, preventing an intermittent issue
689 * on some nve4 boards where the production vbios left this
690 * setting enabled by default.
692 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
694 for (i = 0; i < priv->head.nr; i++)
695 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
/* Finalise the display base object: mask every display interrupt, then
 * run the generic parent fini. */
701 nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
703 struct nv50_disp_priv *priv = (void *)object->engine;
704 struct nv50_disp_base *base = (void *)object;
706 /* disable all interrupts */
707 nv_wr32(priv, 0x6100b0, 0x00000000);
709 return nouveau_parent_fini(&base->base, suspend);
/* Object functions for the GF110 display base object; ctor/dtor/mthd
 * are shared with the nv50 implementation. */
712 struct nouveau_ofuncs
713 nvd0_disp_base_ofuncs = {
714 .ctor = nv50_disp_base_ctor,
715 .dtor = nv50_disp_base_dtor,
716 .init = nvd0_disp_base_init,
717 .fini = nvd0_disp_base_fini,
718 .mthd = nv50_disp_base_mthd,
719 .ntfy = nouveau_disp_ntfy,
/* Class list exposed on the display engine: the GF110_DISP object. */
722 static struct nouveau_oclass
723 nvd0_disp_base_oclass[] = {
724 { GF110_DISP, &nvd0_disp_base_ofuncs },
/* Child classes creatable under the display object: core/base/overlay
 * DMA channels plus the overlay-immediate and cursor PIO channels. */
728 static struct nouveau_oclass
729 nvd0_disp_sclass[] = {
730 { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
731 { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
732 { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
733 { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
734 { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
738 /*******************************************************************************
739 * Display engine implementation
740 ******************************************************************************/
/* nvkm_event hook: unmask the vblank interrupt for a head. */
743 nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
745 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
746 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
/* nvkm_event hook: mask the vblank interrupt for a head. */
750 nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
752 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
753 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
/* vblank event operations: generic ctor, nvd0 mask/unmask hooks. */
756 const struct nvkm_event_func
757 nvd0_disp_vblank_func = {
758 .ctor = nouveau_disp_vblank_ctor,
759 .init = nvd0_disp_vblank_init,
760 .fini = nvd0_disp_vblank_fini,
/* Map a (head, or, ctrl) tuple from the hardware back to the nvkm_output
 * it belongs to: decode the SOR protocol field (ctrl bits 11:8) into a
 * DCB output type + sublink mask, fold in OR and head bits, then find
 * the matching output and its VBIOS output-table entry.
 * NOTE(review): the DAC branch, loop exits and trailing return are
 * missing from this view of the file — code left byte-identical.
 */
763 static struct nvkm_output *
764 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
765 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
766 struct nvbios_outp *info)
768 struct nouveau_bios *bios = nouveau_bios(priv);
769 struct nvkm_output *outp;
773 type = DCB_OUTPUT_ANALOG;
777 switch (ctrl & 0x00000f00) {
778 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
779 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
780 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
781 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
782 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
783 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
785 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
/* build DCB hash-mask: sublink bits 7:6, OR bit, head bit */
790 mask = 0x00c0 & (mask << 6);
791 mask |= 0x0001 << or;
792 mask |= 0x0100 << head;
794 list_for_each_entry(outp, &priv->base.outp, head) {
795 if ((outp->info.hasht & 0xff) == type &&
796 (outp->info.hashm & mask) == mask) {
797 *data = nvbios_outp_match(bios, outp->info.hasht,
799 ver, hdr, cnt, len, info);
/* Find which OR currently drives `head` (scan the armed control regs at
 * 0x640180 + or*0x20), look up the matching output, and execute VBIOS
 * output script `id` for it.
 * NOTE(review): variable declarations, the init-exec call and return
 * are missing from this view of the file — code left byte-identical.
 */
809 static struct nvkm_output *
810 exec_script(struct nv50_disp_priv *priv, int head, int id)
812 struct nouveau_bios *bios = nouveau_bios(priv);
813 struct nvkm_output *outp;
814 struct nvbios_outp info;
815 u8 ver, hdr, cnt, len;
819 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
820 ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
821 if (ctrl & (1 << head))
828 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
830 struct nvbios_init init = {
831 .subdev = nv_subdev(priv),
833 .offset = info.script[id],
/* Find the OR driving `head` (pending control regs at 0x660180), look
 * up its output, derive the output configuration word from the control
 * value (or LVDS config for LVDS panels), then match the clock against
 * the VBIOS output-config/clock tables and run the resulting script.
 * NOTE(review): declarations, fall-through/break lines, the init-exec
 * call and return are missing from this view of the file — code left
 * byte-identical.
 */
845 static struct nvkm_output *
846 exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
848 struct nouveau_bios *bios = nouveau_bios(priv);
849 struct nvkm_output *outp;
850 struct nvbios_outp info1;
851 struct nvbios_ocfg info2;
852 u8 ver, hdr, cnt, len;
856 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
857 ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
858 if (ctrl & (1 << head))
865 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
869 switch (outp->info.type) {
870 case DCB_OUTPUT_TMDS:
871 *conf = (ctrl & 0x00000f00) >> 8;
875 case DCB_OUTPUT_LVDS:
876 *conf = priv->sor.lvdsconf;
879 *conf = (ctrl & 0x00000f00) >> 8;
881 case DCB_OUTPUT_ANALOG:
887 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
888 if (data && id < 0xff) {
889 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
891 struct nvbios_init init = {
892 .subdev = nv_subdev(priv),
/* Supervisor 1.0: run VBIOS output script 1 for the head. */
908 nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
910 exec_script(priv, head, 1);
/* Supervisor 2.0: run output script 2 for the head; for DP outputs also
 * run the link-power-down script (script[4]) and clear the link-trained
 * flag so training is redone before the next attach.
 * NOTE(review): some struct-init fields and the init-exec call are
 * missing from this view of the file — code left byte-identical.
 */
914 nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
916 struct nvkm_output *outp = exec_script(priv, head, 2);
918 /* see note in nv50_disp_intr_unk20_0() */
919 if (outp && outp->info.type == DCB_OUTPUT_DP) {
920 struct nvkm_output_dp *outpdp = (void *)outp;
921 struct nvbios_init init = {
922 .subdev = nv_subdev(priv),
923 .bios = nouveau_bios(priv),
926 .offset = outpdp->info.script[4],
931 atomic_set(&outpdp->lt.done, 0);
/* Supervisor 2.1: program the head's VPLL to the requested pixel clock
 * (register value is in Hz; pll_set takes kHz) and ack via 0x612200. */
936 nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
938 struct nouveau_devinit *devinit = nouveau_devinit(priv);
939 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
941 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
942 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
/* Supervisor 2.2 helper for DP outputs: compute the transfer-unit (TU)
 * fill parameters from pixel clock, bits-per-pixel (from the head
 * config word), lane count (from DP control) and link rate (from the
 * SOR clock register), and write the result to 0x616610 + head offset.
 * NOTE(review): several arithmetic/else lines and additional register
 * writes are missing from this view of the file, so the exact formula
 * cannot be fully reconstructed here — code left byte-identical.
 */
946 nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
947 struct dcb_output *outp)
949 const int or = ffs(outp->or) - 1;
950 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
951 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
952 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
953 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
954 const u32 hoff = (head * 0x800);
955 const u32 soff = ( or * 0x800);
956 const u32 loff = (link * 0x080) + soff;
957 const u32 symbol = 100000;
959 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
960 u32 clksor = nv_rd32(priv, 0x612300 + soff);
961 u32 datarate, link_nr, link_bw, bits;
964 if ((conf & 0x3c0) == 0x180) bits = 30;
965 else if ((conf & 0x3c0) == 0x140) bits = 24;
967 datarate = (pclk * bits) / 8;
969 if (dpctrl > 0x00030000) link_nr = 4;
970 else if (dpctrl > 0x00010000) link_nr = 2;
973 link_bw = (clksor & 0x007c0000) >> 18;
978 do_div(ratio, link_nr * link_bw);
980 value = (symbol - ratio) * TU;
982 do_div(value, symbol);
983 do_div(value, symbol);
988 nv_wr32(priv, 0x616610 + hoff, value);
/* Supervisor 2.2: for the head's output, scale pclk to link bandwidth
 * for deep-colour DP modes, train the DP link before attaching, run the
 * clock-comparison script, then program the OR routing register
 * (0x612280 for DACs, 0x612300 for SORs).
 * NOTE(review): early-return, else/brace and some assignment lines are
 * missing from this view of the file — code left byte-identical.
 */
992 nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
994 struct nvkm_output *outp;
995 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
996 u32 conf, addr, data;
998 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1002 /* see note in nv50_disp_intr_unk20_2() */
1003 if (outp->info.type == DCB_OUTPUT_DP) {
1004 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1005 switch ((sync & 0x000003c0) >> 6) {
1006 case 6: pclk = pclk * 30; break;
1007 case 5: pclk = pclk * 24; break;
1014 if (nvkm_output_dp_train(outp, pclk, true))
1015 ERR("link not trained before attach\n");
1018 exec_clkcmp(priv, head, 0, pclk, &conf);
1020 if (outp->info.type == DCB_OUTPUT_ANALOG) {
1021 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
1024 if (outp->info.type == DCB_OUTPUT_DP)
1025 nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
1026 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
1027 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1030 nv_mask(priv, addr, 0x00000707, data);
/* Supervisor 3.0: run clock-comparison script 1 for the head at the
 * currently-programmed pixel clock. */
1034 nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
1036 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1039 exec_clkcmp(priv, head, 1, pclk, &conf);
/* Deferred supervisor handler (scheduled from the IRQ path): snapshot
 * the per-head supervisor masks (0x6101d4), then run the phase that
 * priv->super requested — 1.0 (detach prep), 2.x (OR power-down, VPLL
 * set, mode-set/train) or 3.0 (final scripts) — for each head whose
 * mask bit is set, and finally ack the heads and 0x6101d0.
 * NOTE(review): `continue` lines, closing braces and the mask-array
 * declaration are missing from this view of the file — code left
 * byte-identical.
 */
1043 nvd0_disp_intr_supervisor(struct work_struct *work)
1045 struct nv50_disp_priv *priv =
1046 container_of(work, struct nv50_disp_priv, supervisor);
1047 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1051 nv_debug(priv, "supervisor %d\n", ffs(priv->super));
1052 for (head = 0; head < priv->head.nr; head++) {
1053 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
1054 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
1057 if (priv->super & 0x00000001) {
1058 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
1059 for (head = 0; head < priv->head.nr; head++) {
1060 if (!(mask[head] & 0x00001000))
1062 nv_debug(priv, "supervisor 1.0 - head %d\n", head);
1063 nvd0_disp_intr_unk1_0(priv, head);
1066 if (priv->super & 0x00000002) {
1067 for (head = 0; head < priv->head.nr; head++) {
1068 if (!(mask[head] & 0x00001000))
1070 nv_debug(priv, "supervisor 2.0 - head %d\n", head);
1071 nvd0_disp_intr_unk2_0(priv, head);
1073 for (head = 0; head < priv->head.nr; head++) {
1074 if (!(mask[head] & 0x00010000))
1076 nv_debug(priv, "supervisor 2.1 - head %d\n", head);
1077 nvd0_disp_intr_unk2_1(priv, head);
1079 for (head = 0; head < priv->head.nr; head++) {
1080 if (!(mask[head] & 0x00001000))
1082 nv_debug(priv, "supervisor 2.2 - head %d\n", head);
1083 nvd0_disp_intr_unk2_2(priv, head);
1086 if (priv->super & 0x00000004) {
1087 for (head = 0; head < priv->head.nr; head++) {
1088 if (!(mask[head] & 0x00001000))
1090 nv_debug(priv, "supervisor 3.0 - head %d\n", head);
1091 nvd0_disp_intr_unk4_0(priv, head);
1095 for (head = 0; head < priv->head.nr; head++)
1096 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
1097 nv_wr32(priv, 0x6101d0, 0x80000000);
/* Channel-error interrupt: read and log the faulting method/data, dump
 * the relevant channel's method state via the mthd_chan descriptors
 * (core / base / overlay depending on chid group), then ack the error
 * and reset the method FIFO entry.
 * NOTE(review): the case labels and mthd-list arguments inside the
 * switches are missing from this view of the file — code left
 * byte-identical.
 */
1101 nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1103 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1104 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1105 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1106 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1108 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1110 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1113 switch (mthd & 0xffc) {
1115 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1123 switch (mthd & 0xffc) {
1125 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1133 switch (mthd & 0xffc) {
1135 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
1143 nv_wr32(priv, 0x61009c, (1 << chid));
1144 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
/* Top-level display interrupt handler: dispatch completion (bit 0),
 * channel error (bit 1), supervisor request (bit 20 — deferred to the
 * workqueue with interrupts re-armed after the work runs), and per-head
 * vblank interrupts.
 * NOTE(review): brace/else lines are missing from this view of the
 * file — code left byte-identical.
 */
1148 nvd0_disp_intr(struct nouveau_subdev *subdev)
1150 struct nv50_disp_priv *priv = (void *)subdev;
1151 u32 intr = nv_rd32(priv, 0x610088);
1154 if (intr & 0x00000001) {
1155 u32 stat = nv_rd32(priv, 0x61008c);
1156 nv_wr32(priv, 0x61008c, stat);
1157 intr &= ~0x00000001;
1160 if (intr & 0x00000002) {
1161 u32 stat = nv_rd32(priv, 0x61009c);
1162 int chid = ffs(stat) - 1;
1164 nvd0_disp_intr_error(priv, chid);
1165 intr &= ~0x00000002;
1168 if (intr & 0x00100000) {
1169 u32 stat = nv_rd32(priv, 0x6100ac);
1170 if (stat & 0x00000007) {
1171 priv->super = (stat & 0x00000007);
1172 schedule_work(&priv->supervisor);
1173 nv_wr32(priv, 0x6100ac, priv->super);
1174 stat &= ~0x00000007;
1178 nv_info(priv, "unknown intr24 0x%08x\n", stat);
1179 nv_wr32(priv, 0x6100ac, stat);
1182 intr &= ~0x00100000;
1185 for (i = 0; i < priv->head.nr; i++) {
1186 u32 mask = 0x01000000 << i;
1188 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
1189 if (stat & 0x00000001)
1190 nouveau_disp_vblank(&priv->base, i);
1191 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
1192 nv_rd32(priv, 0x6100c0 + (i * 0x800));
/* Constructor for the GF110 display engine: read the head count from
 * 0x022448, create the base engine object, then wire up the nvd0 class
 * lists, interrupt handler, supervisor work and output-method vtables.
 * NOTE(review): `if (ret)` checks and several priv->… assignments are
 * missing from this view of the file — code left byte-identical.
 */
1198 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1199 struct nouveau_oclass *oclass, void *data, u32 size,
1200 struct nouveau_object **pobject)
1202 struct nv50_disp_priv *priv;
1203 int heads = nv_rd32(parent, 0x022448);
1206 ret = nouveau_disp_create(parent, engine, oclass, heads,
1207 "PDISP", "display", &priv);
1208 *pobject = nv_object(priv);
1212 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
1213 nv_engine(priv)->cclass = &nv50_disp_cclass;
1214 nv_subdev(priv)->intr = nvd0_disp_intr;
1215 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
1216 priv->sclass = nvd0_disp_sclass;
1217 priv->head.nr = heads;
1220 priv->dac.power = nv50_dac_power;
1221 priv->dac.sense = nv50_dac_sense;
1222 priv->sor.power = nv50_sor_power;
1223 priv->sor.hda_eld = nvd0_hda_eld;
1224 priv->sor.hdmi = nvd0_hdmi_ctrl;
/* Output-path classes supported by this display implementation
 * (GF110 SOR DP). */
1228 struct nouveau_oclass *
1229 nvd0_disp_outp_sclass[] = {
1230 &nvd0_sor_dp_impl.base.base,
1234 struct nouveau_oclass *
1235 nvd0_disp_oclass = &(struct nv50_disp_impl) {
1236 .base.base.handle = NV_ENGINE(DISP, 0x90),
1237 .base.base.ofuncs = &(struct nouveau_ofuncs) {
1238 .ctor = nvd0_disp_ctor,
1239 .dtor = _nouveau_disp_dtor,
1240 .init = _nouveau_disp_init,
1241 .fini = _nouveau_disp_fini,
1243 .base.vblank = &nvd0_disp_vblank_func,
1244 .base.outp = nvd0_disp_outp_sclass,
1245 .mthd.core = &nvd0_disp_mast_mthd_chan,
1246 .mthd.base = &nvd0_disp_sync_mthd_chan,
1247 .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
1248 .mthd.prev = -0x020000,
1249 .head.scanoutpos = nvd0_disp_base_scanoutpos,