drm/i915/debugfs: Display the contents of the BLT and BSD status pages
drivers/gpu/drm/i915/i915_debugfs.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

enum {
        ACTIVE_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};

enum {
        RENDER_RING,
        BSD_RING,
        BLT_RING,
};

static const char *yesno(int v)
{
        return v ? "yes" : "no";
}

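/* Report the per-chipset feature flags from intel_device_info as yes/no. */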
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
        B(is_i915g);
        B(is_i945gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(has_fbc);
        B(has_rc6);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
        B(supports_tv);
        B(has_bsd_ring);
        B(has_blt_ring);
#undef B

        return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

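/*
 * Print a one-line summary of a GEM object: pin/tiling flags, size,
 * read/write domains and last seqno, plus optional name, fence, GTT
 * placement and ring annotations.
 */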
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt offset: %08x, size: %08x)",
                           obj->gtt_offset, (unsigned int)obj->gtt_space->size);
        if (obj->pin_mappable || obj->fault_mappable)
                seq_printf(m, " (mappable)");
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
}

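/* Walk one of the GEM memory-management lists (selected via info_ent->data)
 * and describe every object on it, followed by object/GTT size totals. */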
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj_priv, head, mm_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj_priv);
                seq_printf(m, "\n");
                total_obj_size += obj_priv->base.size;
                total_gtt_size += obj_priv->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}

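/* Summarise the global GEM memory accounting: object, pin and GTT counters. */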
static int i915_gem_object_info(struct seq_file *m, void* data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
        seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
        seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
        seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
        seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
        seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
        seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
        seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
        seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
        seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
        seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}


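/* Report any page flip pending on each CRTC, including the old and new
 * framebuffer objects involved. */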
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if (obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if (obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}

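/* List outstanding requests on the render, BSD and BLT rings with their
 * seqno and age in jiffies. */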
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret, count;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        if (!list_empty(&dev_priv->render_ring.request_list)) {
                seq_printf(m, "Render requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->render_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->bsd_ring.request_list)) {
                seq_printf(m, "BSD requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->bsd_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->blt_ring.request_list)) {
                seq_printf(m, "BLT requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->blt_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        if (count == 0)
                seq_printf(m, "No requests\n");

        return 0;
}

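/* Print the current, waiter and IRQ seqno for a single ring. */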
static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_ring_buffer *ring)
{
        if (ring->get_seqno) {
                seq_printf(m, "Current sequence (%s): %d\n",
                           ring->name, ring->get_seqno(ring));
                seq_printf(m, "Waiter sequence (%s):  %d\n",
                           ring->name, ring->waiting_seqno);
                seq_printf(m, "IRQ sequence (%s):     %d\n",
                           ring->name, ring->irq_seqno);
        }
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_ring_seqno_info(m, &dev_priv->render_ring);
        i915_ring_seqno_info(m, &dev_priv->bsd_ring);
        i915_ring_seqno_info(m, &dev_priv->blt_ring);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}


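/* Dump the interrupt registers, using the split north/south display and
 * graphics register sets on PCH platforms, then the per-ring seqnos. */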
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat:         %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat:         %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        i915_ring_seqno_info(m, &dev_priv->render_ring);
        i915_ring_seqno_info(m, &dev_priv->bsd_ring);
        i915_ring_seqno_info(m, &dev_priv->blt_ring);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

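/* Show which object, if any, currently occupies each fence register. */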
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                seq_printf(m, "Fenced object[%2d] = ", i);
                if (obj == NULL)
                        seq_printf(m, "unused");
                else
                        describe_obj(m, to_intel_bo(obj));
                seq_printf(m, "\n");
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

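/*
 * Dump the hardware status page of the ring selected via info_ent->data,
 * so the BLT and BSD status pages can be inspected as well as the render
 * ring's.
 */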
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        volatile u32 *hws;
        int i;

        switch ((uintptr_t)node->info_ent->data) {
        case RENDER_RING: ring = &dev_priv->render_ring; break;
        case BSD_RING: ring = &dev_priv->bsd_ring; break;
        case BLT_RING: ring = &dev_priv->blt_ring; break;
        default: return -EINVAL;
        }

        hws = (volatile u32 *)ring->status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

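/* Hexdump an object's contents through a WC mapping of the mappable GTT. */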
static void i915_dump_object(struct seq_file *m,
                             struct io_mapping *mapping,
                             struct drm_i915_gem_object *obj_priv)
{
        int page, page_count, i;

        page_count = obj_priv->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
                u32 *mem = io_mapping_map_wc(mapping,
                                             obj_priv->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
                io_mapping_unmap(mem);
        }
}

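/* Dump every active object whose read domains include the command domain,
 * i.e. the batch buffers currently in flight. */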
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        seq_printf(m, "--- gtt_offset = 0x%08x\n",
                                   obj_priv->gtt_offset);
                        i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

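/* Hexdump the contents of the selected ring buffer. */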
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        switch ((uintptr_t)node->info_ent->data) {
        case RENDER_RING: ring = &dev_priv->render_ring; break;
        case BSD_RING: ring = &dev_priv->bsd_ring; break;
        case BLT_RING: ring = &dev_priv->blt_ring; break;
        default: return -EINVAL;
        }

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!ring->gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = ring->virtual_start;
                uint32_t off;

                for (off = 0; off < ring->size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x :  %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

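/* Print the head, tail, size, active head, control and start registers of
 * the selected ring. */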
static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;

        switch ((uintptr_t)node->info_ent->data) {
        case RENDER_RING: ring = &dev_priv->render_ring; break;
        case BSD_RING: ring = &dev_priv->bsd_ring; break;
        case BLT_RING: ring = &dev_priv->blt_ring; break;
        default: return -EINVAL;
        }

        if (ring->size == 0)
                return 0;

        seq_printf(m, "Ring %s:\n", ring->name);
        seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
        seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
        seq_printf(m, "  Size :    %08x\n", ring->size);
        seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
        seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));

        return 0;
}

static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

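/* Pretty-print the most recently captured GPU error state: register
 * snapshots, the recorded active buffers, and dumps of the batch and
 * ring buffers at the time of the hang. */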
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
                seq_printf(m, "Blitter command stream:\n");
                seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
                seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
                seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
                seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
                seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
        }
        seq_printf(m, "Render command stream:\n");
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
        }
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

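/* The handlers below decode the Ironlake-era render standby, frequency and
 * power-monitoring registers (CRSTANDVID, MEMSWCTL, MEMSTAT_ILK, ...). */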
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}

static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}

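/* Report whether framebuffer compression is active and, if not, why it
 * was disabled. */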
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_GEN5(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, to_intel_bo(fb->obj));
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, to_intel_bo(fb->obj));
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}

static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof (buf),
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        if (len > sizeof (buf))
                len = sizeof (buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

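/* Writing a non-zero value marks the GPU as wedged, wakes any waiters and
 * queues the error handling work. */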
static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

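/*
 * Table of read-only debugfs entries.  The last field selects which list or
 * ring a shared handler operates on.  Once registered, the files appear under
 * the DRM debugfs directory, typically e.g.
 * /sys/kernel/debug/dri/0/i915_gem_hws_blt (the minor number varies).
 */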
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
        {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0, (void *)RENDER_RING},
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BLT_RING},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)BSD_RING},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING},
        {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING},
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */