/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent, const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}
	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);
	return 0;
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
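/*
 * For reference, DEV_INFO_FOR_EACH_FLAG() in i915_drv.h applies PRINT_FLAG
 * to every device-info flag, so for a flag such as is_mobile the call in
 * i915_capabilities() expands (roughly) to:
 *
 *	seq_printf(m, "is_mobile" ": %s\n", yesno(info->is_mobile));
 */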
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
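/*
 * Legend for the flag characters printed by describe_obj() below:
 * 'P'/'p' pinned by userspace/kernel, 'X'/'Y' tiling mode, 'g' object has a
 * global GTT mapping; a space means the flag does not apply.
 */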
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
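/*
 * obj_rank_by_stolen() is the comparator handed to list_sort() in
 * i915_gem_stolen_list_info() below; returning the difference of the two
 * start offsets sorts objects by ascending position within stolen memory.
 */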
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
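/*
 * Usage sketch (see i915_gem_object_info() below): with obj, size, count,
 * mappable_size and mappable_count declared in the caller,
 *
 *	count_objects(&dev_priv->mm.bound_list, global_list);
 *
 * sums the GGTT size of every bound object and of the mappable subset.
 */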
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}
			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
				continue;

			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
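/*
 * count_vmas() mirrors count_objects() above but walks a VMA list, e.g.
 * count_vmas(&vm->active_list, mm_list) in i915_gem_object_info() below.
 */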
372 static int i915_gem_object_info(struct seq_file *m, void* data)
374 struct drm_info_node *node = m->private;
375 struct drm_device *dev = node->minor->dev;
376 struct drm_i915_private *dev_priv = dev->dev_private;
377 u32 count, mappable_count, purgeable_count;
378 size_t size, mappable_size, purgeable_size;
379 struct drm_i915_gem_object *obj;
380 struct i915_address_space *vm = &dev_priv->gtt.base;
381 struct drm_file *file;
382 struct i915_vma *vma;
385 ret = mutex_lock_interruptible(&dev->struct_mutex);
389 seq_printf(m, "%u objects, %zu bytes\n",
390 dev_priv->mm.object_count,
391 dev_priv->mm.object_memory);
393 size = count = mappable_size = mappable_count = 0;
394 count_objects(&dev_priv->mm.bound_list, global_list);
395 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
396 count, mappable_count, size, mappable_size);
398 size = count = mappable_size = mappable_count = 0;
399 count_vmas(&vm->active_list, mm_list);
400 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
401 count, mappable_count, size, mappable_size);
403 size = count = mappable_size = mappable_count = 0;
404 count_vmas(&vm->inactive_list, mm_list);
405 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
406 count, mappable_count, size, mappable_size);
408 size = count = purgeable_size = purgeable_count = 0;
409 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
410 size += obj->base.size, ++count;
411 if (obj->madv == I915_MADV_DONTNEED)
412 purgeable_size += obj->base.size, ++purgeable_count;
414 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
416 size = count = mappable_size = mappable_count = 0;
417 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
418 if (obj->fault_mappable) {
419 size += i915_gem_obj_ggtt_size(obj);
422 if (obj->pin_mappable) {
423 mappable_size += i915_gem_obj_ggtt_size(obj);
426 if (obj->madv == I915_MADV_DONTNEED) {
427 purgeable_size += obj->base.size;
431 seq_printf(m, "%u purgeable objects, %zu bytes\n",
432 purgeable_count, purgeable_size);
433 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
434 mappable_count, mappable_size);
435 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
438 seq_printf(m, "%zu [%lu] gtt total\n",
439 dev_priv->gtt.base.total,
440 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count, stats.total,
			   stats.active, stats.inactive,
			   stats.global, stats.shared, stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
475 static int i915_gem_gtt_info(struct seq_file *m, void *data)
477 struct drm_info_node *node = m->private;
478 struct drm_device *dev = node->minor->dev;
479 uintptr_t list = (uintptr_t) node->info_ent->data;
480 struct drm_i915_private *dev_priv = dev->dev_private;
481 struct drm_i915_gem_object *obj;
482 size_t total_obj_size, total_gtt_size;
485 ret = mutex_lock_interruptible(&dev->struct_mutex);
489 total_obj_size = total_gtt_size = count = 0;
490 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
491 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
495 describe_obj(m, obj);
497 total_obj_size += obj->base.size;
498 total_gtt_size += i915_gem_obj_ggtt_size(obj);
502 mutex_unlock(&dev->struct_mutex);
504 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
505 count, total_obj_size, total_gtt_size);
510 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
512 struct drm_info_node *node = m->private;
513 struct drm_device *dev = node->minor->dev;
515 struct intel_crtc *crtc;
517 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
518 const char pipe = pipe_name(crtc->pipe);
519 const char plane = plane_name(crtc->plane);
520 struct intel_unpin_work *work;
522 spin_lock_irqsave(&dev->event_lock, flags);
523 work = crtc->unpin_work;
525 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
528 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
529 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
532 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
535 if (work->enable_stall_check)
536 seq_puts(m, "Stall check enabled, ");
538 seq_puts(m, "Stall check waiting for page flip ioctl, ");
539 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
541 if (work->old_fb_obj) {
542 struct drm_i915_gem_object *obj = work->old_fb_obj;
544 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
545 i915_gem_obj_ggtt_offset(obj));
547 if (work->pending_flip_obj) {
548 struct drm_i915_gem_object *obj = work->pending_flip_obj;
550 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
551 i915_gem_obj_ggtt_offset(obj));
554 spin_unlock_irqrestore(&dev->event_lock, flags);
560 static int i915_gem_request_info(struct seq_file *m, void *data)
562 struct drm_info_node *node = m->private;
563 struct drm_device *dev = node->minor->dev;
564 struct drm_i915_private *dev_priv = dev->dev_private;
565 struct intel_ring_buffer *ring;
566 struct drm_i915_gem_request *gem_request;
569 ret = mutex_lock_interruptible(&dev->struct_mutex);
574 for_each_ring(ring, dev_priv, i) {
575 if (list_empty(&ring->request_list))
578 seq_printf(m, "%s requests:\n", ring->name);
579 list_for_each_entry(gem_request,
582 seq_printf(m, " %d @ %d\n",
584 (int) (jiffies - gem_request->emitted_jiffies));
588 mutex_unlock(&dev->struct_mutex);
591 seq_puts(m, "No requests\n");
596 static void i915_ring_seqno_info(struct seq_file *m,
597 struct intel_ring_buffer *ring)
599 if (ring->get_seqno) {
600 seq_printf(m, "Current sequence (%s): %u\n",
601 ring->name, ring->get_seqno(ring, false));
605 static int i915_gem_seqno_info(struct seq_file *m, void *data)
607 struct drm_info_node *node = m->private;
608 struct drm_device *dev = node->minor->dev;
609 struct drm_i915_private *dev_priv = dev->dev_private;
610 struct intel_ring_buffer *ring;
613 ret = mutex_lock_interruptible(&dev->struct_mutex);
616 intel_runtime_pm_get(dev_priv);
618 for_each_ring(ring, dev_priv, i)
619 i915_ring_seqno_info(m, ring);
621 intel_runtime_pm_put(dev_priv);
622 mutex_unlock(&dev->struct_mutex);
628 static int i915_interrupt_info(struct seq_file *m, void *data)
630 struct drm_info_node *node = m->private;
631 struct drm_device *dev = node->minor->dev;
632 struct drm_i915_private *dev_priv = dev->dev_private;
633 struct intel_ring_buffer *ring;
636 ret = mutex_lock_interruptible(&dev->struct_mutex);
639 intel_runtime_pm_get(dev_priv);
641 if (IS_CHERRYVIEW(dev)) {
643 seq_printf(m, "Master Interrupt Control:\t%08x\n",
644 I915_READ(GEN8_MASTER_IRQ));
646 seq_printf(m, "Display IER:\t%08x\n",
648 seq_printf(m, "Display IIR:\t%08x\n",
650 seq_printf(m, "Display IIR_RW:\t%08x\n",
651 I915_READ(VLV_IIR_RW));
652 seq_printf(m, "Display IMR:\t%08x\n",
655 seq_printf(m, "Pipe %c stat:\t%08x\n",
657 I915_READ(PIPESTAT(pipe)));
659 seq_printf(m, "Port hotplug:\t%08x\n",
660 I915_READ(PORT_HOTPLUG_EN));
661 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
662 I915_READ(VLV_DPFLIPSTAT));
663 seq_printf(m, "DPINVGTT:\t%08x\n",
664 I915_READ(DPINVGTT));
666 for (i = 0; i < 4; i++) {
667 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
668 i, I915_READ(GEN8_GT_IMR(i)));
669 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
670 i, I915_READ(GEN8_GT_IIR(i)));
671 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
672 i, I915_READ(GEN8_GT_IER(i)));
675 seq_printf(m, "PCU interrupt mask:\t%08x\n",
676 I915_READ(GEN8_PCU_IMR));
677 seq_printf(m, "PCU interrupt identity:\t%08x\n",
678 I915_READ(GEN8_PCU_IIR));
679 seq_printf(m, "PCU interrupt enable:\t%08x\n",
680 I915_READ(GEN8_PCU_IER));
681 } else if (INTEL_INFO(dev)->gen >= 8) {
682 seq_printf(m, "Master Interrupt Control:\t%08x\n",
683 I915_READ(GEN8_MASTER_IRQ));
685 for (i = 0; i < 4; i++) {
686 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
687 i, I915_READ(GEN8_GT_IMR(i)));
688 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
689 i, I915_READ(GEN8_GT_IIR(i)));
690 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
691 i, I915_READ(GEN8_GT_IER(i)));
694 for_each_pipe(pipe) {
695 seq_printf(m, "Pipe %c IMR:\t%08x\n",
697 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
698 seq_printf(m, "Pipe %c IIR:\t%08x\n",
700 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
701 seq_printf(m, "Pipe %c IER:\t%08x\n",
703 I915_READ(GEN8_DE_PIPE_IER(pipe)));
706 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
707 I915_READ(GEN8_DE_PORT_IMR));
708 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
709 I915_READ(GEN8_DE_PORT_IIR));
710 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
711 I915_READ(GEN8_DE_PORT_IER));
713 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
714 I915_READ(GEN8_DE_MISC_IMR));
715 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
716 I915_READ(GEN8_DE_MISC_IIR));
717 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
718 I915_READ(GEN8_DE_MISC_IER));
720 seq_printf(m, "PCU interrupt mask:\t%08x\n",
721 I915_READ(GEN8_PCU_IMR));
722 seq_printf(m, "PCU interrupt identity:\t%08x\n",
723 I915_READ(GEN8_PCU_IIR));
724 seq_printf(m, "PCU interrupt enable:\t%08x\n",
725 I915_READ(GEN8_PCU_IER));
726 } else if (IS_VALLEYVIEW(dev)) {
727 seq_printf(m, "Display IER:\t%08x\n",
729 seq_printf(m, "Display IIR:\t%08x\n",
731 seq_printf(m, "Display IIR_RW:\t%08x\n",
732 I915_READ(VLV_IIR_RW));
733 seq_printf(m, "Display IMR:\t%08x\n",
736 seq_printf(m, "Pipe %c stat:\t%08x\n",
738 I915_READ(PIPESTAT(pipe)));
740 seq_printf(m, "Master IER:\t%08x\n",
741 I915_READ(VLV_MASTER_IER));
743 seq_printf(m, "Render IER:\t%08x\n",
745 seq_printf(m, "Render IIR:\t%08x\n",
747 seq_printf(m, "Render IMR:\t%08x\n",
750 seq_printf(m, "PM IER:\t\t%08x\n",
751 I915_READ(GEN6_PMIER));
752 seq_printf(m, "PM IIR:\t\t%08x\n",
753 I915_READ(GEN6_PMIIR));
754 seq_printf(m, "PM IMR:\t\t%08x\n",
755 I915_READ(GEN6_PMIMR));
757 seq_printf(m, "Port hotplug:\t%08x\n",
758 I915_READ(PORT_HOTPLUG_EN));
759 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
760 I915_READ(VLV_DPFLIPSTAT));
761 seq_printf(m, "DPINVGTT:\t%08x\n",
762 I915_READ(DPINVGTT));
764 } else if (!HAS_PCH_SPLIT(dev)) {
765 seq_printf(m, "Interrupt enable: %08x\n",
767 seq_printf(m, "Interrupt identity: %08x\n",
769 seq_printf(m, "Interrupt mask: %08x\n",
772 seq_printf(m, "Pipe %c stat: %08x\n",
774 I915_READ(PIPESTAT(pipe)));
776 seq_printf(m, "North Display Interrupt enable: %08x\n",
778 seq_printf(m, "North Display Interrupt identity: %08x\n",
780 seq_printf(m, "North Display Interrupt mask: %08x\n",
782 seq_printf(m, "South Display Interrupt enable: %08x\n",
784 seq_printf(m, "South Display Interrupt identity: %08x\n",
786 seq_printf(m, "South Display Interrupt mask: %08x\n",
788 seq_printf(m, "Graphics Interrupt enable: %08x\n",
790 seq_printf(m, "Graphics Interrupt identity: %08x\n",
792 seq_printf(m, "Graphics Interrupt mask: %08x\n",
795 for_each_ring(ring, dev_priv, i) {
796 if (INTEL_INFO(dev)->gen >= 6) {
798 "Graphics Interrupt mask (%s): %08x\n",
799 ring->name, I915_READ_IMR(ring));
801 i915_ring_seqno_info(m, ring);
803 intel_runtime_pm_put(dev_priv);
804 mutex_unlock(&dev->struct_mutex);
809 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
811 struct drm_info_node *node = m->private;
812 struct drm_device *dev = node->minor->dev;
813 struct drm_i915_private *dev_priv = dev->dev_private;
816 ret = mutex_lock_interruptible(&dev->struct_mutex);
820 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
821 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
822 for (i = 0; i < dev_priv->num_fence_regs; i++) {
823 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
825 seq_printf(m, "Fence %d, pin count = %d, object = ",
826 i, dev_priv->fence_regs[i].pin_count);
828 seq_puts(m, "unused");
830 describe_obj(m, obj);
834 mutex_unlock(&dev->struct_mutex);
838 static int i915_hws_info(struct seq_file *m, void *data)
840 struct drm_info_node *node = m->private;
841 struct drm_device *dev = node->minor->dev;
842 struct drm_i915_private *dev_priv = dev->dev_private;
843 struct intel_ring_buffer *ring;
847 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
848 hws = ring->status_page.page_addr;
852 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
853 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
855 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
861 i915_error_state_write(struct file *filp,
862 const char __user *ubuf,
866 struct i915_error_state_file_priv *error_priv = filp->private_data;
867 struct drm_device *dev = error_priv->dev;
870 DRM_DEBUG_DRIVER("Resetting error state\n");
872 ret = mutex_lock_interruptible(&dev->struct_mutex);
876 i915_destroy_error_state(dev);
877 mutex_unlock(&dev->struct_mutex);
882 static int i915_error_state_open(struct inode *inode, struct file *file)
884 struct drm_device *dev = inode->i_private;
885 struct i915_error_state_file_priv *error_priv;
887 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
891 error_priv->dev = dev;
893 i915_error_state_get(dev, error_priv);
895 file->private_data = error_priv;
900 static int i915_error_state_release(struct inode *inode, struct file *file)
902 struct i915_error_state_file_priv *error_priv = file->private_data;
904 i915_error_state_put(error_priv);
910 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
911 size_t count, loff_t *pos)
913 struct i915_error_state_file_priv *error_priv = file->private_data;
914 struct drm_i915_error_state_buf error_str;
916 ssize_t ret_count = 0;
919 ret = i915_error_state_buf_init(&error_str, count, *pos);
923 ret = i915_error_state_to_str(&error_str, error_priv);
927 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
934 *pos = error_str.start + ret_count;
936 i915_error_state_buf_release(&error_str);
937 return ret ?: ret_count;
940 static const struct file_operations i915_error_state_fops = {
941 .owner = THIS_MODULE,
942 .open = i915_error_state_open,
943 .read = i915_error_state_read,
944 .write = i915_error_state_write,
945 .llseek = default_llseek,
946 .release = i915_error_state_release,
950 i915_next_seqno_get(void *data, u64 *val)
952 struct drm_device *dev = data;
953 struct drm_i915_private *dev_priv = dev->dev_private;
956 ret = mutex_lock_interruptible(&dev->struct_mutex);
960 *val = dev_priv->next_seqno;
961 mutex_unlock(&dev->struct_mutex);
967 i915_next_seqno_set(void *data, u64 val)
969 struct drm_device *dev = data;
972 ret = mutex_lock_interruptible(&dev->struct_mutex);
976 ret = i915_gem_set_seqno(dev, val);
977 mutex_unlock(&dev->struct_mutex);
982 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
983 i915_next_seqno_get, i915_next_seqno_set,
986 static int i915_rstdby_delays(struct seq_file *m, void *unused)
988 struct drm_info_node *node = m->private;
989 struct drm_device *dev = node->minor->dev;
990 struct drm_i915_private *dev_priv = dev->dev_private;
994 ret = mutex_lock_interruptible(&dev->struct_mutex);
997 intel_runtime_pm_get(dev_priv);
999 crstanddelay = I915_READ16(CRSTANDVID);
1001 intel_runtime_pm_put(dev_priv);
1002 mutex_unlock(&dev->struct_mutex);
1004 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
1009 static int i915_frequency_info(struct seq_file *m, void *unused)
1011 struct drm_info_node *node = m->private;
1012 struct drm_device *dev = node->minor->dev;
1013 struct drm_i915_private *dev_priv = dev->dev_private;
1016 intel_runtime_pm_get(dev_priv);
1018 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1021 u16 rgvswctl = I915_READ16(MEMSWCTL);
1022 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1024 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1025 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1026 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1028 seq_printf(m, "Current P-state: %d\n",
1029 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1030 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
1031 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1032 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1033 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1034 u32 rpmodectl, rpinclimit, rpdeclimit;
1035 u32 rpstat, cagf, reqf;
1036 u32 rpupei, rpcurup, rpprevup;
1037 u32 rpdownei, rpcurdown, rpprevdown;
1040 /* RPSTAT1 is in the GT power well */
1041 ret = mutex_lock_interruptible(&dev->struct_mutex);
1045 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
1047 reqf = I915_READ(GEN6_RPNSWREQ);
1048 reqf &= ~GEN6_TURBO_DISABLE;
1049 if (IS_HASWELL(dev))
1053 reqf *= GT_FREQUENCY_MULTIPLIER;
1055 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1056 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1057 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1059 rpstat = I915_READ(GEN6_RPSTAT1);
1060 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1061 rpcurup = I915_READ(GEN6_RP_CUR_UP);
1062 rpprevup = I915_READ(GEN6_RP_PREV_UP);
1063 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1064 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1065 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1066 if (IS_HASWELL(dev))
1067 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1069 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1070 cagf *= GT_FREQUENCY_MULTIPLIER;
1072 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1073 mutex_unlock(&dev->struct_mutex);
1075 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1076 I915_READ(GEN6_PMIER),
1077 I915_READ(GEN6_PMIMR),
1078 I915_READ(GEN6_PMISR),
1079 I915_READ(GEN6_PMIIR),
1080 I915_READ(GEN6_PMINTRMSK));
1081 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1082 seq_printf(m, "Render p-state ratio: %d\n",
1083 (gt_perf_status & 0xff00) >> 8);
1084 seq_printf(m, "Render p-state VID: %d\n",
1085 gt_perf_status & 0xff);
1086 seq_printf(m, "Render p-state limit: %d\n",
1087 rp_state_limits & 0xff);
1088 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1089 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1090 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1091 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1092 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1093 seq_printf(m, "CAGF: %dMHz\n", cagf);
1094 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1095 GEN6_CURICONT_MASK);
1096 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1097 GEN6_CURBSYTAVG_MASK);
1098 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1099 GEN6_CURBSYTAVG_MASK);
1100 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1102 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1103 GEN6_CURBSYTAVG_MASK);
1104 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1105 GEN6_CURBSYTAVG_MASK);
1107 max_freq = (rp_state_cap & 0xff0000) >> 16;
1108 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1109 max_freq * GT_FREQUENCY_MULTIPLIER);
1111 max_freq = (rp_state_cap & 0xff00) >> 8;
1112 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1113 max_freq * GT_FREQUENCY_MULTIPLIER);
1115 max_freq = rp_state_cap & 0xff;
1116 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1117 max_freq * GT_FREQUENCY_MULTIPLIER);
1119 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1120 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
1121 } else if (IS_VALLEYVIEW(dev)) {
1124 mutex_lock(&dev_priv->rps.hw_lock);
1125 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1126 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1127 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1129 val = valleyview_rps_max_freq(dev_priv);
1130 seq_printf(m, "max GPU freq: %d MHz\n",
1131 vlv_gpu_freq(dev_priv, val));
1133 val = valleyview_rps_min_freq(dev_priv);
1134 seq_printf(m, "min GPU freq: %d MHz\n",
1135 vlv_gpu_freq(dev_priv, val));
1137 seq_printf(m, "current GPU freq: %d MHz\n",
1138 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1139 mutex_unlock(&dev_priv->rps.hw_lock);
1141 seq_puts(m, "no P-state info available\n");
1145 intel_runtime_pm_put(dev_priv);
1149 static int i915_delayfreq_table(struct seq_file *m, void *unused)
1151 struct drm_info_node *node = m->private;
1152 struct drm_device *dev = node->minor->dev;
1153 struct drm_i915_private *dev_priv = dev->dev_private;
1157 ret = mutex_lock_interruptible(&dev->struct_mutex);
1160 intel_runtime_pm_get(dev_priv);
1162 for (i = 0; i < 16; i++) {
1163 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
1164 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
1165 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
1168 intel_runtime_pm_put(dev_priv);
1170 mutex_unlock(&dev->struct_mutex);
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
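/*
 * Example: MAP_TO_MV(0) == 1250 mV and each increment of the map value
 * lowers the result by 25 mV, so MAP_TO_MV(10) == 1000 mV.
 */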
1180 static int i915_inttoext_table(struct seq_file *m, void *unused)
1182 struct drm_info_node *node = m->private;
1183 struct drm_device *dev = node->minor->dev;
1184 struct drm_i915_private *dev_priv = dev->dev_private;
1188 ret = mutex_lock_interruptible(&dev->struct_mutex);
1191 intel_runtime_pm_get(dev_priv);
1193 for (i = 1; i <= 32; i++) {
1194 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
1195 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
1198 intel_runtime_pm_put(dev_priv);
1199 mutex_unlock(&dev->struct_mutex);
1204 static int ironlake_drpc_info(struct seq_file *m)
1206 struct drm_info_node *node = m->private;
1207 struct drm_device *dev = node->minor->dev;
1208 struct drm_i915_private *dev_priv = dev->dev_private;
1209 u32 rgvmodectl, rstdbyctl;
1213 ret = mutex_lock_interruptible(&dev->struct_mutex);
1216 intel_runtime_pm_get(dev_priv);
1218 rgvmodectl = I915_READ(MEMMODECTL);
1219 rstdbyctl = I915_READ(RSTDBYCTL);
1220 crstandvid = I915_READ16(CRSTANDVID);
1222 intel_runtime_pm_put(dev_priv);
1223 mutex_unlock(&dev->struct_mutex);
1225 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1227 seq_printf(m, "Boost freq: %d\n",
1228 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1229 MEMMODE_BOOST_FREQ_SHIFT);
1230 seq_printf(m, "HW control enabled: %s\n",
1231 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1232 seq_printf(m, "SW control enabled: %s\n",
1233 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1234 seq_printf(m, "Gated voltage change: %s\n",
1235 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1236 seq_printf(m, "Starting frequency: P%d\n",
1237 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1238 seq_printf(m, "Max P-state: P%d\n",
1239 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1240 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1241 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1242 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1243 seq_printf(m, "Render standby enabled: %s\n",
1244 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1245 seq_puts(m, "Current RS state: ");
1246 switch (rstdbyctl & RSX_STATUS_MASK) {
1248 seq_puts(m, "on\n");
1250 case RSX_STATUS_RC1:
1251 seq_puts(m, "RC1\n");
1253 case RSX_STATUS_RC1E:
1254 seq_puts(m, "RC1E\n");
1256 case RSX_STATUS_RS1:
1257 seq_puts(m, "RS1\n");
1259 case RSX_STATUS_RS2:
1260 seq_puts(m, "RS2 (RC6)\n");
1262 case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
1266 seq_puts(m, "unknown\n");
1273 static int vlv_drpc_info(struct seq_file *m)
1276 struct drm_info_node *node = m->private;
1277 struct drm_device *dev = node->minor->dev;
1278 struct drm_i915_private *dev_priv = dev->dev_private;
1279 u32 rpmodectl1, rcctl1;
1280 unsigned fw_rendercount = 0, fw_mediacount = 0;
1282 intel_runtime_pm_get(dev_priv);
1284 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1285 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1287 intel_runtime_pm_put(dev_priv);
1289 seq_printf(m, "Video Turbo Mode: %s\n",
1290 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1291 seq_printf(m, "Turbo enabled: %s\n",
1292 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1293 seq_printf(m, "HW control enabled: %s\n",
1294 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1295 seq_printf(m, "SW control enabled: %s\n",
1296 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1297 GEN6_RP_MEDIA_SW_MODE));
1298 seq_printf(m, "RC6 Enabled: %s\n",
1299 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1300 GEN6_RC_CTL_EI_MODE(1))));
1301 seq_printf(m, "Render Power Well: %s\n",
1302 (I915_READ(VLV_GTLC_PW_STATUS) &
1303 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1304 seq_printf(m, "Media Power Well: %s\n",
1305 (I915_READ(VLV_GTLC_PW_STATUS) &
1306 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1308 seq_printf(m, "Render RC6 residency since boot: %u\n",
1309 I915_READ(VLV_GT_RENDER_RC6));
1310 seq_printf(m, "Media RC6 residency since boot: %u\n",
1311 I915_READ(VLV_GT_MEDIA_RC6));
1313 spin_lock_irq(&dev_priv->uncore.lock);
1314 fw_rendercount = dev_priv->uncore.fw_rendercount;
1315 fw_mediacount = dev_priv->uncore.fw_mediacount;
1316 spin_unlock_irq(&dev_priv->uncore.lock);
1318 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
1319 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
1326 static int gen6_drpc_info(struct seq_file *m)
1329 struct drm_info_node *node = m->private;
1330 struct drm_device *dev = node->minor->dev;
1331 struct drm_i915_private *dev_priv = dev->dev_private;
1332 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1333 unsigned forcewake_count;
1336 ret = mutex_lock_interruptible(&dev->struct_mutex);
1339 intel_runtime_pm_get(dev_priv);
1341 spin_lock_irq(&dev_priv->uncore.lock);
1342 forcewake_count = dev_priv->uncore.forcewake_count;
1343 spin_unlock_irq(&dev_priv->uncore.lock);
1345 if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
1349 /* NB: we cannot use forcewake, else we read the wrong values */
1350 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1352 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1355 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1356 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1358 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1359 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1360 mutex_unlock(&dev->struct_mutex);
1361 mutex_lock(&dev_priv->rps.hw_lock);
1362 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1363 mutex_unlock(&dev_priv->rps.hw_lock);
1365 intel_runtime_pm_put(dev_priv);
1367 seq_printf(m, "Video Turbo Mode: %s\n",
1368 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1369 seq_printf(m, "HW control enabled: %s\n",
1370 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1371 seq_printf(m, "SW control enabled: %s\n",
1372 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1373 GEN6_RP_MEDIA_SW_MODE));
1374 seq_printf(m, "RC1e Enabled: %s\n",
1375 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1376 seq_printf(m, "RC6 Enabled: %s\n",
1377 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1378 seq_printf(m, "Deep RC6 Enabled: %s\n",
1379 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1380 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1381 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1382 seq_puts(m, "Current RC state: ");
1383 switch (gt_core_status & GEN6_RCn_MASK) {
1385 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1386 seq_puts(m, "Core Power Down\n");
1388 seq_puts(m, "on\n");
1391 seq_puts(m, "RC3\n");
1394 seq_puts(m, "RC6\n");
1397 seq_puts(m, "RC7\n");
1400 seq_puts(m, "Unknown\n");
1404 seq_printf(m, "Core Power Down: %s\n",
1405 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1407 /* Not exactly sure what this is */
1408 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1409 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1410 seq_printf(m, "RC6 residency since boot: %u\n",
1411 I915_READ(GEN6_GT_GFX_RC6));
1412 seq_printf(m, "RC6+ residency since boot: %u\n",
1413 I915_READ(GEN6_GT_GFX_RC6p));
1414 seq_printf(m, "RC6++ residency since boot: %u\n",
1415 I915_READ(GEN6_GT_GFX_RC6pp));
1417 seq_printf(m, "RC6 voltage: %dmV\n",
1418 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1419 seq_printf(m, "RC6+ voltage: %dmV\n",
1420 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1421 seq_printf(m, "RC6++ voltage: %dmV\n",
1422 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1426 static int i915_drpc_info(struct seq_file *m, void *unused)
1428 struct drm_info_node *node = m->private;
1429 struct drm_device *dev = node->minor->dev;
1431 if (IS_VALLEYVIEW(dev))
1432 return vlv_drpc_info(m);
1433 else if (IS_GEN6(dev) || IS_GEN7(dev))
1434 return gen6_drpc_info(m);
1436 return ironlake_drpc_info(m);
1439 static int i915_fbc_status(struct seq_file *m, void *unused)
1441 struct drm_info_node *node = m->private;
1442 struct drm_device *dev = node->minor->dev;
1443 struct drm_i915_private *dev_priv = dev->dev_private;
1445 if (!HAS_FBC(dev)) {
1446 seq_puts(m, "FBC unsupported on this chipset\n");
1450 intel_runtime_pm_get(dev_priv);
1452 if (intel_fbc_enabled(dev)) {
1453 seq_puts(m, "FBC enabled\n");
1455 seq_puts(m, "FBC disabled: ");
1456 switch (dev_priv->fbc.no_fbc_reason) {
			seq_puts(m, "FBC activated, but currently disabled in hardware");
1460 case FBC_UNSUPPORTED:
1461 seq_puts(m, "unsupported by this chipset");
1464 seq_puts(m, "no outputs");
1466 case FBC_STOLEN_TOO_SMALL:
1467 seq_puts(m, "not enough stolen memory");
1469 case FBC_UNSUPPORTED_MODE:
1470 seq_puts(m, "mode not supported");
1472 case FBC_MODE_TOO_LARGE:
1473 seq_puts(m, "mode too large");
1476 seq_puts(m, "FBC unsupported on plane");
1479 seq_puts(m, "scanout buffer not tiled");
1481 case FBC_MULTIPLE_PIPES:
1482 seq_puts(m, "multiple pipes are enabled");
1484 case FBC_MODULE_PARAM:
1485 seq_puts(m, "disabled per module param (default off)");
1487 case FBC_CHIP_DEFAULT:
1488 seq_puts(m, "disabled per chip default");
1491 seq_puts(m, "unknown reason");
1496 intel_runtime_pm_put(dev_priv);
1501 static int i915_ips_status(struct seq_file *m, void *unused)
1503 struct drm_info_node *node = m->private;
1504 struct drm_device *dev = node->minor->dev;
1505 struct drm_i915_private *dev_priv = dev->dev_private;
1507 if (!HAS_IPS(dev)) {
1508 seq_puts(m, "not supported\n");
1512 intel_runtime_pm_get(dev_priv);
1514 if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
1515 seq_puts(m, "enabled\n");
1517 seq_puts(m, "disabled\n");
1519 intel_runtime_pm_put(dev_priv);
1524 static int i915_sr_status(struct seq_file *m, void *unused)
1526 struct drm_info_node *node = m->private;
1527 struct drm_device *dev = node->minor->dev;
1528 struct drm_i915_private *dev_priv = dev->dev_private;
1529 bool sr_enabled = false;
1531 intel_runtime_pm_get(dev_priv);
1533 if (HAS_PCH_SPLIT(dev))
1534 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1535 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1536 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1537 else if (IS_I915GM(dev))
1538 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1539 else if (IS_PINEVIEW(dev))
1540 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1542 intel_runtime_pm_put(dev_priv);
1544 seq_printf(m, "self-refresh: %s\n",
1545 sr_enabled ? "enabled" : "disabled");
1550 static int i915_emon_status(struct seq_file *m, void *unused)
1552 struct drm_info_node *node = m->private;
1553 struct drm_device *dev = node->minor->dev;
1554 struct drm_i915_private *dev_priv = dev->dev_private;
1555 unsigned long temp, chipset, gfx;
1561 ret = mutex_lock_interruptible(&dev->struct_mutex);
1565 temp = i915_mch_val(dev_priv);
1566 chipset = i915_chipset_val(dev_priv);
1567 gfx = i915_gfx_val(dev_priv);
1568 mutex_unlock(&dev->struct_mutex);
1570 seq_printf(m, "GMCH temp: %ld\n", temp);
1571 seq_printf(m, "Chipset power: %ld\n", chipset);
1572 seq_printf(m, "GFX power: %ld\n", gfx);
1573 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1578 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1580 struct drm_info_node *node = m->private;
1581 struct drm_device *dev = node->minor->dev;
1582 struct drm_i915_private *dev_priv = dev->dev_private;
1584 int gpu_freq, ia_freq;
1586 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1587 seq_puts(m, "unsupported on this chipset\n");
1591 intel_runtime_pm_get(dev_priv);
1593 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1595 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1599 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1601 for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1602 gpu_freq <= dev_priv->rps.max_freq_softlimit;
1605 sandybridge_pcode_read(dev_priv,
1606 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1608 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1609 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1610 ((ia_freq >> 0) & 0xff) * 100,
1611 ((ia_freq >> 8) & 0xff) * 100);
1614 mutex_unlock(&dev_priv->rps.hw_lock);
1617 intel_runtime_pm_put(dev_priv);
1621 static int i915_gfxec(struct seq_file *m, void *unused)
1623 struct drm_info_node *node = m->private;
1624 struct drm_device *dev = node->minor->dev;
1625 struct drm_i915_private *dev_priv = dev->dev_private;
1628 ret = mutex_lock_interruptible(&dev->struct_mutex);
1631 intel_runtime_pm_get(dev_priv);
1633 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1634 intel_runtime_pm_put(dev_priv);
1636 mutex_unlock(&dev->struct_mutex);
1641 static int i915_opregion(struct seq_file *m, void *unused)
1643 struct drm_info_node *node = m->private;
1644 struct drm_device *dev = node->minor->dev;
1645 struct drm_i915_private *dev_priv = dev->dev_private;
1646 struct intel_opregion *opregion = &dev_priv->opregion;
1647 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1653 ret = mutex_lock_interruptible(&dev->struct_mutex);
1657 if (opregion->header) {
1658 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1659 seq_write(m, data, OPREGION_SIZE);
1662 mutex_unlock(&dev->struct_mutex);
1669 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1671 struct drm_info_node *node = m->private;
1672 struct drm_device *dev = node->minor->dev;
1673 struct intel_fbdev *ifbdev = NULL;
1674 struct intel_framebuffer *fb;
1676 #ifdef CONFIG_DRM_I915_FBDEV
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1682 ifbdev = dev_priv->fbdev;
1683 fb = to_intel_framebuffer(ifbdev->helper.fb);
1685 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1689 fb->base.bits_per_pixel,
1690 atomic_read(&fb->base.refcount.refcount));
1691 describe_obj(m, fb->obj);
1693 mutex_unlock(&dev->mode_config.mutex);
1696 mutex_lock(&dev->mode_config.fb_lock);
1697 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1698 if (ifbdev && &fb->base == ifbdev->helper.fb)
1701 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1705 fb->base.bits_per_pixel,
1706 atomic_read(&fb->base.refcount.refcount));
1707 describe_obj(m, fb->obj);
1710 mutex_unlock(&dev->mode_config.fb_lock);
1715 static int i915_context_status(struct seq_file *m, void *unused)
1717 struct drm_info_node *node = m->private;
1718 struct drm_device *dev = node->minor->dev;
1719 struct drm_i915_private *dev_priv = dev->dev_private;
1720 struct intel_ring_buffer *ring;
1721 struct i915_hw_context *ctx;
1724 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1728 if (dev_priv->ips.pwrctx) {
1729 seq_puts(m, "power context ");
1730 describe_obj(m, dev_priv->ips.pwrctx);
1734 if (dev_priv->ips.renderctx) {
1735 seq_puts(m, "render context ");
1736 describe_obj(m, dev_priv->ips.renderctx);
1740 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1741 if (ctx->obj == NULL)
1744 seq_puts(m, "HW context ");
1745 describe_ctx(m, ctx);
1746 for_each_ring(ring, dev_priv, i)
1747 if (ring->default_context == ctx)
1748 seq_printf(m, "(default context %s) ", ring->name);
1750 describe_obj(m, ctx->obj);
1754 mutex_unlock(&dev->mode_config.mutex);
1759 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1761 struct drm_info_node *node = m->private;
1762 struct drm_device *dev = node->minor->dev;
1763 struct drm_i915_private *dev_priv = dev->dev_private;
1764 unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
1766 spin_lock_irq(&dev_priv->uncore.lock);
1767 if (IS_VALLEYVIEW(dev)) {
1768 fw_rendercount = dev_priv->uncore.fw_rendercount;
1769 fw_mediacount = dev_priv->uncore.fw_mediacount;
1771 forcewake_count = dev_priv->uncore.forcewake_count;
1772 spin_unlock_irq(&dev_priv->uncore.lock);
1774 if (IS_VALLEYVIEW(dev)) {
1775 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
1776 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
1778 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1783 static const char *swizzle_string(unsigned swizzle)
1786 case I915_BIT_6_SWIZZLE_NONE:
1788 case I915_BIT_6_SWIZZLE_9:
1790 case I915_BIT_6_SWIZZLE_9_10:
1791 return "bit9/bit10";
1792 case I915_BIT_6_SWIZZLE_9_11:
1793 return "bit9/bit11";
1794 case I915_BIT_6_SWIZZLE_9_10_11:
1795 return "bit9/bit10/bit11";
1796 case I915_BIT_6_SWIZZLE_9_17:
1797 return "bit9/bit17";
1798 case I915_BIT_6_SWIZZLE_9_10_17:
1799 return "bit9/bit10/bit17";
1800 case I915_BIT_6_SWIZZLE_UNKNOWN:
1807 static int i915_swizzle_info(struct seq_file *m, void *data)
1809 struct drm_info_node *node = m->private;
1810 struct drm_device *dev = node->minor->dev;
1811 struct drm_i915_private *dev_priv = dev->dev_private;
1814 ret = mutex_lock_interruptible(&dev->struct_mutex);
1817 intel_runtime_pm_get(dev_priv);
1819 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1820 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1821 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1822 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1824 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1825 seq_printf(m, "DDC = 0x%08x\n",
1827 seq_printf(m, "C0DRB3 = 0x%04x\n",
1828 I915_READ16(C0DRB3));
1829 seq_printf(m, "C1DRB3 = 0x%04x\n",
1830 I915_READ16(C1DRB3));
1831 } else if (INTEL_INFO(dev)->gen >= 6) {
1832 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1833 I915_READ(MAD_DIMM_C0));
1834 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1835 I915_READ(MAD_DIMM_C1));
1836 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1837 I915_READ(MAD_DIMM_C2));
1838 seq_printf(m, "TILECTL = 0x%08x\n",
1839 I915_READ(TILECTL));
1841 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1842 I915_READ(GAMTARBMODE));
1844 seq_printf(m, "ARB_MODE = 0x%08x\n",
1845 I915_READ(ARB_MODE));
1846 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1847 I915_READ(DISP_ARB_CTL));
1849 intel_runtime_pm_put(dev_priv);
1850 mutex_unlock(&dev->struct_mutex);
1855 static int per_file_ctx(int id, void *ptr, void *data)
1857 struct i915_hw_context *ctx = ptr;
1858 struct seq_file *m = data;
1859 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
1861 ppgtt->debug_dump(ppgtt, m);
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
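/*
 * Note on the PDP dump above: on gen8 each ring exposes four 64-bit
 * page-directory pointers as register pairs at mmio_base + 0x270 + 8*i,
 * with the upper dword at offset + 4 and the lower dword at the offset
 * itself; the loop stitches the two reads back into one value.
 */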
1890 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1892 struct drm_i915_private *dev_priv = dev->dev_private;
1893 struct intel_ring_buffer *ring;
1894 struct drm_file *file;
1897 if (INTEL_INFO(dev)->gen == 6)
1898 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1900 for_each_ring(ring, dev_priv, i) {
1901 seq_printf(m, "%s\n", ring->name);
1902 if (INTEL_INFO(dev)->gen == 7)
1903 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1904 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1905 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1906 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1908 if (dev_priv->mm.aliasing_ppgtt) {
1909 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1911 seq_puts(m, "aliasing PPGTT:\n");
1912 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1914 ppgtt->debug_dump(ppgtt, m);
1918 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
1919 struct drm_i915_file_private *file_priv = file->driver_priv;
1920 struct i915_hw_ppgtt *pvt_ppgtt;
1922 pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
1923 seq_printf(m, "proc: %s\n",
1924 get_pid_task(file->pid, PIDTYPE_PID)->comm);
1925 seq_puts(m, " default context:\n");
1926 idr_for_each(&file_priv->context_idr, per_file_ctx, m);
1928 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1931 static int i915_ppgtt_info(struct seq_file *m, void *data)
1933 struct drm_info_node *node = m->private;
1934 struct drm_device *dev = node->minor->dev;
1935 struct drm_i915_private *dev_priv = dev->dev_private;
1937 int ret = mutex_lock_interruptible(&dev->struct_mutex);
1940 intel_runtime_pm_get(dev_priv);
1942 if (INTEL_INFO(dev)->gen >= 8)
1943 gen8_ppgtt_info(m, dev);
1944 else if (INTEL_INFO(dev)->gen >= 6)
1945 gen6_ppgtt_info(m, dev);
1947 intel_runtime_pm_put(dev_priv);
1948 mutex_unlock(&dev->struct_mutex);
1953 static int i915_llc(struct seq_file *m, void *data)
1955 struct drm_info_node *node = m->private;
1956 struct drm_device *dev = node->minor->dev;
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1959 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1960 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1961 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1966 static int i915_edp_psr_status(struct seq_file *m, void *data)
1968 struct drm_info_node *node = m->private;
1969 struct drm_device *dev = node->minor->dev;
1970 struct drm_i915_private *dev_priv = dev->dev_private;
1972 bool enabled = false;
1974 intel_runtime_pm_get(dev_priv);
1976 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1977 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1979 enabled = HAS_PSR(dev) &&
1980 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1981 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1984 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1985 EDP_PSR_PERF_CNT_MASK;
1986 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1988 intel_runtime_pm_put(dev_priv);
1992 static int i915_sink_crc(struct seq_file *m, void *data)
1994 struct drm_info_node *node = m->private;
1995 struct drm_device *dev = node->minor->dev;
1996 struct intel_encoder *encoder;
1997 struct intel_connector *connector;
1998 struct intel_dp *intel_dp = NULL;
2002 drm_modeset_lock_all(dev);
2003 list_for_each_entry(connector, &dev->mode_config.connector_list,
2006 if (connector->base.dpms != DRM_MODE_DPMS_ON)
2009 if (!connector->base.encoder)
2012 encoder = to_intel_encoder(connector->base.encoder);
2013 if (encoder->type != INTEL_OUTPUT_EDP)
2016 intel_dp = enc_to_intel_dp(&encoder->base);
2018 ret = intel_dp_sink_crc(intel_dp, crc);
2022 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2023 crc[0], crc[1], crc[2],
2024 crc[3], crc[4], crc[5]);
2029 drm_modeset_unlock_all(dev);
2033 static int i915_energy_uJ(struct seq_file *m, void *data)
2035 struct drm_info_node *node = m->private;
2036 struct drm_device *dev = node->minor->dev;
2037 struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;
	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);
	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);
	return 0;
}
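/*
 * Worked example for the unit decode in i915_energy_uJ() (values assumed
 * for illustration): if bits 12:8 of MSR_RAPL_POWER_UNIT read 14, one
 * energy-counter tick is 1/2^14 J, so units = 1000000 >> 14 = 61 uJ and the
 * MCH_SECP_NRG_STTS count is scaled by 61 to report microjoules.
 */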
2059 static int i915_pc8_status(struct seq_file *m, void *unused)
2061 struct drm_info_node *node = m->private;
2062 struct drm_device *dev = node->minor->dev;
2063 struct drm_i915_private *dev_priv = dev->dev_private;
2065 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2066 seq_puts(m, "not supported\n");
2070 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2071 seq_printf(m, "IRQs disabled: %s\n",
2072 yesno(dev_priv->pm.irqs_disabled));
2077 static const char *power_domain_str(enum intel_display_power_domain domain)
2080 case POWER_DOMAIN_PIPE_A:
2082 case POWER_DOMAIN_PIPE_B:
2084 case POWER_DOMAIN_PIPE_C:
2086 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2087 return "PIPE_A_PANEL_FITTER";
2088 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2089 return "PIPE_B_PANEL_FITTER";
2090 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2091 return "PIPE_C_PANEL_FITTER";
2092 case POWER_DOMAIN_TRANSCODER_A:
2093 return "TRANSCODER_A";
2094 case POWER_DOMAIN_TRANSCODER_B:
2095 return "TRANSCODER_B";
2096 case POWER_DOMAIN_TRANSCODER_C:
2097 return "TRANSCODER_C";
2098 case POWER_DOMAIN_TRANSCODER_EDP:
2099 return "TRANSCODER_EDP";
2100 case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2101 return "PORT_DDI_A_2_LANES";
2102 case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2103 return "PORT_DDI_A_4_LANES";
2104 case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2105 return "PORT_DDI_B_2_LANES";
2106 case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2107 return "PORT_DDI_B_4_LANES";
2108 case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2109 return "PORT_DDI_C_2_LANES";
2110 case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2111 return "PORT_DDI_C_4_LANES";
2112 case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2113 return "PORT_DDI_D_2_LANES";
2114 case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2115 return "PORT_DDI_D_4_LANES";
2116 case POWER_DOMAIN_PORT_DSI:
2118 case POWER_DOMAIN_PORT_CRT:
2120 case POWER_DOMAIN_PORT_OTHER:
2121 return "PORT_OTHER";
2122 case POWER_DOMAIN_VGA:
2124 case POWER_DOMAIN_AUDIO:
2126 case POWER_DOMAIN_INIT:
2134 static int i915_power_domain_info(struct seq_file *m, void *unused)
2136 struct drm_info_node *node = m->private;
2137 struct drm_device *dev = node->minor->dev;
2138 struct drm_i915_private *dev_priv = dev->dev_private;
2139 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2142 mutex_lock(&power_domains->lock);
2144 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2145 for (i = 0; i < power_domains->power_well_count; i++) {
2146 struct i915_power_well *power_well;
2147 enum intel_display_power_domain power_domain;
2149 power_well = &power_domains->power_wells[i];
2150 seq_printf(m, "%-25s %d\n", power_well->name,
2153 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2155 if (!(BIT(power_domain) & power_well->domains))
2158 seq_printf(m, " %-23s %d\n",
2159 power_domain_str(power_domain),
2160 power_domains->domain_use_count[power_domain]);
2164 mutex_unlock(&power_domains->lock);
2169 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2170 struct drm_display_mode *mode)
2174 for (i = 0; i < tabs; i++)
2177 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2178 mode->base.id, mode->name,
2179 mode->vrefresh, mode->clock,
2180 mode->hdisplay, mode->hsync_start,
2181 mode->hsync_end, mode->htotal,
2182 mode->vdisplay, mode->vsync_start,
2183 mode->vsync_end, mode->vtotal,
2184 mode->type, mode->flags);
2187 static void intel_encoder_info(struct seq_file *m,
2188 struct intel_crtc *intel_crtc,
2189 struct intel_encoder *intel_encoder)
2191 struct drm_info_node *node = m->private;
2192 struct drm_device *dev = node->minor->dev;
2193 struct drm_crtc *crtc = &intel_crtc->base;
2194 struct intel_connector *intel_connector;
2195 struct drm_encoder *encoder;
2197 encoder = &intel_encoder->base;
2198 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2199 encoder->base.id, drm_get_encoder_name(encoder));
2200 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2201 struct drm_connector *connector = &intel_connector->base;
2202 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2204 drm_get_connector_name(connector),
2205 drm_get_connector_status_name(connector->status));
2206 if (connector->status == connector_status_connected) {
2207 struct drm_display_mode *mode = &crtc->mode;
2208 seq_printf(m, ", mode:\n");
2209 intel_seq_print_mode(m, 2, mode);
2216 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2218 struct drm_info_node *node = m->private;
2219 struct drm_device *dev = node->minor->dev;
2220 struct drm_crtc *crtc = &intel_crtc->base;
2221 struct intel_encoder *intel_encoder;
2223 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2224 crtc->primary->fb->base.id, crtc->x, crtc->y,
2225 crtc->primary->fb->width, crtc->primary->fb->height);
2226 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2227 intel_encoder_info(m, intel_crtc, intel_encoder);
2230 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2232 struct drm_display_mode *mode = panel->fixed_mode;
2234 seq_printf(m, "\tfixed mode:\n");
2235 intel_seq_print_mode(m, 2, mode);
2238 static void intel_dp_info(struct seq_file *m,
2239 struct intel_connector *intel_connector)
2241 struct intel_encoder *intel_encoder = intel_connector->encoder;
2242 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2244 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2245 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2247 if (intel_encoder->type == INTEL_OUTPUT_EDP)
2248 intel_panel_info(m, &intel_connector->panel);
2251 static void intel_hdmi_info(struct seq_file *m,
2252 struct intel_connector *intel_connector)
2254 struct intel_encoder *intel_encoder = intel_connector->encoder;
2255 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2257 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2261 static void intel_lvds_info(struct seq_file *m,
2262 struct intel_connector *intel_connector)
2264 intel_panel_info(m, &intel_connector->panel);
2267 static void intel_connector_info(struct seq_file *m,
2268 struct drm_connector *connector)
2270 struct intel_connector *intel_connector = to_intel_connector(connector);
2271 struct intel_encoder *intel_encoder = intel_connector->encoder;
2272 struct drm_display_mode *mode;
2274 seq_printf(m, "connector %d: type %s, status: %s\n",
2275 connector->base.id, drm_get_connector_name(connector),
2276 drm_get_connector_status_name(connector->status));
2277 if (connector->status == connector_status_connected) {
2278 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2279 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2280 connector->display_info.width_mm,
2281 connector->display_info.height_mm);
2282 seq_printf(m, "\tsubpixel order: %s\n",
2283 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2284 seq_printf(m, "\tCEA rev: %d\n",
2285 connector->display_info.cea_rev);
2287 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2288 intel_encoder->type == INTEL_OUTPUT_EDP)
2289 intel_dp_info(m, intel_connector);
2290 else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2291 intel_hdmi_info(m, intel_connector);
2292 else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2293 intel_lvds_info(m, intel_connector);
2295 seq_printf(m, "\tmodes:\n");
2296 list_for_each_entry(mode, &connector->modes, head)
2297 intel_seq_print_mode(m, 2, mode);
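/*
 * Cursor helpers for the display info dump: cursor_active() checks the
 * cursor control register of the given pipe (845G/865G, gen2-6/VLV and
 * IVB+ use different register layouts), and cursor_position() decodes the
 * sign/magnitude x/y fields and reports whether the cursor is active.
 */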
2300 static bool cursor_active(struct drm_device *dev, int pipe)
2302 struct drm_i915_private *dev_priv = dev->dev_private;
2305 if (IS_845G(dev) || IS_I865G(dev))
2306 state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2307 else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
2308 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2310 state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
2315 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2317 struct drm_i915_private *dev_priv = dev->dev_private;
2320 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
2321 pos = I915_READ(CURPOS_IVB(pipe));
2323 pos = I915_READ(CURPOS(pipe));
2325 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2326 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2329 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2330 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2333 return cursor_active(dev, pipe);
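/*
 * i915_display_info: dumps a "CRTC info" section (pipe, active state,
 * framebuffer, cursor and attached encoders/connectors) followed by a
 * "Connector info" section, holding the modeset locks and a runtime PM
 * reference for the duration of the dump.
 */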
2336 static int i915_display_info(struct seq_file *m, void *unused)
2338 struct drm_info_node *node = m->private;
2339 struct drm_device *dev = node->minor->dev;
2340 struct drm_i915_private *dev_priv = dev->dev_private;
2341 struct intel_crtc *crtc;
2342 struct drm_connector *connector;
2344 intel_runtime_pm_get(dev_priv);
2345 drm_modeset_lock_all(dev);
2346 seq_printf(m, "CRTC info\n");
2347 seq_printf(m, "---------\n");
2348 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
2352 seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
2353 crtc->base.base.id, pipe_name(crtc->pipe),
2354 yesno(crtc->active));
2356 intel_crtc_info(m, crtc);
2358 active = cursor_position(dev, crtc->pipe, &x, &y);
2359 seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
2360 yesno(crtc->cursor_visible),
2361 x, y, crtc->cursor_addr,
2366 seq_printf(m, "\n");
2367 seq_printf(m, "Connector info\n");
2368 seq_printf(m, "--------------\n");
2369 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2370 intel_connector_info(m, connector);
2372 drm_modeset_unlock_all(dev);
2373 intel_runtime_pm_put(dev_priv);
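/*
 * Pipe CRC support: one debugfs file per pipe exposes the CRC entries
 * captured by the hardware. Only a single opener per pipe is allowed, and
 * reads block until data arrives unless the file was opened O_NONBLOCK.
 */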
2378 struct pipe_crc_info {
2380 struct drm_device *dev;
2384 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
2386 struct pipe_crc_info *info = inode->i_private;
2387 struct drm_i915_private *dev_priv = info->dev->dev_private;
2388 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2390 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
2393 spin_lock_irq(&pipe_crc->lock);
2395 if (pipe_crc->opened) {
2396 spin_unlock_irq(&pipe_crc->lock);
2397 return -EBUSY; /* already open */
2400 pipe_crc->opened = true;
2401 filep->private_data = inode->i_private;
2403 spin_unlock_irq(&pipe_crc->lock);
2408 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
2410 struct pipe_crc_info *info = inode->i_private;
2411 struct drm_i915_private *dev_priv = info->dev->dev_private;
2412 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2414 spin_lock_irq(&pipe_crc->lock);
2415 pipe_crc->opened = false;
2416 spin_unlock_irq(&pipe_crc->lock);
2421 /* (6 fields, 8 chars each, space separated (5) + '\n') */
2422 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
2423 /* account for '\0' */
2424 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
2426 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
2428 assert_spin_locked(&pipe_crc->lock);
2429 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
2430 INTEL_PIPE_CRC_ENTRIES_NR);
2434 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
2437 struct pipe_crc_info *info = filep->private_data;
2438 struct drm_device *dev = info->dev;
2439 struct drm_i915_private *dev_priv = dev->dev_private;
2440 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2441 char buf[PIPE_CRC_BUFFER_LEN];
2442 int head, tail, n_entries, n;
2446 * Don't allow user space to provide buffers not big enough to hold
2449 if (count < PIPE_CRC_LINE_LEN)
2452 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
2455 /* nothing to read */
2456 spin_lock_irq(&pipe_crc->lock);
2457 while (pipe_crc_data_count(pipe_crc) == 0) {
2460 if (filep->f_flags & O_NONBLOCK) {
2461 spin_unlock_irq(&pipe_crc->lock);
2465 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
2466 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
2468 spin_unlock_irq(&pipe_crc->lock);
2473 /* We now have one or more entries to read */
2474 head = pipe_crc->head;
2475 tail = pipe_crc->tail;
2476 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
2477 count / PIPE_CRC_LINE_LEN);
2478 spin_unlock_irq(&pipe_crc->lock);
2483 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
2486 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
2487 "%8u %8x %8x %8x %8x %8x\n",
2488 entry->frame, entry->crc[0],
2489 entry->crc[1], entry->crc[2],
2490 entry->crc[3], entry->crc[4]);
2492 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
2493 buf, PIPE_CRC_LINE_LEN);
2494 if (ret == PIPE_CRC_LINE_LEN)
2497 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
2498 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
2500 } while (--n_entries);
2502 spin_lock_irq(&pipe_crc->lock);
2503 pipe_crc->tail = tail;
2504 spin_unlock_irq(&pipe_crc->lock);
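/*
 * Each record handed back to user space is a fixed-width line consisting of
 * the frame counter followed by the five CRC result words, e.g.
 * (illustrative values):
 *
 *   "    1234 89abcdef 00000000 00000000 00000000 00000000"
 */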
2509 static const struct file_operations i915_pipe_crc_fops = {
2510 .owner = THIS_MODULE,
2511 .open = i915_pipe_crc_open,
2512 .read = i915_pipe_crc_read,
2513 .release = i915_pipe_crc_release,
2516 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
2518 .name = "i915_pipe_A_crc",
2522 .name = "i915_pipe_B_crc",
2526 .name = "i915_pipe_C_crc",
2531 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2534 struct drm_device *dev = minor->dev;
2536 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2539 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2540 &i915_pipe_crc_fops);
2544 return drm_add_fake_info_node(minor, ent, info);
2547 static const char * const pipe_crc_sources[] = {
2560 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2562 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2563 return pipe_crc_sources[source];
2566 static int display_crc_ctl_show(struct seq_file *m, void *data)
2568 struct drm_device *dev = m->private;
2569 struct drm_i915_private *dev_priv = dev->dev_private;
2572 for (i = 0; i < I915_MAX_PIPES; i++)
2573 seq_printf(m, "%c %s\n", pipe_name(i),
2574 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2579 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2581 struct drm_device *dev = inode->i_private;
2583 return single_open(file, display_crc_ctl_show, dev);
2586 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2589 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2590 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2593 case INTEL_PIPE_CRC_SOURCE_PIPE:
2594 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2596 case INTEL_PIPE_CRC_SOURCE_NONE:
2606 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2607 enum intel_pipe_crc_source *source)
2609 struct intel_encoder *encoder;
2610 struct intel_crtc *crtc;
2611 struct intel_digital_port *dig_port;
2614 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2616 mutex_lock(&dev->mode_config.mutex);
2617 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2619 if (!encoder->base.crtc)
2622 crtc = to_intel_crtc(encoder->base.crtc);
2624 if (crtc->pipe != pipe)
2627 switch (encoder->type) {
2628 case INTEL_OUTPUT_TVOUT:
2629 *source = INTEL_PIPE_CRC_SOURCE_TV;
2631 case INTEL_OUTPUT_DISPLAYPORT:
2632 case INTEL_OUTPUT_EDP:
2633 dig_port = enc_to_dig_port(&encoder->base);
2634 switch (dig_port->port) {
2636 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2639 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2642 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2645 WARN(1, "nonexistent DP port %c\n",
2646 port_name(dig_port->port));
2652 mutex_unlock(&dev->mode_config.mutex);
2657 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2659 enum intel_pipe_crc_source *source,
2662 struct drm_i915_private *dev_priv = dev->dev_private;
2663 bool need_stable_symbols = false;
2665 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2666 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2672 case INTEL_PIPE_CRC_SOURCE_PIPE:
2673 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2675 case INTEL_PIPE_CRC_SOURCE_DP_B:
2676 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2677 need_stable_symbols = true;
2679 case INTEL_PIPE_CRC_SOURCE_DP_C:
2680 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2681 need_stable_symbols = true;
2683 case INTEL_PIPE_CRC_SOURCE_NONE:
2691 * When the pipe CRC tap point is after the transcoders we need
2692 * to tweak symbol-level features to produce a deterministic series of
2693 * symbols for a given frame. We need to reset those features only once
2694 * a frame (instead of every nth symbol):
2695 * - DC-balance: used to ensure a better clock recovery from the data
2697 * - DisplayPort scrambling: used for EMI reduction
2699 if (need_stable_symbols) {
2700 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2702 tmp |= DC_BALANCE_RESET_VLV;
2704 tmp |= PIPE_A_SCRAMBLE_RESET;
2706 tmp |= PIPE_B_SCRAMBLE_RESET;
2708 I915_WRITE(PORT_DFT2_G4X, tmp);
2714 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2716 enum intel_pipe_crc_source *source,
2719 struct drm_i915_private *dev_priv = dev->dev_private;
2720 bool need_stable_symbols = false;
2722 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2723 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2729 case INTEL_PIPE_CRC_SOURCE_PIPE:
2730 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2732 case INTEL_PIPE_CRC_SOURCE_TV:
2733 if (!SUPPORTS_TV(dev))
2735 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2737 case INTEL_PIPE_CRC_SOURCE_DP_B:
2740 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2741 need_stable_symbols = true;
2743 case INTEL_PIPE_CRC_SOURCE_DP_C:
2746 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2747 need_stable_symbols = true;
2749 case INTEL_PIPE_CRC_SOURCE_DP_D:
2752 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2753 need_stable_symbols = true;
2755 case INTEL_PIPE_CRC_SOURCE_NONE:
2763 * When the pipe CRC tap point is after the transcoders we need
2764 * to tweak symbol-level features to produce a deterministic series of
2765 * symbols for a given frame. We need to reset those features only once
2766 * a frame (instead of every nth symbol):
2767 * - DC-balance: used to ensure a better clock recovery from the data
2769 * - DisplayPort scrambling: used for EMI reduction
2771 if (need_stable_symbols) {
2772 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2774 WARN_ON(!IS_G4X(dev));
2776 I915_WRITE(PORT_DFT_I9XX,
2777 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2780 tmp |= PIPE_A_SCRAMBLE_RESET;
2782 tmp |= PIPE_B_SCRAMBLE_RESET;
2784 I915_WRITE(PORT_DFT2_G4X, tmp);
2790 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2793 struct drm_i915_private *dev_priv = dev->dev_private;
2794 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2797 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2799 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2800 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2801 tmp &= ~DC_BALANCE_RESET_VLV;
2802 I915_WRITE(PORT_DFT2_G4X, tmp);
2806 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2809 struct drm_i915_private *dev_priv = dev->dev_private;
2810 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2813 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2815 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2816 I915_WRITE(PORT_DFT2_G4X, tmp);
2818 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2819 I915_WRITE(PORT_DFT_I9XX,
2820 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2824 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2827 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2828 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2831 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2832 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
2834 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2835 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
2837 case INTEL_PIPE_CRC_SOURCE_PIPE:
2838 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
2840 case INTEL_PIPE_CRC_SOURCE_NONE:
2850 static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2853 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2854 *source = INTEL_PIPE_CRC_SOURCE_PF;
2857 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2858 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
2860 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2861 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
2863 case INTEL_PIPE_CRC_SOURCE_PF:
2864 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
2866 case INTEL_PIPE_CRC_SOURCE_NONE:
2876 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2877 enum intel_pipe_crc_source source)
2879 struct drm_i915_private *dev_priv = dev->dev_private;
2880 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
2881 u32 val = 0; /* shut up gcc */
2884 if (pipe_crc->source == source)
2887 /* forbid changing the source without going back to 'none' */
2888 if (pipe_crc->source && source)
2892 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
2893 else if (INTEL_INFO(dev)->gen < 5)
2894 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2895 else if (IS_VALLEYVIEW(dev))
2896 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2897 else if (IS_GEN5(dev) || IS_GEN6(dev))
2898 ret = ilk_pipe_crc_ctl_reg(&source, &val);
2900 ret = ivb_pipe_crc_ctl_reg(&source, &val);
2905 /* none -> real source transition */
2907 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
2908 pipe_name(pipe), pipe_crc_source_name(source));
2910 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
2911 INTEL_PIPE_CRC_ENTRIES_NR,
2913 if (!pipe_crc->entries)
2916 spin_lock_irq(&pipe_crc->lock);
2919 spin_unlock_irq(&pipe_crc->lock);
2922 pipe_crc->source = source;
2924 I915_WRITE(PIPE_CRC_CTL(pipe), val);
2925 POSTING_READ(PIPE_CRC_CTL(pipe));
2927 /* real source -> none transition */
2928 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
2929 struct intel_pipe_crc_entry *entries;
2931 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
2934 intel_wait_for_vblank(dev, pipe);
2936 spin_lock_irq(&pipe_crc->lock);
2937 entries = pipe_crc->entries;
2938 pipe_crc->entries = NULL;
2939 spin_unlock_irq(&pipe_crc->lock);
2944 g4x_undo_pipe_scramble_reset(dev, pipe);
2945 else if (IS_VALLEYVIEW(dev))
2946 vlv_undo_pipe_scramble_reset(dev, pipe);
2953 * Parse pipe CRC command strings:
2954 * command: wsp* object wsp+ name wsp+ source wsp*
2957 * source: (none | plane1 | plane2 | pf)
2958 * wsp: (#0x20 | #0x9 | #0xA)+
2961 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
2962 * "pipe A none" -> Stop CRC
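 *
 * These commands are written to the i915_display_crc_ctl debugfs file, e.g.
 * (illustrative, assuming debugfs is mounted at /sys/kernel/debug and DRM
 * minor 0):
 *
 *   echo "pipe A pf" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl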
2964 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
2971 /* skip leading white space */
2972 buf = skip_spaces(buf);
2974 break; /* end of buffer */
2976 /* find end of word */
2977 for (end = buf; *end && !isspace(*end); end++)
2980 if (n_words == max_words) {
2981 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
2983 return -EINVAL; /* ran out of words[] before bytes */
2988 words[n_words++] = buf;
2995 enum intel_pipe_crc_object {
2996 PIPE_CRC_OBJECT_PIPE,
2999 static const char * const pipe_crc_objects[] = {
3004 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3008 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3009 if (!strcmp(buf, pipe_crc_objects[i])) {
3017 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3019 const char name = buf[0];
3021 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3030 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3034 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3035 if (!strcmp(buf, pipe_crc_sources[i])) {
3043 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3047 char *words[N_WORDS];
3049 enum intel_pipe_crc_object object;
3050 enum intel_pipe_crc_source source;
3052 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3053 if (n_words != N_WORDS) {
3054 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3059 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3060 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3064 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3065 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3069 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3070 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3074 return pipe_crc_set_source(dev, pipe, source);
3077 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3078 size_t len, loff_t *offp)
3080 struct seq_file *m = file->private_data;
3081 struct drm_device *dev = m->private;
3088 if (len > PAGE_SIZE - 1) {
3089 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3094 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3098 if (copy_from_user(tmpbuf, ubuf, len)) {
3104 ret = display_crc_ctl_parse(dev, tmpbuf, len);
3115 static const struct file_operations i915_display_crc_ctl_fops = {
3116 .owner = THIS_MODULE,
3117 .open = display_crc_ctl_open,
3119 .llseek = seq_lseek,
3120 .release = single_release,
3121 .write = display_crc_ctl_write
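/*
 * Watermark latency debugfs files (i915_pri_wm_latency, i915_spr_wm_latency,
 * i915_cur_wm_latency): reading prints one "WM<level> <raw> (<usec>)" line
 * per watermark level; writing expects one value per level, e.g. on a
 * platform with five levels (illustrative values):
 *
 *   echo "12 4 5 6 40" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */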
3124 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3126 struct drm_device *dev = m->private;
3127 int num_levels = ilk_wm_max_level(dev) + 1;
3130 drm_modeset_lock_all(dev);
3132 for (level = 0; level < num_levels; level++) {
3133 unsigned int latency = wm[level];
3135 /* WM1+ latency values in 0.5us units */
3139 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3141 latency / 10, latency % 10);
3144 drm_modeset_unlock_all(dev);
3147 static int pri_wm_latency_show(struct seq_file *m, void *data)
3149 struct drm_device *dev = m->private;
3151 wm_latency_show(m, to_i915(dev)->wm.pri_latency);
3156 static int spr_wm_latency_show(struct seq_file *m, void *data)
3158 struct drm_device *dev = m->private;
3160 wm_latency_show(m, to_i915(dev)->wm.spr_latency);
3165 static int cur_wm_latency_show(struct seq_file *m, void *data)
3167 struct drm_device *dev = m->private;
3169 wm_latency_show(m, to_i915(dev)->wm.cur_latency);
3174 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3176 struct drm_device *dev = inode->i_private;
3178 if (!HAS_PCH_SPLIT(dev))
3181 return single_open(file, pri_wm_latency_show, dev);
3184 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3186 struct drm_device *dev = inode->i_private;
3188 if (!HAS_PCH_SPLIT(dev))
3191 return single_open(file, spr_wm_latency_show, dev);
3194 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3196 struct drm_device *dev = inode->i_private;
3198 if (!HAS_PCH_SPLIT(dev))
3201 return single_open(file, cur_wm_latency_show, dev);
3204 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3205 size_t len, loff_t *offp, uint16_t wm[5])
3207 struct seq_file *m = file->private_data;
3208 struct drm_device *dev = m->private;
3209 uint16_t new[5] = { 0 };
3210 int num_levels = ilk_wm_max_level(dev) + 1;
3215 if (len >= sizeof(tmp))
3218 if (copy_from_user(tmp, ubuf, len))
3223 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
3224 if (ret != num_levels)
3227 drm_modeset_lock_all(dev);
3229 for (level = 0; level < num_levels; level++)
3230 wm[level] = new[level];
3232 drm_modeset_unlock_all(dev);
3238 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3239 size_t len, loff_t *offp)
3241 struct seq_file *m = file->private_data;
3242 struct drm_device *dev = m->private;
3244 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
3247 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3248 size_t len, loff_t *offp)
3250 struct seq_file *m = file->private_data;
3251 struct drm_device *dev = m->private;
3253 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
3256 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3257 size_t len, loff_t *offp)
3259 struct seq_file *m = file->private_data;
3260 struct drm_device *dev = m->private;
3262 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
3265 static const struct file_operations i915_pri_wm_latency_fops = {
3266 .owner = THIS_MODULE,
3267 .open = pri_wm_latency_open,
3269 .llseek = seq_lseek,
3270 .release = single_release,
3271 .write = pri_wm_latency_write
3274 static const struct file_operations i915_spr_wm_latency_fops = {
3275 .owner = THIS_MODULE,
3276 .open = spr_wm_latency_open,
3278 .llseek = seq_lseek,
3279 .release = single_release,
3280 .write = spr_wm_latency_write
3283 static const struct file_operations i915_cur_wm_latency_fops = {
3284 .owner = THIS_MODULE,
3285 .open = cur_wm_latency_open,
3287 .llseek = seq_lseek,
3288 .release = single_release,
3289 .write = cur_wm_latency_write
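/*
 * i915_wedged: reading reports the current GPU reset counter; writing a
 * value calls i915_handle_error() so the hang handling path can be
 * exercised from user space.
 */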
3293 i915_wedged_get(void *data, u64 *val)
3295 struct drm_device *dev = data;
3296 struct drm_i915_private *dev_priv = dev->dev_private;
3298 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
3304 i915_wedged_set(void *data, u64 val)
3306 struct drm_device *dev = data;
3307 struct drm_i915_private *dev_priv = dev->dev_private;
3309 intel_runtime_pm_get(dev_priv);
3311 i915_handle_error(dev, val,
3312 "Manually setting wedged to %llu", val);
3314 intel_runtime_pm_put(dev_priv);
3319 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3320 i915_wedged_get, i915_wedged_set,
3324 i915_ring_stop_get(void *data, u64 *val)
3326 struct drm_device *dev = data;
3327 struct drm_i915_private *dev_priv = dev->dev_private;
3329 *val = dev_priv->gpu_error.stop_rings;
3335 i915_ring_stop_set(void *data, u64 val)
3337 struct drm_device *dev = data;
3338 struct drm_i915_private *dev_priv = dev->dev_private;
3341 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
3343 ret = mutex_lock_interruptible(&dev->struct_mutex);
3347 dev_priv->gpu_error.stop_rings = val;
3348 mutex_unlock(&dev->struct_mutex);
3353 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
3354 i915_ring_stop_get, i915_ring_stop_set,
3358 i915_ring_missed_irq_get(void *data, u64 *val)
3360 struct drm_device *dev = data;
3361 struct drm_i915_private *dev_priv = dev->dev_private;
3363 *val = dev_priv->gpu_error.missed_irq_rings;
3368 i915_ring_missed_irq_set(void *data, u64 val)
3370 struct drm_device *dev = data;
3371 struct drm_i915_private *dev_priv = dev->dev_private;
3374 /* Lock against concurrent debugfs callers */
3375 ret = mutex_lock_interruptible(&dev->struct_mutex);
3378 dev_priv->gpu_error.missed_irq_rings = val;
3379 mutex_unlock(&dev->struct_mutex);
3384 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3385 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3389 i915_ring_test_irq_get(void *data, u64 *val)
3391 struct drm_device *dev = data;
3392 struct drm_i915_private *dev_priv = dev->dev_private;
3394 *val = dev_priv->gpu_error.test_irq_rings;
3400 i915_ring_test_irq_set(void *data, u64 val)
3402 struct drm_device *dev = data;
3403 struct drm_i915_private *dev_priv = dev->dev_private;
3406 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3408 /* Lock against concurrent debugfs callers */
3409 ret = mutex_lock_interruptible(&dev->struct_mutex);
3413 dev_priv->gpu_error.test_irq_rings = val;
3414 mutex_unlock(&dev->struct_mutex);
3419 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
3420 i915_ring_test_irq_get, i915_ring_test_irq_set,
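/*
 * i915_gem_drop_caches: write a mask of the DROP_* flags below to idle the
 * GPU, retire requests, unbind inactive VMAs and/or release the pages of
 * unbound objects, e.g. (illustrative):
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */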
3423 #define DROP_UNBOUND 0x1
3424 #define DROP_BOUND 0x2
3425 #define DROP_RETIRE 0x4
3426 #define DROP_ACTIVE 0x8
3427 #define DROP_ALL (DROP_UNBOUND | \
3432 i915_drop_caches_get(void *data, u64 *val)
3440 i915_drop_caches_set(void *data, u64 val)
3442 struct drm_device *dev = data;
3443 struct drm_i915_private *dev_priv = dev->dev_private;
3444 struct drm_i915_gem_object *obj, *next;
3445 struct i915_address_space *vm;
3446 struct i915_vma *vma, *x;
3449 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
3451 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3452 * on ioctls on -EAGAIN. */
3453 ret = mutex_lock_interruptible(&dev->struct_mutex);
3457 if (val & DROP_ACTIVE) {
3458 ret = i915_gpu_idle(dev);
3463 if (val & (DROP_RETIRE | DROP_ACTIVE))
3464 i915_gem_retire_requests(dev);
3466 if (val & DROP_BOUND) {
3467 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3468 list_for_each_entry_safe(vma, x, &vm->inactive_list,
3473 ret = i915_vma_unbind(vma);
3480 if (val & DROP_UNBOUND) {
3481 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
3483 if (obj->pages_pin_count == 0) {
3484 ret = i915_gem_object_put_pages(obj);
3491 mutex_unlock(&dev->struct_mutex);
3496 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3497 i915_drop_caches_get, i915_drop_caches_set,
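/*
 * i915_max_freq / i915_min_freq: read or override the RPS frequency
 * softlimits. Values are in MHz; Valleyview converts them with
 * vlv_gpu_freq()/vlv_freq_opcode(), other gen6/gen7 platforms use
 * GT_FREQUENCY_MULTIPLIER.
 */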
3501 i915_max_freq_get(void *data, u64 *val)
3503 struct drm_device *dev = data;
3504 struct drm_i915_private *dev_priv = dev->dev_private;
3507 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3510 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3512 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3516 if (IS_VALLEYVIEW(dev))
3517 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
3519 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3520 mutex_unlock(&dev_priv->rps.hw_lock);
3526 i915_max_freq_set(void *data, u64 val)
3528 struct drm_device *dev = data;
3529 struct drm_i915_private *dev_priv = dev->dev_private;
3530 u32 rp_state_cap, hw_max, hw_min;
3533 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3536 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3538 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
3540 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3545 * Turbo will still be enabled, but won't go above the set value.
3547 if (IS_VALLEYVIEW(dev)) {
3548 val = vlv_freq_opcode(dev_priv, val);
3550 hw_max = valleyview_rps_max_freq(dev_priv);
3551 hw_min = valleyview_rps_min_freq(dev_priv);
3553 do_div(val, GT_FREQUENCY_MULTIPLIER);
3555 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3556 hw_max = dev_priv->rps.max_freq;
3557 hw_min = (rp_state_cap >> 16) & 0xff;
3560 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3561 mutex_unlock(&dev_priv->rps.hw_lock);
3565 dev_priv->rps.max_freq_softlimit = val;
3567 if (IS_VALLEYVIEW(dev))
3568 valleyview_set_rps(dev, val);
3570 gen6_set_rps(dev, val);
3572 mutex_unlock(&dev_priv->rps.hw_lock);
3577 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
3578 i915_max_freq_get, i915_max_freq_set,
3582 i915_min_freq_get(void *data, u64 *val)
3584 struct drm_device *dev = data;
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3588 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3591 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3593 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3597 if (IS_VALLEYVIEW(dev))
3598 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3600 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3601 mutex_unlock(&dev_priv->rps.hw_lock);
3607 i915_min_freq_set(void *data, u64 val)
3609 struct drm_device *dev = data;
3610 struct drm_i915_private *dev_priv = dev->dev_private;
3611 u32 rp_state_cap, hw_max, hw_min;
3614 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3617 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3619 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
3621 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3626 * Turbo will still be enabled, but won't go below the set value.
3628 if (IS_VALLEYVIEW(dev)) {
3629 val = vlv_freq_opcode(dev_priv, val);
3631 hw_max = valleyview_rps_max_freq(dev_priv);
3632 hw_min = valleyview_rps_min_freq(dev_priv);
3634 do_div(val, GT_FREQUENCY_MULTIPLIER);
3636 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3637 hw_max = dev_priv->rps.max_freq;
3638 hw_min = (rp_state_cap >> 16) & 0xff;
3641 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
3642 mutex_unlock(&dev_priv->rps.hw_lock);
3646 dev_priv->rps.min_freq_softlimit = val;
3648 if (IS_VALLEYVIEW(dev))
3649 valleyview_set_rps(dev, val);
3651 gen6_set_rps(dev, val);
3653 mutex_unlock(&dev_priv->rps.hw_lock);
3658 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
3659 i915_min_freq_get, i915_min_freq_set,
3663 i915_cache_sharing_get(void *data, u64 *val)
3665 struct drm_device *dev = data;
3666 struct drm_i915_private *dev_priv = dev->dev_private;
3670 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3673 ret = mutex_lock_interruptible(&dev->struct_mutex);
3676 intel_runtime_pm_get(dev_priv);
3678 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3680 intel_runtime_pm_put(dev_priv);
3681 mutex_unlock(&dev_priv->dev->struct_mutex);
3683 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3689 i915_cache_sharing_set(void *data, u64 val)
3691 struct drm_device *dev = data;
3692 struct drm_i915_private *dev_priv = dev->dev_private;
3695 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3701 intel_runtime_pm_get(dev_priv);
3702 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3704 /* Update the cache sharing policy here as well */
3705 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3706 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3707 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
3708 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3710 intel_runtime_pm_put(dev_priv);
3714 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3715 i915_cache_sharing_get, i915_cache_sharing_set,
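/*
 * i915_forcewake_user: keeping this file open holds a FORCEWAKE_ALL
 * reference on gen6+ hardware; the reference is dropped when the file is
 * closed.
 */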
3718 static int i915_forcewake_open(struct inode *inode, struct file *file)
3720 struct drm_device *dev = inode->i_private;
3721 struct drm_i915_private *dev_priv = dev->dev_private;
3723 if (INTEL_INFO(dev)->gen < 6)
3726 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3731 static int i915_forcewake_release(struct inode *inode, struct file *file)
3733 struct drm_device *dev = inode->i_private;
3734 struct drm_i915_private *dev_priv = dev->dev_private;
3736 if (INTEL_INFO(dev)->gen < 6)
3739 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3744 static const struct file_operations i915_forcewake_fops = {
3745 .owner = THIS_MODULE,
3746 .open = i915_forcewake_open,
3747 .release = i915_forcewake_release,
3750 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
3752 struct drm_device *dev = minor->dev;
3755 ent = debugfs_create_file("i915_forcewake_user",
3758 &i915_forcewake_fops);
3762 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
3765 static int i915_debugfs_create(struct dentry *root,
3766 struct drm_minor *minor,
3768 const struct file_operations *fops)
3770 struct drm_device *dev = minor->dev;
3773 ent = debugfs_create_file(name,
3780 return drm_add_fake_info_node(minor, ent, fops);
3783 static const struct drm_info_list i915_debugfs_list[] = {
3784 {"i915_capabilities", i915_capabilities, 0},
3785 {"i915_gem_objects", i915_gem_object_info, 0},
3786 {"i915_gem_gtt", i915_gem_gtt_info, 0},
3787 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
3788 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
3789 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
3790 {"i915_gem_stolen", i915_gem_stolen_list_info },
3791 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
3792 {"i915_gem_request", i915_gem_request_info, 0},
3793 {"i915_gem_seqno", i915_gem_seqno_info, 0},
3794 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
3795 {"i915_gem_interrupt", i915_interrupt_info, 0},
3796 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
3797 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
3798 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
3799 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
3800 {"i915_rstdby_delays", i915_rstdby_delays, 0},
3801 {"i915_frequency_info", i915_frequency_info, 0},
3802 {"i915_delayfreq_table", i915_delayfreq_table, 0},
3803 {"i915_inttoext_table", i915_inttoext_table, 0},
3804 {"i915_drpc_info", i915_drpc_info, 0},
3805 {"i915_emon_status", i915_emon_status, 0},
3806 {"i915_ring_freq_table", i915_ring_freq_table, 0},
3807 {"i915_gfxec", i915_gfxec, 0},
3808 {"i915_fbc_status", i915_fbc_status, 0},
3809 {"i915_ips_status", i915_ips_status, 0},
3810 {"i915_sr_status", i915_sr_status, 0},
3811 {"i915_opregion", i915_opregion, 0},
3812 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
3813 {"i915_context_status", i915_context_status, 0},
3814 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
3815 {"i915_swizzle_info", i915_swizzle_info, 0},
3816 {"i915_ppgtt_info", i915_ppgtt_info, 0},
3817 {"i915_llc", i915_llc, 0},
3818 {"i915_edp_psr_status", i915_edp_psr_status, 0},
3819 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
3820 {"i915_energy_uJ", i915_energy_uJ, 0},
3821 {"i915_pc8_status", i915_pc8_status, 0},
3822 {"i915_power_domain_info", i915_power_domain_info, 0},
3823 {"i915_display_info", i915_display_info, 0},
3825 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
3827 static const struct i915_debugfs_files {
3829 const struct file_operations *fops;
3830 } i915_debugfs_files[] = {
3831 {"i915_wedged", &i915_wedged_fops},
3832 {"i915_max_freq", &i915_max_freq_fops},
3833 {"i915_min_freq", &i915_min_freq_fops},
3834 {"i915_cache_sharing", &i915_cache_sharing_fops},
3835 {"i915_ring_stop", &i915_ring_stop_fops},
3836 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
3837 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
3838 {"i915_gem_drop_caches", &i915_drop_caches_fops},
3839 {"i915_error_state", &i915_error_state_fops},
3840 {"i915_next_seqno", &i915_next_seqno_fops},
3841 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
3842 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
3843 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
3844 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
3847 void intel_display_crc_init(struct drm_device *dev)
3849 struct drm_i915_private *dev_priv = dev->dev_private;
3852 for_each_pipe(pipe) {
3853 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3855 pipe_crc->opened = false;
3856 spin_lock_init(&pipe_crc->lock);
3857 init_waitqueue_head(&pipe_crc->wq);
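/*
 * i915_debugfs_init() registers the forcewake file, the per-pipe CRC files,
 * the writable i915_debugfs_files[] entries and finally the read-only
 * i915_debugfs_list[] entries under the DRM minor's debugfs root;
 * i915_debugfs_cleanup() removes them again in the same groups.
 */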
3861 int i915_debugfs_init(struct drm_minor *minor)
3865 ret = i915_forcewake_create(minor->debugfs_root, minor);
3869 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3870 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
3875 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
3876 ret = i915_debugfs_create(minor->debugfs_root, minor,
3877 i915_debugfs_files[i].name,
3878 i915_debugfs_files[i].fops);
3883 return drm_debugfs_create_files(i915_debugfs_list,
3884 I915_DEBUGFS_ENTRIES,
3885 minor->debugfs_root, minor);
3888 void i915_debugfs_cleanup(struct drm_minor *minor)
3892 drm_debugfs_remove_files(i915_debugfs_list,
3893 I915_DEBUGFS_ENTRIES, minor);
3895 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
3898 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3899 struct drm_info_list *info_list =
3900 (struct drm_info_list *)&i915_pipe_crc_data[i];
3902 drm_debugfs_remove_files(info_list, 1, minor);
3905 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
3906 struct drm_info_list *info_list =
3907 (struct drm_info_list *) i915_debugfs_files[i].fops;
3909 drm_debugfs_remove_files(info_list, 1, minor);