4 * Copyright (C) 2012 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/debugfs.h>
18 #include <linux/export.h>
19 #include <linux/file.h>
21 #include <linux/kernel.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/time64.h>
31 #ifdef CONFIG_DEBUG_FS
33 static LIST_HEAD(sync_timeline_list_head);
34 static DEFINE_SPINLOCK(sync_timeline_list_lock);
35 static LIST_HEAD(sync_fence_list_head);
36 static DEFINE_SPINLOCK(sync_fence_list_lock);
/*
 * sync_timeline_debug_add() - register a timeline for debug reporting.
 *
 * Links @obj onto the global sync_timeline_list_head so it appears in the
 * debugfs "sync" dump (see sync_debugfs_show()).  The list is protected by
 * sync_timeline_list_lock, taken irqsave since removal may race with other
 * contexts.  NOTE(review): the "flags" local is declared on a line not
 * shown in this excerpt.
 */
38 void sync_timeline_debug_add(struct sync_timeline *obj)
42 spin_lock_irqsave(&sync_timeline_list_lock, flags);
43 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
44 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
/*
 * sync_timeline_debug_remove() - unregister a timeline from debug reporting.
 *
 * Counterpart of sync_timeline_debug_add(); unlinks @obj from the global
 * timeline list under sync_timeline_list_lock so the debugfs dump no longer
 * walks a timeline that is being destroyed.
 */
47 void sync_timeline_debug_remove(struct sync_timeline *obj)
51 spin_lock_irqsave(&sync_timeline_list_lock, flags);
52 list_del(&obj->sync_timeline_list);
53 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
/*
 * sync_fence_debug_add() - register a fence for debug reporting.
 *
 * Links @fence onto the global sync_fence_list_head (guarded by
 * sync_fence_list_lock) so that sync_debugfs_show() can enumerate all live
 * fences in the "fences:" section of the dump.
 */
56 void sync_fence_debug_add(struct sync_fence *fence)
60 spin_lock_irqsave(&sync_fence_list_lock, flags);
61 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
62 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
/*
 * sync_fence_debug_remove() - unregister a fence from debug reporting.
 *
 * Counterpart of sync_fence_debug_add(); removes @fence from the global
 * fence list under sync_fence_list_lock before the fence is freed.
 */
65 void sync_fence_debug_remove(struct sync_fence *fence)
69 spin_lock_irqsave(&sync_fence_list_lock, flags);
70 list_del(&fence->sync_fence_list);
71 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
/*
 * sync_status_str() - map a fence/pt status code to a short human-readable
 * string for the debug output (used by sync_print_pt() and
 * sync_print_fence()).  Body not shown in this excerpt.
 */
74 static const char *sync_status_str(int status)
/*
 * sync_print_pt() - emit one line describing a single sync_pt.
 *
 * @s:     seq_file receiving the output
 * @pt:    the sync point to describe
 * @fence: true when called from fence context (sync_print_fence()); then the
 *         parent timeline's name is prefixed, since it is not implied by an
 *         enclosing timeline header as it is in sync_print_obj().
 *
 * Prints the status string, the signal timestamp (when available), and any
 * driver-specific pt/timeline value strings provided via the timeline ops.
 */
85 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
88 struct sync_timeline *parent = sync_pt_parent(pt);
/* Refresh status from the underlying fence; _locked variant — presumably a
 * suitable lock is already held by the caller (TODO confirm). */
90 if (fence_is_signaled_locked(&pt->base))
91 status = pt->base.status;
93 seq_printf(s, " %s%spt %s",
94 fence ? parent->name : "",
96 sync_status_str(status));
/* Show when the pt signaled, with nanosecond resolution. */
99 struct timespec64 ts64 = ktime_to_timespec64(pt->base.timestamp);
101 seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
/* Driver-specific detail: "pt_value / timeline_value", only when the
 * timeline implements both stringification callbacks. */
104 if (parent->ops->timeline_value_str &&
105 parent->ops->pt_value_str) {
108 parent->ops->pt_value_str(pt, value, sizeof(value));
109 seq_printf(s, ": %s", value);
111 parent->ops->timeline_value_str(parent, value,
113 seq_printf(s, " / %s", value);
/*
 * sync_print_obj() - emit a timeline header followed by all of its child
 * sync points.
 *
 * Prints "<name> <driver_name>" plus the driver's timeline value string (if
 * the op is provided), then walks child_list_head under child_list_lock,
 * printing each child pt via sync_print_pt(..., false).
 */
120 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
122 struct list_head *pos;
125 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
127 if (obj->ops->timeline_value_str) {
130 obj->ops->timeline_value_str(obj, value, sizeof(value));
131 seq_printf(s, ": %s", value);
/* irqsave: child list may be manipulated from interrupt context when pts
 * signal — presumably why the plain spin_lock is not used (TODO confirm). */
136 spin_lock_irqsave(&obj->child_list_lock, flags);
137 list_for_each(pos, &obj->child_list_head) {
139 container_of(pos, struct sync_pt, child_list);
140 sync_print_pt(s, pt, false);
142 spin_unlock_irqrestore(&obj->child_list_lock, flags);
/*
 * sync_print_fence() - emit a fence header, its component pts, and its
 * waiters.
 *
 * Prints "[<ptr>] <name>: <status>", then each of the fence's constituent
 * sync points (num_fences entries in cbs[]), and finally every async waiter
 * callback found on the fence's wait queue.
 */
145 static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
151 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
152 sync_status_str(atomic_read(&fence->status)));
154 for (i = 0; i < fence->num_fences; ++i) {
156 container_of(fence->cbs[i].sync_pt,
157 struct sync_pt, base);
159 sync_print_pt(s, pt, true);
/* Walk the wait queue; only entries whose wake function is
 * sync_fence_wake_up_wq are sync_fence_waiter instances — other entries
 * (e.g. plain poll waiters) are skipped. */
162 spin_lock_irqsave(&fence->wq.lock, flags);
163 list_for_each_entry(pos, &fence->wq.task_list, task_list) {
164 struct sync_fence_waiter *waiter;
166 if (pos->func != &sync_fence_wake_up_wq)
169 waiter = container_of(pos, struct sync_fence_waiter, work);
/* %pF resolves the callback pointer to a symbol name. */
171 seq_printf(s, "waiter %pF\n", waiter->callback);
173 spin_unlock_irqrestore(&fence->wq.lock, flags);
/*
 * sync_debugfs_show() - seq_file show callback for the "sync" debugfs file.
 *
 * Produces the full report: every registered timeline (with its child pts)
 * under sync_timeline_list_lock, followed by every registered fence (with
 * its pts and waiters) under sync_fence_list_lock.  Also invoked directly
 * by sync_dump() to render the same report into a static buffer.
 */
176 static int sync_debugfs_show(struct seq_file *s, void *unused)
179 struct list_head *pos;
181 seq_puts(s, "objs:\n--------------\n");
183 spin_lock_irqsave(&sync_timeline_list_lock, flags);
184 list_for_each(pos, &sync_timeline_list_head) {
185 struct sync_timeline *obj =
186 container_of(pos, struct sync_timeline,
189 sync_print_obj(s, obj);
192 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
194 seq_puts(s, "fences:\n--------------\n");
196 spin_lock_irqsave(&sync_fence_list_lock, flags);
197 list_for_each(pos, &sync_fence_list_head) {
198 struct sync_fence *fence =
199 container_of(pos, struct sync_fence, sync_fence_list);
201 sync_print_fence(s, fence);
204 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
/*
 * sync_debugfs_open() - open callback wiring the debugfs file to the
 * single_open()/seq_file machinery with sync_debugfs_show() as the
 * show function.
 */
208 static int sync_debugfs_open(struct inode *inode, struct file *file)
210 return single_open(file, sync_debugfs_show, inode->i_private);
/* File operations for the "sync" debugfs entry; standard single_open()
 * pairing (open -> sync_debugfs_open, release -> single_release). */
213 static const struct file_operations sync_debugfs_fops = {
214 .open = sync_debugfs_open,
217 .release = single_release,
/*
 * sync_debugfs_init() - create the read-only "sync" file at the debugfs
 * root.  Registered as a late_initcall so debugfs is guaranteed to be up
 * before the file is created.
 */
220 static __init int sync_debugfs_init(void)
222 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
225 late_initcall(sync_debugfs_init);
/*
 * sync_dump() support: renders the same report as the debugfs file into a
 * static 64 KiB buffer via an on-stack seq_file, then emits it to the
 * kernel log with pr_cont() in DUMP_CHUNK-sized pieces (printk cannot take
 * arbitrarily large strings).  NOTE(review): the function header line is
 * not shown in this excerpt.
 */
227 #define DUMP_CHUNK 256
228 static char sync_dump_buf[64 * 1024];
231 struct seq_file s = {
232 .buf = sync_dump_buf,
/* -1 leaves room so the buffer can always be NUL-terminated. */
233 .size = sizeof(sync_dump_buf) - 1,
237 sync_debugfs_show(&s, NULL);
239 for (i = 0; i < s.count; i += DUMP_CHUNK) {
240 if ((s.count - i) > DUMP_CHUNK) {
/* Temporarily NUL-terminate the chunk in place, print it, then
 * restore the saved byte so the rest of the buffer is intact. */
241 char c = s.buf[i + DUMP_CHUNK];
243 s.buf[i + DUMP_CHUNK] = 0;
244 pr_cont("%s", s.buf + i);
245 s.buf[i + DUMP_CHUNK] = c;
/* Final (short) chunk: already NUL-terminated by seq_file. */
248 pr_cont("%s", s.buf + i);