/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/debugfs.h>
18#include <linux/export.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24#include <linux/seq_file.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27#include <linux/anon_inodes.h>
28#include "sync.h"
29
30#ifdef CONFIG_DEBUG_FS
31
32static LIST_HEAD(sync_timeline_list_head);
33static DEFINE_SPINLOCK(sync_timeline_list_lock);
34static LIST_HEAD(sync_fence_list_head);
35static DEFINE_SPINLOCK(sync_fence_list_lock);
36
37void sync_timeline_debug_add(struct sync_timeline *obj)
38{
39 unsigned long flags;
40
41 spin_lock_irqsave(&sync_timeline_list_lock, flags);
42 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
43 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
44}
45
46void sync_timeline_debug_remove(struct sync_timeline *obj)
47{
48 unsigned long flags;
49
50 spin_lock_irqsave(&sync_timeline_list_lock, flags);
51 list_del(&obj->sync_timeline_list);
52 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
53}
54
55void sync_fence_debug_add(struct sync_fence *fence)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&sync_fence_list_lock, flags);
60 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
61 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
62}
63
64void sync_fence_debug_remove(struct sync_fence *fence)
65{
66 unsigned long flags;
67
68 spin_lock_irqsave(&sync_fence_list_lock, flags);
69 list_del(&fence->sync_fence_list);
70 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
71}
72
/*
 * Map a fence/pt status value to a human-readable label:
 * 0 -> "signaled", positive -> "active", negative -> "error".
 */
static const char *sync_status_str(int status)
{
	if (status < 0)
		return "error";

	return status ? "active" : "signaled";
}
83
84static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
85{
86 int status = 1;
87 struct sync_timeline *parent = sync_pt_parent(pt);
88
89 if (fence_is_signaled_locked(&pt->base))
90 status = pt->base.status;
91
92 seq_printf(s, " %s%spt %s",
93 fence ? parent->name : "",
94 fence ? "_" : "",
95 sync_status_str(status));
96
97 if (status <= 0) {
98 struct timeval tv = ktime_to_timeval(pt->base.timestamp);
Peter Senna Tschudin95451352014-07-12 21:55:56 +020099
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200100 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
101 }
102
103 if (parent->ops->timeline_value_str &&
104 parent->ops->pt_value_str) {
105 char value[64];
Peter Senna Tschudin95451352014-07-12 21:55:56 +0200106
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200107 parent->ops->pt_value_str(pt, value, sizeof(value));
108 seq_printf(s, ": %s", value);
109 if (fence) {
110 parent->ops->timeline_value_str(parent, value,
111 sizeof(value));
112 seq_printf(s, " / %s", value);
113 }
114 }
115
116 seq_puts(s, "\n");
117}
118
119static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
120{
121 struct list_head *pos;
122 unsigned long flags;
123
124 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
125
126 if (obj->ops->timeline_value_str) {
127 char value[64];
Peter Senna Tschudin95451352014-07-12 21:55:56 +0200128
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200129 obj->ops->timeline_value_str(obj, value, sizeof(value));
130 seq_printf(s, ": %s", value);
131 }
132
133 seq_puts(s, "\n");
134
135 spin_lock_irqsave(&obj->child_list_lock, flags);
136 list_for_each(pos, &obj->child_list_head) {
137 struct sync_pt *pt =
138 container_of(pos, struct sync_pt, child_list);
139 sync_print_pt(s, pt, false);
140 }
141 spin_unlock_irqrestore(&obj->child_list_lock, flags);
142}
143
144static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
145{
146 wait_queue_t *pos;
147 unsigned long flags;
148 int i;
149
150 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
151 sync_status_str(atomic_read(&fence->status)));
152
153 for (i = 0; i < fence->num_fences; ++i) {
154 struct sync_pt *pt =
155 container_of(fence->cbs[i].sync_pt,
156 struct sync_pt, base);
157
158 sync_print_pt(s, pt, true);
159 }
160
161 spin_lock_irqsave(&fence->wq.lock, flags);
162 list_for_each_entry(pos, &fence->wq.task_list, task_list) {
163 struct sync_fence_waiter *waiter;
164
165 if (pos->func != &sync_fence_wake_up_wq)
166 continue;
167
168 waiter = container_of(pos, struct sync_fence_waiter, work);
169
170 seq_printf(s, "waiter %pF\n", waiter->callback);
171 }
172 spin_unlock_irqrestore(&fence->wq.lock, flags);
173}
174
175static int sync_debugfs_show(struct seq_file *s, void *unused)
176{
177 unsigned long flags;
178 struct list_head *pos;
179
180 seq_puts(s, "objs:\n--------------\n");
181
182 spin_lock_irqsave(&sync_timeline_list_lock, flags);
183 list_for_each(pos, &sync_timeline_list_head) {
184 struct sync_timeline *obj =
185 container_of(pos, struct sync_timeline,
186 sync_timeline_list);
187
188 sync_print_obj(s, obj);
189 seq_puts(s, "\n");
190 }
191 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
192
193 seq_puts(s, "fences:\n--------------\n");
194
195 spin_lock_irqsave(&sync_fence_list_lock, flags);
196 list_for_each(pos, &sync_fence_list_head) {
197 struct sync_fence *fence =
198 container_of(pos, struct sync_fence, sync_fence_list);
199
200 sync_print_fence(s, fence);
201 seq_puts(s, "\n");
202 }
203 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
204 return 0;
205}
206
207static int sync_debugfs_open(struct inode *inode, struct file *file)
208{
209 return single_open(file, sync_debugfs_show, inode->i_private);
210}
211
212static const struct file_operations sync_debugfs_fops = {
213 .open = sync_debugfs_open,
214 .read = seq_read,
215 .llseek = seq_lseek,
216 .release = single_release,
217};
218
219static __init int sync_debugfs_init(void)
220{
221 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
222 return 0;
223}
224late_initcall(sync_debugfs_init);
225
226#define DUMP_CHUNK 256
227static char sync_dump_buf[64 * 1024];
228void sync_dump(void)
229{
230 struct seq_file s = {
231 .buf = sync_dump_buf,
232 .size = sizeof(sync_dump_buf) - 1,
233 };
234 int i;
235
236 sync_debugfs_show(&s, NULL);
237
238 for (i = 0; i < s.count; i += DUMP_CHUNK) {
239 if ((s.count - i) > DUMP_CHUNK) {
240 char c = s.buf[i + DUMP_CHUNK];
Peter Senna Tschudin95451352014-07-12 21:55:56 +0200241
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200242 s.buf[i + DUMP_CHUNK] = 0;
243 pr_cont("%s", s.buf + i);
244 s.buf[i + DUMP_CHUNK] = c;
245 } else {
246 s.buf[s.count] = 0;
247 pr_cont("%s", s.buf + i);
248 }
249 }
250}
251
252#endif