/*
 * Sync File validation framework and debug information
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include "sync_debug.h"

static struct dentry *dbgfs;

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
static LIST_HEAD(sync_file_list_head);
static DEFINE_SPINLOCK(sync_file_list_lock);

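/*
 * Bookkeeping for the debugfs "info" file and sync_dump(): callers register
 * live sync timelines and sync_files on the global lists above so that the
 * dump code can walk every object that is still around.
 */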
void sync_timeline_debug_add(struct sync_timeline *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

void sync_timeline_debug_remove(struct sync_timeline *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

void sync_file_debug_add(struct sync_file *sync_file)
{
        unsigned long flags;

        spin_lock_irqsave(&sync_file_list_lock, flags);
        list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
        spin_unlock_irqrestore(&sync_file_list_lock, flags);
}

void sync_file_debug_remove(struct sync_file *sync_file)
{
        unsigned long flags;

        spin_lock_irqsave(&sync_file_list_lock, flags);
        list_del(&sync_file->sync_file_list);
        spin_unlock_irqrestore(&sync_file_list_lock, flags);
}

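/*
 * Map a dma_fence status value to a string: < 0 means error, > 0 signaled,
 * and 0 still active.
 */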
static const char *sync_status_str(int status)
{
        if (status < 0)
                return "error";

        if (status > 0)
                return "signaled";

        return "active";
}

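/*
 * Print a one-line description of @fence: its status, the signal timestamp
 * once it has signaled (or errored), and the driver-supplied fence/timeline
 * value strings when both ops are provided.  When @show is true the line is
 * prefixed with the name of the parent timeline.
 */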
static void sync_print_fence(struct seq_file *s,
                             struct dma_fence *fence, bool show)
{
        struct sync_timeline *parent = dma_fence_parent(fence);
        int status;

        status = dma_fence_get_status_locked(fence);

        seq_printf(s, "  %s%sfence %s",
                   show ? parent->name : "",
                   show ? "_" : "",
                   sync_status_str(status));

        if (status) {
                struct timespec64 ts64 =
                        ktime_to_timespec64(fence->timestamp);

                seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
        }

        if (fence->ops->timeline_value_str &&
            fence->ops->fence_value_str) {
                char value[64];
                bool success;

                fence->ops->fence_value_str(fence, value, sizeof(value));
                success = strlen(value);

                if (success) {
                        seq_printf(s, ": %s", value);

                        fence->ops->timeline_value_str(fence, value,
                                                       sizeof(value));

                        if (strlen(value))
                                seq_printf(s, " / %s", value);
                }
        }

        seq_puts(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s: %d\n", obj->name, obj->value);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_fence(s, &pt->base, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

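/*
 * Print one sync_file: a header line with its address, name and overall
 * status, followed by one line per backing fence (the sync_file may wrap a
 * single dma_fence or a dma_fence_array).
 */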
static void sync_print_sync_file(struct seq_file *s,
                                 struct sync_file *sync_file)
{
        int i;

        seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
                   sync_status_str(dma_fence_get_status(sync_file->fence)));

        if (dma_fence_is_array(sync_file->fence)) {
                struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);

                for (i = 0; i < array->num_fences; ++i)
                        sync_print_fence(s, array->fences[i], true);
        } else {
                sync_print_fence(s, sync_file->fence, true);
        }
}

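/*
 * Back end of the debugfs "info" file: dump every registered timeline and
 * sync_file.  The output looks roughly like the following (illustrative
 * only; names and values depend on the objects that are registered):
 *
 *      objs:
 *      --------------
 *      sw_sync: 4
 *        fence signaled@10.000000000: 3 / 4
 *
 *      fences:
 *      --------------
 *      [ffff8800abcd1234] sw_sync-fence: active
 *        sw_sync_fence active: 5 / 4
 */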
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_puts(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_puts(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_file_list_lock, flags);
        list_for_each(pos, &sync_file_list_head) {
                struct sync_file *sync_file =
                        container_of(pos, struct sync_file, sync_file_list);

                sync_print_sync_file(s, sync_file);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_file_list_lock, flags);
        return 0;
}

static int sync_info_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_info_debugfs_fops = {
        .open           = sync_info_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

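/*
 * Create the debugfs entries, typically /sys/kernel/debug/sync/info (the
 * read-only dump above) and /sys/kernel/debug/sync/sw_sync (the file used
 * by the sw_sync validation driver, whose fops live in sw_sync.c).
 */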
static __init int sync_debugfs_init(void)
{
        dbgfs = debugfs_create_dir("sync", NULL);

        /*
         * The debugfs files won't ever get removed and thus, there is
         * no need to protect it against removal races. The use of
         * debugfs_create_file_unsafe() is actually safe here.
         */
        debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
                                   &sync_info_debugfs_fops);
        debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
                                   &sw_sync_debugfs_fops);

        return 0;
}
late_initcall(sync_debugfs_init);

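/*
 * sync_dump() renders the same report as the debugfs "info" file into a
 * static buffer and emits it to the kernel log with pr_cont() in
 * DUMP_CHUNK-sized pieces, so that no single printk call gets too large.
 */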
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
void sync_dump(void)
{
        struct seq_file s = {
                .buf = sync_dump_buf,
                .size = sizeof(sync_dump_buf) - 1,
        };
        int i;

        sync_debugfs_show(&s, NULL);

        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        char c = s.buf[i + DUMP_CHUNK];

                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
                } else {
                        s.buf[s.count] = 0;
                        pr_cont("%s", s.buf + i);
                }
        }
}