/*
 * Sync File validation framework and debug information
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include "sync_debug.h"
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020019
Gustavo Padovan8a004482016-01-21 10:49:17 -020020static struct dentry *dbgfs;
21
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020022static LIST_HEAD(sync_timeline_list_head);
23static DEFINE_SPINLOCK(sync_timeline_list_lock);
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -020024static LIST_HEAD(sync_file_list_head);
25static DEFINE_SPINLOCK(sync_file_list_lock);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020026
27void sync_timeline_debug_add(struct sync_timeline *obj)
28{
29 unsigned long flags;
30
31 spin_lock_irqsave(&sync_timeline_list_lock, flags);
32 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
33 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
34}
35
36void sync_timeline_debug_remove(struct sync_timeline *obj)
37{
38 unsigned long flags;
39
40 spin_lock_irqsave(&sync_timeline_list_lock, flags);
41 list_del(&obj->sync_timeline_list);
42 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
43}
44
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -020045void sync_file_debug_add(struct sync_file *sync_file)
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020046{
47 unsigned long flags;
48
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -020049 spin_lock_irqsave(&sync_file_list_lock, flags);
50 list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
51 spin_unlock_irqrestore(&sync_file_list_lock, flags);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020052}
53
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -020054void sync_file_debug_remove(struct sync_file *sync_file)
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020055{
56 unsigned long flags;
57
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -020058 spin_lock_irqsave(&sync_file_list_lock, flags);
59 list_del(&sync_file->sync_file_list);
60 spin_unlock_irqrestore(&sync_file_list_lock, flags);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020061}
62
63static const char *sync_status_str(int status)
64{
65 if (status == 0)
66 return "signaled";
Peter Senna Tschudin95451352014-07-12 21:55:56 +020067
68 if (status > 0)
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020069 return "active";
Peter Senna Tschudin95451352014-07-12 21:55:56 +020070
71 return "error";
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020072}
73
Chris Wilsonf54d1862016-10-25 13:00:45 +010074static void sync_print_fence(struct seq_file *s,
75 struct dma_fence *fence, bool show)
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020076{
77 int status = 1;
Chris Wilsonf54d1862016-10-25 13:00:45 +010078 struct sync_timeline *parent = dma_fence_parent(fence);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020079
Chris Wilsonf54d1862016-10-25 13:00:45 +010080 if (dma_fence_is_signaled_locked(fence))
Gustavo Padovanb55b54b2016-01-21 10:49:21 -020081 status = fence->status;
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020082
Gustavo Padovanb55b54b2016-01-21 10:49:21 -020083 seq_printf(s, " %s%sfence %s",
84 show ? parent->name : "",
85 show ? "_" : "",
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020086 sync_status_str(status));
87
88 if (status <= 0) {
Steve Pennington0541cdf2014-12-24 09:33:02 -060089 struct timespec64 ts64 =
Gustavo Padovanb55b54b2016-01-21 10:49:21 -020090 ktime_to_timespec64(fence->timestamp);
Peter Senna Tschudin95451352014-07-12 21:55:56 +020091
Tapasweni Pathak353fdf12014-10-26 19:20:16 +053092 seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020093 }
94
Gustavo Padovan724812d2016-05-31 16:59:02 -030095 if (fence->ops->timeline_value_str &&
Gustavo Padovanb55b54b2016-01-21 10:49:21 -020096 fence->ops->fence_value_str) {
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +020097 char value[64];
Maarten Lankhorst73465f12015-12-11 13:11:49 +000098 bool success;
Peter Senna Tschudin95451352014-07-12 21:55:56 +020099
Gustavo Padovanb55b54b2016-01-21 10:49:21 -0200100 fence->ops->fence_value_str(fence, value, sizeof(value));
Maarten Lankhorst73465f12015-12-11 13:11:49 +0000101 success = strlen(value);
102
Gustavo Padovan724812d2016-05-31 16:59:02 -0300103 if (success) {
Maarten Lankhorst73465f12015-12-11 13:11:49 +0000104 seq_printf(s, ": %s", value);
105
Gustavo Padovanb55b54b2016-01-21 10:49:21 -0200106 fence->ops->timeline_value_str(fence, value,
107 sizeof(value));
Maarten Lankhorst73465f12015-12-11 13:11:49 +0000108
109 if (strlen(value))
110 seq_printf(s, " / %s", value);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200111 }
112 }
113
114 seq_puts(s, "\n");
115}
116
117static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
118{
119 struct list_head *pos;
120 unsigned long flags;
121
Gustavo Padovanb9bc2b72016-05-31 16:59:11 -0300122 seq_printf(s, "%s: %d\n", obj->name, obj->value);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200123
124 spin_lock_irqsave(&obj->child_list_lock, flags);
125 list_for_each(pos, &obj->child_list_head) {
Gustavo Padovan0431b902016-05-31 16:59:04 -0300126 struct sync_pt *pt =
127 container_of(pos, struct sync_pt, child_list);
128 sync_print_fence(s, &pt->base, false);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200129 }
130 spin_unlock_irqrestore(&obj->child_list_lock, flags);
131}
132
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -0200133static void sync_print_sync_file(struct seq_file *s,
134 struct sync_file *sync_file)
Gustavo Padovanb55b54b2016-01-21 10:49:21 -0200135{
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200136 int i;
137
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -0200138 seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
Chris Wilsonf54d1862016-10-25 13:00:45 +0100139 sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200140
Chris Wilsonf54d1862016-10-25 13:00:45 +0100141 if (dma_fence_is_array(sync_file->fence)) {
142 struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
Linus Torvalds6b25e212016-10-11 18:12:22 -0700143
144 for (i = 0; i < array->num_fences; ++i)
145 sync_print_fence(s, array->fences[i], true);
146 } else {
147 sync_print_fence(s, sync_file->fence, true);
148 }
Gustavo Padovanb55b54b2016-01-21 10:49:21 -0200149}
150
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200151static int sync_debugfs_show(struct seq_file *s, void *unused)
152{
153 unsigned long flags;
154 struct list_head *pos;
155
156 seq_puts(s, "objs:\n--------------\n");
157
158 spin_lock_irqsave(&sync_timeline_list_lock, flags);
159 list_for_each(pos, &sync_timeline_list_head) {
160 struct sync_timeline *obj =
161 container_of(pos, struct sync_timeline,
162 sync_timeline_list);
163
164 sync_print_obj(s, obj);
165 seq_puts(s, "\n");
166 }
167 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
168
169 seq_puts(s, "fences:\n--------------\n");
170
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -0200171 spin_lock_irqsave(&sync_file_list_lock, flags);
172 list_for_each(pos, &sync_file_list_head) {
173 struct sync_file *sync_file =
174 container_of(pos, struct sync_file, sync_file_list);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200175
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -0200176 sync_print_sync_file(s, sync_file);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200177 seq_puts(s, "\n");
178 }
Gustavo Padovand7fdb0a2016-01-21 10:49:19 -0200179 spin_unlock_irqrestore(&sync_file_list_lock, flags);
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200180 return 0;
181}
182
Gustavo Padovan8a004482016-01-21 10:49:17 -0200183static int sync_info_debugfs_open(struct inode *inode, struct file *file)
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200184{
185 return single_open(file, sync_debugfs_show, inode->i_private);
186}
187
Gustavo Padovan8a004482016-01-21 10:49:17 -0200188static const struct file_operations sync_info_debugfs_fops = {
189 .open = sync_info_debugfs_open,
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200190 .read = seq_read,
191 .llseek = seq_lseek,
192 .release = single_release,
193};
194
195static __init int sync_debugfs_init(void)
196{
Gustavo Padovan8a004482016-01-21 10:49:17 -0200197 dbgfs = debugfs_create_dir("sync", NULL);
198
Nicolai Stange0fd9da92016-05-27 20:03:54 +0200199 /*
200 * The debugfs files won't ever get removed and thus, there is
201 * no need to protect it against removal races. The use of
202 * debugfs_create_file_unsafe() is actually safe here.
203 */
204 debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
205 &sync_info_debugfs_fops);
206 debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
207 &sw_sync_debugfs_fops);
Gustavo Padovan8a004482016-01-21 10:49:17 -0200208
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200209 return 0;
210}
211late_initcall(sync_debugfs_init);
212
213#define DUMP_CHUNK 256
214static char sync_dump_buf[64 * 1024];
215void sync_dump(void)
216{
217 struct seq_file s = {
218 .buf = sync_dump_buf,
219 .size = sizeof(sync_dump_buf) - 1,
220 };
221 int i;
222
223 sync_debugfs_show(&s, NULL);
224
225 for (i = 0; i < s.count; i += DUMP_CHUNK) {
226 if ((s.count - i) > DUMP_CHUNK) {
227 char c = s.buf[i + DUMP_CHUNK];
Peter Senna Tschudin95451352014-07-12 21:55:56 +0200228
Maarten Lankhorst0f0d8402014-07-01 12:57:31 +0200229 s.buf[i + DUMP_CHUNK] = 0;
230 pr_cont("%s", s.buf + i);
231 s.buf[i + DUMP_CHUNK] = c;
232 } else {
233 s.buf[s.count] = 0;
234 pr_cont("%s", s.buf + i);
235 }
236 }
237}