/*
 * Sync File validation framework and debug information
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include "sync_debug.h"

static struct dentry *dbgfs;

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
static LIST_HEAD(sync_file_list_head);
static DEFINE_SPINLOCK(sync_file_list_lock);

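/*
 * Timelines register themselves on a global list so that the debugfs
 * "info" file and sync_dump() can walk every active sync_timeline.
 */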
void sync_timeline_debug_add(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

void sync_timeline_debug_remove(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

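/*
 * sync_files are tracked on a second global list so the debugfs "info"
 * file can also dump every outstanding sync_file and its fences.
 */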
void sync_file_debug_add(struct sync_file *sync_file)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_file_list_lock, flags);
	list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
	spin_unlock_irqrestore(&sync_file_list_lock, flags);
}

void sync_file_debug_remove(struct sync_file *sync_file)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_file_list_lock, flags);
	list_del(&sync_file->sync_file_list);
	spin_unlock_irqrestore(&sync_file_list_lock, flags);
}

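/* Fence status as a string: <0 is an error, >0 signaled, 0 still active. */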
static const char *sync_status_str(int status)
{
	if (status < 0)
		return "error";

	if (status > 0)
		return "signaled";

	return "active";
}

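/*
 * Print a single fence: its status, the signal timestamp (if any) and,
 * when the fence ops provide the value callbacks, the fence and timeline
 * values. @show selects whether the parent timeline's name is prefixed.
 */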
static void sync_print_fence(struct seq_file *s,
			     struct fence *fence, bool show)
{
	struct sync_timeline *parent = fence_parent(fence);
	int status;

	status = fence_get_status_locked(fence);

	seq_printf(s, " %s%sfence %s",
		   show ? parent->name : "",
		   show ? "_" : "",
		   sync_status_str(status));

	if (status) {
		struct timespec64 ts64 =
			ktime_to_timespec64(fence->timestamp);

		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
	}

	if (fence->ops->timeline_value_str &&
	    fence->ops->fence_value_str) {
		char value[64];
		bool success;

		fence->ops->fence_value_str(fence, value, sizeof(value));
		success = strlen(value);

		if (success) {
			seq_printf(s, ": %s", value);

			fence->ops->timeline_value_str(fence, value,
						       sizeof(value));

			if (strlen(value))
				seq_printf(s, " / %s", value);
		}
	}

	seq_puts(s, "\n");
}

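/* Print a timeline header ("name: value") followed by all of its sync points. */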
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;

	seq_printf(s, "%s: %d\n", obj->name, obj->value);

	spin_lock_irq(&obj->lock);
	list_for_each(pos, &obj->pt_list) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, link);

		sync_print_fence(s, &pt->base, false);
	}
	spin_unlock_irq(&obj->lock);
}

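/*
 * Print a sync_file header and every fence it wraps; merged sync_files are
 * backed by a fence_array, so each component fence gets its own line.
 */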
static void sync_print_sync_file(struct seq_file *s,
				 struct sync_file *sync_file)
{
	int i;

	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
		   sync_status_str(fence_get_status(sync_file->fence)));

	if (fence_is_array(sync_file->fence)) {
		struct fence_array *array = to_fence_array(sync_file->fence);

		for (i = 0; i < array->num_fences; ++i)
			sync_print_fence(s, array->fences[i], true);
	} else {
		sync_print_fence(s, sync_file->fence, true);
	}
}

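/*
 * seq_file show handler for the debugfs "info" file: dump all registered
 * timelines ("objs") followed by all registered sync_files ("fences").
 */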
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irq(&sync_timeline_list_lock);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irq(&sync_timeline_list_lock);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irq(&sync_file_list_lock);
	list_for_each(pos, &sync_file_list_head) {
		struct sync_file *sync_file =
			container_of(pos, struct sync_file, sync_file_list);

		sync_print_sync_file(s, sync_file);
		seq_puts(s, "\n");
	}
	spin_unlock_irq(&sync_file_list_lock);
	return 0;
}

static int sync_info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_info_debugfs_fops = {
	.open = sync_info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

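/*
 * Create the debugfs directory and files at late_initcall time: "sync/info"
 * produces the dump above, while "sync/sw_sync" exposes the sw_sync test
 * interface (sw_sync_debugfs_fops is defined elsewhere, in sw_sync.c).
 * The report can then be read with e.g. "cat /sys/kernel/debug/sync/info",
 * assuming debugfs is mounted at the usual /sys/kernel/debug location.
 */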
static __init int sync_debugfs_init(void)
{
	dbgfs = debugfs_create_dir("sync", NULL);

	/*
	 * The debugfs files won't ever get removed and thus, there is
	 * no need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
				   &sync_info_debugfs_fops);
	debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
				   &sw_sync_debugfs_fops);

	return 0;
}
late_initcall(sync_debugfs_init);

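/*
 * Dump the same report as the debugfs "info" file straight into the kernel
 * log, emitted in DUMP_CHUNK-sized pieces (presumably to keep each pr_cont()
 * call within printk's per-call limits).
 */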
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}