/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <kgsl_device.h>

#include "kgsl_debugfs.h"
#include "kgsl_trace.h"

/*
 * Define a kmem cache for the event structures since we allocate and free
 * them so frequently
 */
static struct kmem_cache *events_cache;
static struct dentry *events_dentry;

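/*
 * Remove the event from its group's list and queue its callback work.
 * Callers must hold the group's spinlock.
 */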
static inline void signal_event(struct kgsl_device *device,
		struct kgsl_event *event, int result)
{
	list_del(&event->node);
	event->result = result;
	queue_work(device->events_wq, &event->work);
}

/**
 * _kgsl_event_worker() - Work handler for processing GPU event callbacks
 * @work: Pointer to the work_struct for the event
 *
 * Each event callback has its own work struct and is run on an event-specific
 * workqueue. This is the worker that calls the event callback function.
 */
static void _kgsl_event_worker(struct work_struct *work)
{
	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
	int id = KGSL_CONTEXT_ID(event->context);

	trace_kgsl_fire_event(id, event->timestamp, event->result,
		jiffies - event->created, event->func);

	event->func(event->device, event->group, event->priv, event->result);

	kgsl_context_put(event->context);
	kmem_cache_free(events_cache, event);
}

/* return true if the group needs to be processed */
static bool _do_process_group(unsigned int processed, unsigned int cur)
{
	if (processed == cur)
		return false;

	/*
	 * This ensures that the timestamp didn't slip back accidentally,
	 * maybe due to a memory barrier issue. This is highly unlikely but
	 * we've been burned here in the past.
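	 *
	 * Illustrative example (values assumed): if processed is 105 and cur
	 * reads back as 100, the small backward gap (under
	 * KGSL_TIMESTAMP_WINDOW) is treated as a glitch and the group is
	 * skipped; a very large backward gap means the 32-bit timestamp
	 * legitimately wrapped, so the group is still processed.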
	 */
	if ((cur < processed) && ((processed - cur) < KGSL_TIMESTAMP_WINDOW))
		return false;

	return true;
}

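/*
 * Signal all events in the group that have retired at or before the current
 * retired timestamp; if @flush is set, cancel the events that are still
 * pending.
 */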
static void _process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group, bool flush)
{
	struct kgsl_event *event, *tmp;
	unsigned int timestamp;
	struct kgsl_context *context;

	if (group == NULL)
		return;

	context = group->context;

	/*
	 * Sanity check to be sure that we aren't racing with the context
	 * getting destroyed
	 */
	if (context != NULL && !_kgsl_context_get(context)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&group->lock);

	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&timestamp);

	if (!flush && !_do_process_group(group->processed, timestamp))
		goto out;

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(event->timestamp, timestamp) <= 0)
			signal_event(device, event, KGSL_EVENT_RETIRED);
		else if (flush)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	group->processed = timestamp;

out:
	spin_unlock(&group->lock);
	kgsl_context_put(context);
}

/**
 * kgsl_process_event_group() - Handle all the retired events in a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a GPU events group to process
 */
void kgsl_process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	_process_event_group(device, group, false);
}
EXPORT_SYMBOL(kgsl_process_event_group);

/**
 * kgsl_flush_event_group() - Flush all the events in a group by retiring the
 * ones that can be retired and cancelling the ones that are pending
 * @device: Pointer to a KGSL device
 * @group: Pointer to a GPU events group to process
 */
void kgsl_flush_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	_process_event_group(device, group, true);
}
EXPORT_SYMBOL(kgsl_flush_event_group);

/**
 * kgsl_cancel_events_timestamp() - Cancel pending events for a given timestamp
 * @device: Pointer to a KGSL device
 * @group: Pointer to the GPU event group that owns the event
 * @timestamp: Registered expiry timestamp for the event
 */
void kgsl_cancel_events_timestamp(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(timestamp, event->timestamp) == 0)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_events_timestamp);

/**
 * kgsl_cancel_events() - Cancel all pending events in the group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a kgsl_event_group
 */
void kgsl_cancel_events(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node)
		signal_event(device, event, KGSL_EVENT_CANCELLED);

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_events);

/**
 * kgsl_cancel_event() - Cancel a specific event from a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group that contains the events
 * @timestamp: Registered expiry timestamp for the event
 * @func: Registered callback for the event
 * @priv: Registered priv data for the event
 */
void kgsl_cancel_event(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp,
		kgsl_event_func func, void *priv)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp == event->timestamp && func == event->func &&
			event->priv == priv)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_event);

/**
 * kgsl_event_pending() - Searches for an event in an event group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group that contains the events
 * @timestamp: Registered expiry timestamp for the event
 * @func: Registered callback for the event
 * @priv: Registered priv data for the event
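 *
 * Return: true if a matching event is pending in the group, false otherwise.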
 */
bool kgsl_event_pending(struct kgsl_device *device,
		struct kgsl_event_group *group,
		unsigned int timestamp, kgsl_event_func func, void *priv)
{
	struct kgsl_event *event;
	bool result = false;

	spin_lock(&group->lock);
	list_for_each_entry(event, &group->events, node) {
		if (timestamp == event->timestamp && func == event->func &&
			event->priv == priv) {
			result = true;
			break;
		}
	}
	spin_unlock(&group->lock);
	return result;
}

/**
 * kgsl_add_event() - Add a new GPU event to a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group to add the event to
 * @timestamp: Timestamp that the event will expire on
 * @func: Callback function for the event
 * @priv: Private data to send to the callback function
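 *
 * Return: 0 on success or a negative error code (-EINVAL, -ENOMEM or
 * -ENOENT) on failure.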
 */
int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
		unsigned int timestamp, kgsl_event_func func, void *priv)
{
	unsigned int queued;
	struct kgsl_context *context = group->context;
	struct kgsl_event *event;
	unsigned int retired;

	if (!func)
		return -EINVAL;

	/*
	 * If the caller is creating their own timestamps, let them schedule
	 * events in the future. Otherwise only allow timestamps that have been
	 * queued.
	 */
	if (!context || !(context->flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
		group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_QUEUED,
			&queued);

		if (timestamp_cmp(timestamp, queued) > 0)
			return -EINVAL;
	}

	event = kmem_cache_alloc(events_cache, GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	/* Get a reference to the context while the event is active */
	if (context != NULL && !_kgsl_context_get(context)) {
		kmem_cache_free(events_cache, event);
		return -ENOENT;
	}

	event->device = device;
	event->context = context;
	event->timestamp = timestamp;
	event->priv = priv;
	event->func = func;
	event->created = jiffies;
	event->group = group;

	INIT_WORK(&event->work, _kgsl_event_worker);

	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);

	spin_lock(&group->lock);

	/*
	 * Check to see if the requested timestamp has already retired. If so,
	 * schedule the callback right away
	 */
	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&retired);

	if (timestamp_cmp(retired, timestamp) >= 0) {
		event->result = KGSL_EVENT_RETIRED;
		queue_work(device->events_wq, &event->work);
		spin_unlock(&group->lock);
		return 0;
	}

	/* Add the event to the group list */
	list_add_tail(&event->node, &group->events);

	spin_unlock(&group->lock);

	return 0;
}
EXPORT_SYMBOL(kgsl_add_event);

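/* Global list of event groups, protected by group_lock */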
static DEFINE_RWLOCK(group_lock);
static LIST_HEAD(group_list);

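/**
 * kgsl_process_event_groups() - Process all registered GPU event groups
 * @device: Pointer to a KGSL device
 */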
void kgsl_process_event_groups(struct kgsl_device *device)
{
	struct kgsl_event_group *group;

	read_lock(&group_lock);
	list_for_each_entry(group, &group_list, group)
		_process_event_group(device, group, false);
	read_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_process_event_groups);

/**
 * kgsl_del_event_group() - Remove a GPU event group
 * @group: GPU event group to remove
 */
void kgsl_del_event_group(struct kgsl_event_group *group)
{
	/* Make sure that all the events have been deleted from the list */
	BUG_ON(!list_empty(&group->events));

	write_lock(&group_lock);
	list_del(&group->group);
	write_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_del_event_group);

/**
 * kgsl_add_event_group() - Add a new GPU event group
 * @group: Pointer to the new group to add to the list
 * @context: Context that owns the group (or NULL for global)
 * @name: Name of the group
 * @readtimestamp: Function pointer to the readtimestamp function to call when
 * processing events
 * @priv: Priv member to pass to the readtimestamp function
 */
void kgsl_add_event_group(struct kgsl_event_group *group,
		struct kgsl_context *context, const char *name,
		readtimestamp_func readtimestamp, void *priv)
{
	BUG_ON(readtimestamp == NULL);

	spin_lock_init(&group->lock);
	INIT_LIST_HEAD(&group->events);

	group->context = context;
	group->readtimestamp = readtimestamp;
	group->priv = priv;

	if (name)
		strlcpy(group->name, name, sizeof(group->name));

	write_lock(&group_lock);
	list_add_tail(&group->group, &group_list);
	write_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_add_event_group);

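/* Dump one event group and its pending events to the debugfs seq file */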
static void events_debugfs_print_group(struct seq_file *s,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event;
	unsigned int retired;

	spin_lock(&group->lock);

	seq_printf(s, "%s: last=%d\n", group->name, group->processed);

	list_for_each_entry(event, &group->events, node) {
		group->readtimestamp(event->device, group->priv,
			KGSL_TIMESTAMP_RETIRED, &retired);

		seq_printf(s, "\t%d:%d age=%lu func=%ps [retired=%d]\n",
			group->context ? group->context->id :
			KGSL_MEMSTORE_GLOBAL,
			event->timestamp, jiffies - event->created,
			event->func, retired);
	}
	spin_unlock(&group->lock);
}

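/* seq_file show handler: dump every registered event group */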
static int events_debugfs_print(struct seq_file *s, void *unused)
{
	struct kgsl_event_group *group;

	seq_puts(s, "event groups:\n");
	seq_puts(s, "--------------\n");

	read_lock(&group_lock);
	list_for_each_entry(group, &group_list, group) {
		events_debugfs_print_group(s, group);
		seq_puts(s, "\n");
	}
	read_unlock(&group_lock);

	return 0;
}

static int events_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, events_debugfs_print, NULL);
}

static const struct file_operations events_fops = {
	.open = events_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * kgsl_events_exit() - Destroy the event kmem cache and remove the debugfs
 * entry on module exit
 */
void kgsl_events_exit(void)
{
	kmem_cache_destroy(events_cache);

	debugfs_remove(events_dentry);
}

/**
 * kgsl_events_init() - Create the event kmem cache and debugfs entry on
 * module start
 */
void __init kgsl_events_init(void)
{
	struct dentry *debugfs_dir = kgsl_get_debugfs_dir();

	events_cache = KMEM_CACHE(kgsl_event, 0);

	events_dentry = debugfs_create_file("events", 0444, debugfs_dir, NULL,
		&events_fops);

	/* Failure to create a debugfs entry is non-fatal */
	if (IS_ERR(events_dentry))
		events_dentry = NULL;
}
456}