/*
 * fence-array: aggregate fences to be waited on together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/fence-array.h>

static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);

static const char *fence_array_get_driver_name(struct fence *fence)
{
	return "fence_array";
}

static const char *fence_array_get_timeline_name(struct fence *fence)
{
	return "unbound";
}

/* Called when one of the aggregated fences signals. */
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct fence_array_cb *array_cb =
		container_of(cb, struct fence_array_cb, cb);
	struct fence_array *array = array_cb->array;

	if (atomic_dec_and_test(&array->num_pending))
		fence_signal(&array->base);
	/* Drop the reference taken in fence_array_enable_signaling(). */
	fence_put(&array->base);
}

static bool fence_array_enable_signaling(struct fence *fence)
{
	struct fence_array *array = to_fence_array(fence);
	/* Callback slots are allocated right behind the fence_array itself. */
	struct fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		fence_get(&array->base);
		if (fence_add_callback(array->fences[i], &cb[i].cb,
				       fence_array_cb_func)) {
			/*
			 * The callback was not added, typically because the
			 * fence has already signaled: drop the extra
			 * reference and account for the completion here.
			 */
			fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending))
				return false;
		}
	}

	return true;
}

static bool fence_array_signaled(struct fence *fence)
{
	struct fence_array *array = to_fence_array(fence);

	return atomic_read(&array->num_pending) <= 0;
}

static void fence_array_release(struct fence *fence)
{
	struct fence_array *array = to_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		fence_put(array->fences[i]);

	kfree(array->fences);
	fence_free(fence);
}

const struct fence_ops fence_array_ops = {
	.get_driver_name = fence_array_get_driver_name,
	.get_timeline_name = fence_array_get_timeline_name,
	.enable_signaling = fence_array_enable_signaling,
	.signaled = fence_array_signaled,
	.wait = fence_default_wait,
	.release = fence_array_release,
};
EXPORT_SYMBOL(fence_array_ops);
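
/*
 * Usage sketch, not part of the original file: because fence_array_ops plugs
 * into the generic fence machinery, an aggregate fence is consumed like any
 * other struct fence.  fence_wait_timeout() reaches fence_default_wait()
 * through the .wait hook above, and completion is driven by
 * fence_array_enable_signaling()/fence_array_cb_func().  The helper name
 * wait_for_array() is made up for illustration; only the fence API calls
 * are real.
 */
#if 0	/* illustrative only */
static int wait_for_array(struct fence_array *array)
{
	signed long ret;

	/* Interruptible wait, up to one second, on the aggregate fence. */
	ret = fence_wait_timeout(&array->base, true, HZ);
	if (ret < 0)
		return ret;		/* interrupted or error */
	if (ret == 0)
		return -ETIMEDOUT;	/* the aggregate did not signal in time */

	return 0;			/* fence_array_cb_func() signaled the array */
}
#endif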

/**
 * fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a fence_array object and initialize the base fence with fence_init().
 * Returns NULL on error.
 *
 * The caller should allocate the fences array with num_fences entries and
 * fill it with the fences it wants to add to the object. Ownership of this
 * array is taken and fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct fence_array *fence_array_create(int num_fences, struct fence **fences,
				       u64 context, unsigned seqno,
				       bool signal_on_any)
{
	struct fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	fence_init(&array->base, &fence_array_ops, &array->lock,
		   context, seqno);

	array->num_fences = num_fences;
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	return array;
}
EXPORT_SYMBOL(fence_array_create);
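
/*
 * Usage sketch, not part of the original file: aggregating two existing
 * fences into one fence that signals once both have signaled.  The helper
 * name combine_two_fences() and the fences "a" and "b" are assumptions for
 * illustration; fence_array_create() takes ownership of the passed array
 * and of the references it holds, as documented above.
 */
#if 0	/* illustrative only */
static struct fence *combine_two_fences(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	/* Hand over our own references; they are dropped on array release. */
	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	array = fence_array_create(2, fences, fence_context_alloc(1), 1,
				   false /* signal only when all have signaled */);
	if (!array) {
		fence_put(a);
		fence_put(b);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}
#endif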