blob: b56a2b93c6d72397a57242401f12517e4ef8703d [file] [log] [blame]
Alex Eldere88afa52014-10-01 21:54:15 -05001/*
2 * Greybus operations
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/workqueue.h>
13
14#include "greybus.h"
15
/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set).
 * OR this into a request type to form the matching response type.
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80
21
/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80, GB_OPERATION_TYPE_RESPONSE)
 * in the operation type value.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id; 0 until assigned at submit */
	__u8	type;	/* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
43
/*
 * Protects every connection's pending rb-tree and operations list.
 * XXX Could be per-host device, per-module, or even per-connection
 */
static DEFINE_SPINLOCK(gb_operations_lock);
46
Alex Elder84d148b2014-10-16 06:35:32 -050047static void gb_operation_insert(struct gb_operation *operation)
48{
49 struct gb_connection *connection = operation->connection;
50 struct rb_root *root = &connection->pending;
51 struct rb_node *node = &operation->node;
52 struct rb_node **link = &root->rb_node;
53 struct rb_node *above = NULL;
54 struct gb_operation_msg_hdr *header;
55 __le16 wire_id;
56
57 /*
58 * Assign the operation's id, and store it in the header of
59 * both request and response message headers.
60 */
61 operation->id = gb_connection_operation_id(connection);
62 wire_id = cpu_to_le16(operation->id);
63 header = operation->request->transfer_buffer;
64 header->id = wire_id;
65
66 /* OK, insert the operation into its connection's tree */
67 spin_lock_irq(&gb_operations_lock);
68
69 while (*link) {
70 struct gb_operation *other;
71
72 above = *link;
73 other = rb_entry(above, struct gb_operation, node);
74 header = other->request->transfer_buffer;
75 if (other->id > operation->id)
76 link = &above->rb_left;
77 else if (other->id < operation->id)
78 link = &above->rb_right;
79 }
80 rb_link_node(node, above, link);
81 rb_insert_color(node, root);
82
83 spin_unlock_irq(&gb_operations_lock);
84}
85
86static void gb_operation_remove(struct gb_operation *operation)
87{
88 spin_lock_irq(&gb_operations_lock);
89 rb_erase(&operation->node, &operation->connection->pending);
90 spin_unlock_irq(&gb_operations_lock);
91}
92
93static struct gb_operation *
94gb_operation_find(struct gb_connection *connection, u16 id)
95{
96 struct gb_operation *operation;
97 struct rb_node *node;
98 bool found = false;
99
100 spin_lock_irq(&gb_operations_lock);
101 node = connection->pending.rb_node;
102 while (node && !found) {
103 operation = rb_entry(node, struct gb_operation, node);
104 if (operation->id > id)
105 node = node->rb_left;
106 else if (operation->id < id)
107 node = node->rb_right;
108 else
109 found = true;
110 }
111 spin_unlock_irq(&gb_operations_lock);
112
113 return found ? operation : NULL;
114}
115
Alex Eldere88afa52014-10-01 21:54:15 -0500116/*
117 * An operations's response message has arrived. If no callback was
118 * supplied it was submitted for asynchronous completion, so we notify
119 * any waiters. Otherwise we assume calling the completion is enough
120 * and nobody else will be waiting.
121 */
122void gb_operation_complete(struct gb_operation *operation)
123{
124 if (operation->callback)
125 operation->callback(operation);
126 else
127 complete_all(&operation->completion);
128}
129
/*
 * Wait for a submitted operation to complete.  Returns 0 when the
 * completion fires; if the wait is interrupted by a signal, the
 * in-flight request buffer is cancelled and the result of that
 * cancellation is returned instead.
 */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		ret = greybus_kill_gbuf(operation->request);
	return ret;

}
143
144/*
145 * Submit an outbound operation. The caller has filled in any
146 * payload so the request message is ready to go. If non-null,
147 * the callback function supplied will be called when the response
148 * message has arrived indicating the operation is complete. A null
149 * callback function is used for a synchronous request; return from
150 * this function won't occur until the operation is complete (or an
151 * interrupt occurs).
152 */
153int gb_operation_submit(struct gb_operation *operation,
154 gb_operation_callback callback)
155{
156 int ret;
157
Alex Elder22b320f2014-10-16 06:35:31 -0500158 /*
159 * XXX
160 * I think the order of operations is going to be
161 * significant, and if so, we may need a mutex to surround
162 * setting the operation id and submitting the gbuf.
Alex Eldere88afa52014-10-01 21:54:15 -0500163 */
164 operation->callback = callback;
Alex Elder84d148b2014-10-16 06:35:32 -0500165 gb_operation_insert(operation);
Alex Elder22b320f2014-10-16 06:35:31 -0500166 ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
Alex Eldere88afa52014-10-01 21:54:15 -0500167 if (ret)
168 return ret;
169 if (!callback)
170 ret = gb_operation_wait(operation);
171
172 return ret;
173}
174
175/*
Alex Elder22b320f2014-10-16 06:35:31 -0500176 * Called when an operation buffer completes.
Alex Eldere88afa52014-10-01 21:54:15 -0500177 */
Alex Elder22b320f2014-10-16 06:35:31 -0500178static void gb_operation_gbuf_complete(struct gbuf *gbuf)
Alex Eldere88afa52014-10-01 21:54:15 -0500179{
Alex Elder84d148b2014-10-16 06:35:32 -0500180 struct gb_operation *operation;
181 struct gb_operation_msg_hdr *header;
182 u16 id;
183
184 /*
185 * This isn't right, but it keeps things balanced until we
186 * can set up operation response handling.
187 */
188 header = gbuf->transfer_buffer;
189 id = le16_to_cpu(header->id);
190 operation = gb_operation_find(gbuf->connection, id);
191 if (operation)
192 gb_operation_remove(operation);
193 else
194 gb_connection_err(gbuf->connection, "operation not found");
Alex Eldere88afa52014-10-01 21:54:15 -0500195}
196
197/*
Alex Elder22b320f2014-10-16 06:35:31 -0500198 * Allocate a buffer to be used for an operation request or response
199 * message. Both types of message contain a header, which is filled
200 * in here. W
Alex Eldere88afa52014-10-01 21:54:15 -0500201 */
Alex Elder22b320f2014-10-16 06:35:31 -0500202struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
203 u8 type, size_t size, bool outbound)
Alex Eldere88afa52014-10-01 21:54:15 -0500204{
Alex Elder22b320f2014-10-16 06:35:31 -0500205 struct gb_connection *connection = operation->connection;
Alex Eldere88afa52014-10-01 21:54:15 -0500206 struct gb_operation_msg_hdr *header;
207 struct gbuf *gbuf;
Alex Elder22b320f2014-10-16 06:35:31 -0500208 gfp_t gfp_flags = outbound ? GFP_KERNEL : GFP_ATOMIC;
Alex Eldere88afa52014-10-01 21:54:15 -0500209
Alex Elder22b320f2014-10-16 06:35:31 -0500210 /* Operation buffers hold a header in addition to their payload */
Alex Eldere88afa52014-10-01 21:54:15 -0500211 size += sizeof(*header);
Alex Elder22b320f2014-10-16 06:35:31 -0500212 gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
213 size, outbound, gfp_flags, operation);
214 if (!gbuf)
Alex Eldere88afa52014-10-01 21:54:15 -0500215 return NULL;
Alex Eldere88afa52014-10-01 21:54:15 -0500216
Alex Elder22b320f2014-10-16 06:35:31 -0500217 /* Fill in the header structure */
218 header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
Greg Kroah-Hartman322543a2014-10-02 21:25:21 -0700219 header->size = cpu_to_le16(size);
Alex Elderb0b65752014-10-03 15:05:20 -0500220 header->id = 0; /* Filled in when submitted */
221 header->type = type;
Alex Elder22b320f2014-10-16 06:35:31 -0500222
223 return gbuf;
224}
225
226/*
227 * Create a Greybus operation to be sent over the given connection.
228 * The request buffer will big enough for a payload of the given
229 * size. Outgoing requests must specify the size of the response
230 * buffer size, which must be sufficient to hold all expected
231 * response data.
232 *
233 * Incoming requests will supply a response size of 0, and in that
234 * case no response buffer is allocated. (A response always
235 * includes a status byte, so 0 is not a valid size.) Whatever
236 * handles the operation request is responsible for allocating the
237 * response buffer.
238 *
239 * Returns a pointer to the new operation or a null pointer if an
240 * error occurs.
241 */
242struct gb_operation *gb_operation_create(struct gb_connection *connection,
243 u8 type, size_t request_size,
244 size_t response_size)
245{
246 struct gb_operation *operation;
247 gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
248
249 if (!request_size) {
250 gb_connection_err(connection, "zero-sized request");
251 return NULL;
252 }
253
254 /* XXX Use a slab cache */
255 operation = kzalloc(sizeof(*operation), gfp_flags);
256 if (!operation)
257 return NULL;
258 operation->connection = connection; /* XXX refcount? */
259
260 operation->request = gb_operation_gbuf_create(operation, type,
261 request_size, true);
262 if (!operation->request) {
263 kfree(operation);
264 return NULL;
265 }
266 operation->request_payload = operation->request->transfer_buffer +
267 sizeof(struct gb_operation_msg_hdr);
268 /* We always use the full request buffer */
269 operation->request->actual_length = request_size;
270
271 if (response_size) {
272 type |= GB_OPERATION_TYPE_RESPONSE;
273 operation->response = gb_operation_gbuf_create(operation,
274 type, response_size, false);
275 if (!operation->response) {
276 greybus_free_gbuf(operation->request);
277 kfree(operation);
278 return NULL;
279 }
280 operation->response_payload =
281 operation->response->transfer_buffer +
282 sizeof(struct gb_operation_msg_hdr);
283 }
Alex Eldere88afa52014-10-01 21:54:15 -0500284
285 operation->callback = NULL; /* set at submit time */
286 init_completion(&operation->completion);
287
288 spin_lock_irq(&gb_operations_lock);
289 list_add_tail(&operation->links, &connection->operations);
290 spin_unlock_irq(&gb_operations_lock);
291
292 return operation;
293}
294
/*
 * Destroy a previously created operation: unlink it from its
 * connection's operations list, free both message buffers, and
 * free the operation itself.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	/* A response buffer exists only for outgoing requests */
	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kfree(operation);
}