blob: 092ceb696a59bf6778f466699e8a9281821c1380 [file] [log] [blame]
Alex Eldere88afa52014-10-01 21:54:15 -05001/*
2 * Greybus operations
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/workqueue.h>
13
14#include "greybus.h"
15
/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

/*
 * Maximum size in bytes of any operation message (header included).
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096
26
/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));	/* 8-byte size keeps the payload 64-bit aligned */
48
/*
 * Protects every connection's pending rb-tree and operations list
 * (see gb_operation_insert/remove/find/create/destroy below).
 * XXX Could be per-host device, per-module, or even per-connection
 */
static DEFINE_SPINLOCK(gb_operations_lock);
51
Alex Elder84d148b2014-10-16 06:35:32 -050052static void gb_operation_insert(struct gb_operation *operation)
53{
54 struct gb_connection *connection = operation->connection;
55 struct rb_root *root = &connection->pending;
56 struct rb_node *node = &operation->node;
57 struct rb_node **link = &root->rb_node;
58 struct rb_node *above = NULL;
59 struct gb_operation_msg_hdr *header;
60 __le16 wire_id;
61
62 /*
63 * Assign the operation's id, and store it in the header of
64 * both request and response message headers.
65 */
66 operation->id = gb_connection_operation_id(connection);
67 wire_id = cpu_to_le16(operation->id);
68 header = operation->request->transfer_buffer;
69 header->id = wire_id;
70
71 /* OK, insert the operation into its connection's tree */
72 spin_lock_irq(&gb_operations_lock);
73
74 while (*link) {
75 struct gb_operation *other;
76
77 above = *link;
78 other = rb_entry(above, struct gb_operation, node);
79 header = other->request->transfer_buffer;
80 if (other->id > operation->id)
81 link = &above->rb_left;
82 else if (other->id < operation->id)
83 link = &above->rb_right;
84 }
85 rb_link_node(node, above, link);
86 rb_insert_color(node, root);
87
88 spin_unlock_irq(&gb_operations_lock);
89}
90
/*
 * Remove an operation from its connection's pending tree, after its
 * response has arrived (or it is otherwise no longer in flight).
 */
static void gb_operation_remove(struct gb_operation *operation)
{
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &operation->connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}
97
98static struct gb_operation *
99gb_operation_find(struct gb_connection *connection, u16 id)
100{
Alex Elderd90c25b2014-10-16 06:35:33 -0500101 struct gb_operation *operation = NULL;
Alex Elder84d148b2014-10-16 06:35:32 -0500102 struct rb_node *node;
103 bool found = false;
104
105 spin_lock_irq(&gb_operations_lock);
106 node = connection->pending.rb_node;
107 while (node && !found) {
108 operation = rb_entry(node, struct gb_operation, node);
109 if (operation->id > id)
110 node = node->rb_left;
111 else if (operation->id < id)
112 node = node->rb_right;
113 else
114 found = true;
115 }
116 spin_unlock_irq(&gb_operations_lock);
117
118 return found ? operation : NULL;
119}
120
/*
 * An operation's response message has arrived.  If no callback was
 * supplied it was submitted for asynchronous completion, so we notify
 * any waiters.  Otherwise we assume calling the completion is enough
 * and nobody else will be waiting.
 *
 * The operation is destroyed before returning, so the caller must
 * not touch it afterward.
 */
void gb_operation_complete(struct gb_operation *operation)
{
	/* XXX Should probably report bad status if no callback */
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
	gb_operation_destroy(operation);
}
136
/*
 * Wait for a submitted operation to complete.  Returns 0 on normal
 * completion.  If the wait is interrupted by a signal, the in-flight
 * request buffer is cancelled.
 *
 * NOTE(review): on interruption the wait's error code is overwritten
 * by greybus_kill_gbuf()'s return value — confirm that is intended.
 */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		ret = greybus_kill_gbuf(operation->request);
	return ret;

}
150
/*
 * Called when an operation buffer completes.  Looks up the owning
 * operation by the id in the message header and drops it from the
 * pending tree.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	struct gb_operation *operation;
	struct gb_operation_msg_hdr *header;
	u16 id;

	/*
	 * This isn't right, but it keeps things balanced until we
	 * can set up operation response handling.
	 */
	header = gbuf->transfer_buffer;
	id = le16_to_cpu(header->id);
	operation = gb_operation_find(gbuf->connection, id);
	if (operation)
		gb_operation_remove(operation);
	else
		gb_connection_err(gbuf->connection, "operation not found");
}
173
/*
 * Allocate a buffer to be used for an operation request or response
 * message.  Both types of message contain a header, which is filled
 * in here.  The payload that follows the header is left for the
 * caller to fill in.
 */
struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
					u8 type, size_t size, bool outbound)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	/* Inbound buffers may be allocated in atomic (receive) context */
	gfp_t gfp_flags = outbound ? GFP_KERNEL : GFP_ATOMIC;

	/* Operation buffers hold a header in addition to their payload */
	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
					size, outbound, gfp_flags, operation);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	/*
	 * NOTE(review): size is a size_t truncated to 16 bits here and
	 * is not checked against GB_OPERATION_MESSAGE_SIZE_MAX — callers
	 * must keep it bounded; confirm whether a check belongs here.
	 */
	header->size = cpu_to_le16(size);
	header->id = 0;		/* Filled in when submitted */
	header->type = type;

	return gbuf;
}
202
203/*
204 * Create a Greybus operation to be sent over the given connection.
205 * The request buffer will big enough for a payload of the given
206 * size. Outgoing requests must specify the size of the response
207 * buffer size, which must be sufficient to hold all expected
208 * response data.
209 *
210 * Incoming requests will supply a response size of 0, and in that
211 * case no response buffer is allocated. (A response always
212 * includes a status byte, so 0 is not a valid size.) Whatever
213 * handles the operation request is responsible for allocating the
214 * response buffer.
215 *
216 * Returns a pointer to the new operation or a null pointer if an
217 * error occurs.
218 */
219struct gb_operation *gb_operation_create(struct gb_connection *connection,
220 u8 type, size_t request_size,
221 size_t response_size)
222{
223 struct gb_operation *operation;
224 gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
225
226 if (!request_size) {
227 gb_connection_err(connection, "zero-sized request");
228 return NULL;
229 }
230
231 /* XXX Use a slab cache */
232 operation = kzalloc(sizeof(*operation), gfp_flags);
233 if (!operation)
234 return NULL;
235 operation->connection = connection; /* XXX refcount? */
236
237 operation->request = gb_operation_gbuf_create(operation, type,
238 request_size, true);
239 if (!operation->request) {
240 kfree(operation);
241 return NULL;
242 }
243 operation->request_payload = operation->request->transfer_buffer +
244 sizeof(struct gb_operation_msg_hdr);
245 /* We always use the full request buffer */
246 operation->request->actual_length = request_size;
247
248 if (response_size) {
249 type |= GB_OPERATION_TYPE_RESPONSE;
250 operation->response = gb_operation_gbuf_create(operation,
251 type, response_size, false);
252 if (!operation->response) {
253 greybus_free_gbuf(operation->request);
254 kfree(operation);
255 return NULL;
256 }
257 operation->response_payload =
258 operation->response->transfer_buffer +
259 sizeof(struct gb_operation_msg_hdr);
260 }
Alex Eldere88afa52014-10-01 21:54:15 -0500261
262 operation->callback = NULL; /* set at submit time */
263 init_completion(&operation->completion);
264
265 spin_lock_irq(&gb_operations_lock);
266 list_add_tail(&operation->links, &connection->operations);
267 spin_unlock_irq(&gb_operations_lock);
268
269 return operation;
270}
271
/*
 * Destroy a previously created operation: unlink it from its
 * connection's operations list, free both message buffers, then
 * free the operation itself.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	/*
	 * response is null for incoming requests (see
	 * gb_operation_create); assumes greybus_free_gbuf() accepts
	 * a null buffer — confirm.
	 */
	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kfree(operation);
}
Alex Elderd90c25b2014-10-16 06:35:33 -0500290
/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived indicating the operation is complete.  A null
 * callback function is used for a synchronous request; return from
 * this function won't occur until the operation is complete (or an
 * interrupt occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback)
{
	int ret;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	/* Assigns the id and makes the operation findable by its response */
	gb_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;
	/* Null callback means synchronous: block until completion */
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}
321
/*
 * Send a response for an incoming operation request.
 *
 * NOTE(review): nothing is actually transmitted here yet — the
 * operation is only removed from the pending tree and destroyed.
 * This reads as a placeholder implementation; confirm before
 * relying on it.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
	/* XXX
	 * Caller needs to have set operation->response->actual_length
	 */
	gb_operation_remove(operation);
	gb_operation_destroy(operation);

	return 0;
}
335
336void gb_connection_operation_recv(struct gb_connection *connection,
337 void *data, size_t size)
338{
339 struct gb_operation_msg_hdr *header;
340 struct gb_operation *operation;
341 struct gbuf *gbuf;
342 u16 msg_size;
343
344 if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
345 gb_connection_err(connection, "message too big");
346 return;
347 }
348
349 header = data;
350 msg_size = le16_to_cpu(header->size);
351 if (header->type & GB_OPERATION_TYPE_RESPONSE) {
352 u16 id = le16_to_cpu(header->id);
353
354 operation = gb_operation_find(connection, id);
355 if (!operation) {
356 gb_connection_err(connection, "operation not found");
357 return;
358 }
359 gb_operation_remove(operation);
360 gbuf = operation->response;
361 if (size > gbuf->transfer_buffer_length) {
362 gb_connection_err(connection, "recv buffer too small");
363 return;
364 }
365 } else {
366 WARN_ON(msg_size != size);
367 operation = gb_operation_create(connection, header->type,
368 msg_size, 0);
369 if (!operation) {
370 gb_connection_err(connection, "can't create operation");
371 return;
372 }
373 gbuf = operation->request;
374 }
375
376 memcpy(gbuf->transfer_buffer, data, msg_size);
377 gbuf->actual_length = msg_size;
378
379 /* XXX And now we let a work queue handle the rest */
380}