/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
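
/*
 * Example layout (illustrative only; the type value and payload are
 * hypothetical): a request with a two-byte payload occupies ten bytes
 * on the wire, all multi-byte fields little endian:
 *
 *	offset	field	value
 *	0	size	0x000a (8-byte header + 2-byte payload)
 *	2	id	assigned when the operation is submitted
 *	4	type	0x02 (the matching response would use 0x82)
 *	5	pad	three zero bytes
 *	8	...	payload, 64-bit aligned
 */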

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct rb_root *root = &connection->pending;
	struct rb_node *node = &operation->node;
	struct rb_node **link = &root->rb_node;
	struct rb_node *above = NULL;
	struct gb_operation_msg_hdr *header;
	__le16 wire_id;

	/*
	 * Assign the operation's id, and store it in the request
	 * message header.
	 */
	operation->id = gb_connection_operation_id(connection);
	wire_id = cpu_to_le16(operation->id);
	header = operation->request->transfer_buffer;
	header->id = wire_id;

	/* OK, insert the operation into its connection's tree */
	spin_lock_irq(&gb_operations_lock);

	while (*link) {
		struct gb_operation *other;

		above = *link;
		other = rb_entry(above, struct gb_operation, node);
		if (other->id > operation->id)
			link = &above->rb_left;
		else if (other->id < operation->id)
			link = &above->rb_right;
	}
	rb_link_node(node, above, link);
	rb_insert_color(node, root);

	spin_unlock_irq(&gb_operations_lock);
}

static void gb_operation_remove(struct gb_operation *operation)
{
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &operation->connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation = NULL;
	struct rb_node *node;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	node = connection->pending.rb_node;
	while (node && !found) {
		operation = rb_entry(node, struct gb_operation, node);
		if (operation->id > id)
			node = node->rb_left;
		else if (operation->id < id)
			node = node->rb_right;
		else
			found = true;
	}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}

/*
 * An operation's response message has arrived.  If no callback was
 * supplied the request was submitted synchronously, so we notify
 * any waiters.  Otherwise we assume calling the callback is enough
 * and nobody else will be waiting.
 */
void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		ret = greybus_kill_gbuf(operation->request);

	return ret;
}

/*
 * This handler is used if no operation response messages are ever
 * expected for a given protocol.
 */
static void gb_operation_recv_none(struct gb_operation *operation)
{
	/* Nothing to do! */
}

typedef void (*gb_operation_recv_handler)(struct gb_operation *operation);
static gb_operation_recv_handler gb_operation_recv_handlers[] = {
	[GREYBUS_PROTOCOL_CONTROL]	= NULL,
	[GREYBUS_PROTOCOL_AP]		= NULL,
	[GREYBUS_PROTOCOL_GPIO]		= NULL,
	[GREYBUS_PROTOCOL_I2C]		= gb_operation_recv_none,
	[GREYBUS_PROTOCOL_UART]		= NULL,
	[GREYBUS_PROTOCOL_HID]		= NULL,
	[GREYBUS_PROTOCOL_BATTERY]	= gb_operation_recv_none,
	[GREYBUS_PROTOCOL_LED]		= NULL,
	[GREYBUS_PROTOCOL_VENDOR]	= NULL,
};

static void gb_operation_request_handle(struct gb_operation *operation)
{
	u8 protocol = operation->connection->protocol;

	/* Subtract one from array size to stay within u8 range */
	if (protocol <= (u8)(ARRAY_SIZE(gb_operation_recv_handlers) - 1)) {
		gb_operation_recv_handler handler;

		handler = gb_operation_recv_handlers[protocol];
		if (handler) {
			handler(operation);	/* Handle the request */
			return;
		}
	}

	gb_connection_err(operation->connection, "unrecognized protocol %u\n",
		(unsigned int)protocol);
	operation->result = GB_OP_PROTOCOL_BAD;
	gb_operation_complete(operation);
}
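
/*
 * A protocol driver plugs into this dispatch by adding its handler to
 * gb_operation_recv_handlers[] above.  A minimal sketch, with entirely
 * hypothetical GB_EXAMPLE_* names:
 *
 *	static void gb_example_recv(struct gb_operation *operation)
 *	{
 *		struct gb_operation_msg_hdr *header;
 *
 *		header = operation->request->transfer_buffer;
 *		switch (header->type) {
 *		case GB_EXAMPLE_TYPE_PING:
 *			// Allocate and fill operation->response here
 *			break;
 *		default:
 *			// Set operation->result to a suitable error
 *			break;
 *		}
 *	}
 *
 * paired with [GREYBUS_PROTOCOL_EXAMPLE] = gb_example_recv in the table.
 */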

/*
 * Either this operation contains an incoming request, or its
 * response has arrived.  An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	incoming_request = operation->response == NULL;
	if (incoming_request)
		gb_operation_request_handle(operation);
	gb_operation_complete(operation);

	/* We're finished with the buffer we read into */
	if (incoming_request)
		greybus_gbuf_finished(operation->request);
	else
		greybus_gbuf_finished(operation->response);
}

/*
 * Buffer completion function.  We get notified whenever any buffer
 * completes.  For outbound messages, this tells us that the message
 * has been sent.  For inbound messages, it means the data has
 * landed in the buffer and is ready to be processed.
 *
 * Either way, we don't do anything.  We don't really care when an
 * outbound message has been sent, and for incoming messages we'll
 * be done with everything we need to do before we mark the buffer
 * finished.
 *
 * XXX We may want to record that a request is (or is no longer) in flight.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	if (gbuf->status) {
		struct gb_operation *operation = gbuf->context;
		struct gb_operation_msg_hdr *header;
		int id;
		int type;

		if (gbuf == operation->request)
			header = operation->request->transfer_buffer;
		else if (gbuf == operation->response)
			header = operation->response->transfer_buffer;
		else
			header = NULL;
		id = header ? (int)le16_to_cpu(header->id) : -1;
		type = header ? (int)header->type : -1;

		gb_connection_err(operation->connection,
			"operation %d type %d gbuf error %d",
			id, type, gbuf->status);
	}
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message.  For outgoing messages, both types of message contain a
 * common header, which is filled in here.  Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
					u8 type, size_t size, bool data_out)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;

	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
					size, data_out, gfp_flags, operation);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;		/* Filled in when submitted */
	header->type = type;

	return gbuf;
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.  Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated.  (A response always
 * includes a status byte, so 0 is not a valid size.)  Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					u8 type, size_t request_size,
					size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
	bool outgoing = response_size != 0;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;	/* XXX refcount? */

	operation->request = gb_operation_gbuf_create(operation, type,
							request_size,
							outgoing);
	if (!operation->request)
		goto err_cache;
	operation->request_payload = operation->request->transfer_buffer +
					sizeof(struct gb_operation_msg_hdr);
	/* We always use the full request buffer */
	operation->request->actual_length = request_size;

	if (outgoing) {
		type |= GB_OPERATION_TYPE_RESPONSE;
		operation->response = gb_operation_gbuf_create(operation,
						type, response_size,
						false);
		if (!operation->response)
			goto err_request;
		operation->response_payload =
				operation->response->transfer_buffer +
				sizeof(struct gb_operation_msg_hdr);
	}

	INIT_WORK(&operation->recv_work, gb_operation_recv_work);
	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;

err_request:
	greybus_free_gbuf(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}
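
/*
 * Typical use for an outgoing request (sketch; the type and size
 * values are illustrative, not taken from a real protocol):
 *
 *	operation = gb_operation_create(connection, type,
 *					request_size, response_size);
 *	if (!operation)
 *		return -ENOMEM;
 *	memcpy(operation->request_payload, data, request_size);
 *	// ...then hand it to gb_operation_request_send() below.
 */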

/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete.  A null
 * callback function is used for a synchronous request; this function
 * won't return until the operation is complete (or the wait is
 * interrupted by a signal).
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback)
{
	int ret;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}
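
/*
 * Both calling styles, as a sketch (my_callback is a hypothetical
 * gb_operation_callback that inspects operation->result and the
 * response payload):
 *
 *	// Synchronous: blocks until the response arrives
 *	ret = gb_operation_request_send(operation, NULL);
 *
 *	// Asynchronous: my_callback() runs at completion
 *	ret = gb_operation_request_send(operation, my_callback);
 */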

/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
	/* XXX
	 * Caller needs to have set operation->response->actual_length
	 */
	gb_operation_remove(operation);
	gb_operation_destroy(operation);

	return 0;
}
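
/*
 * Intended request-handler flow, as a sketch (response transmission
 * itself isn't implemented yet; header stands for the incoming
 * request's message header):
 *
 *	operation->response = gb_operation_gbuf_create(operation,
 *				header->type | GB_OPERATION_TYPE_RESPONSE,
 *				response_size, true);
 *	// fill the response payload, set actual_length, then:
 *	gb_operation_response_send(operation);
 */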

/*
 * Handle data arriving on a connection.  This is called in
 * interrupt context, so just copy the incoming data into a buffer
 * and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gbuf *gbuf;
	u16 msg_size;

	if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
		gb_connection_err(connection, "message too big");
		return;
	}

	header = data;
	msg_size = le16_to_cpu(header->size);
	if (header->type & GB_OPERATION_TYPE_RESPONSE) {
		u16 id = le16_to_cpu(header->id);

		operation = gb_operation_find(connection, id);
		if (!operation) {
			gb_connection_err(connection, "operation not found");
			return;
		}
		gb_operation_remove(operation);
		gbuf = operation->response;
		gbuf->status = GB_OP_SUCCESS;	/* If we got here we're good */
		if (size > gbuf->transfer_buffer_length) {
			gb_connection_err(connection, "recv buffer too small");
			return;
		}
	} else {
		WARN_ON(msg_size != size);
		operation = gb_operation_create(connection, header->type,
						msg_size, 0);
		if (!operation) {
			gb_connection_err(connection, "can't create operation");
			return;
		}
		gbuf = operation->request;
	}

	memcpy(gbuf->transfer_buffer, data, msg_size);
	gbuf->actual_length = msg_size;

	/* The rest will be handled in work queue context */
	queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}
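
/*
 * A host driver feeds raw message data in from its receive path,
 * e.g. (sketch):
 *
 *	gb_connection_operation_recv(connection, buffer, length);
 *
 * The data is copied into a gbuf above, so the caller may reuse its
 * buffer as soon as this returns.
 */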

int gb_operation_init(void)
{
	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		return -ENOMEM;

	gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
	if (!gb_operation_recv_workqueue) {
		kmem_cache_destroy(gb_operation_cache);
		gb_operation_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

void gb_operation_exit(void)
{
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	destroy_workqueue(gb_operation_recv_workqueue);
	gb_operation_recv_workqueue = NULL;
}