blob: 4cbe33e21254385c335ae785f14c9a8ffd4d2aa0 [file] [log] [blame]
Alex Eldere88afa52014-10-01 21:54:15 -05001/*
2 * Greybus operations
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/workqueue.h>
13
14#include "greybus.h"
15
16/*
Alex Elder22b320f2014-10-16 06:35:31 -050017 * The top bit of the type in an operation message header indicates
18 * whether the message is a request (bit clear) or response (bit set)
19 */
20#define GB_OPERATION_TYPE_RESPONSE 0x80
21
22/*
Alex Elderd90c25b2014-10-16 06:35:33 -050023 * XXX This needs to be coordinated with host driver parameters
24 */
25#define GB_OPERATION_MESSAGE_SIZE_MAX 4096
26
Alex Elder2eb585f2014-10-16 06:35:34 -050027/* Workqueue to handle Greybus operation completions. */
28static struct workqueue_struct *gb_operation_recv_workqueue;
29
Alex Elderd90c25b2014-10-16 06:35:33 -050030/*
Alex Eldere88afa52014-10-01 21:54:15 -050031 * All operation messages (both requests and responses) begin with
32 * a common header that encodes the size of the data (header
33 * included). This header also contains a unique identifier, which
34 * is used to keep track of in-flight operations. Finally, the
35 * header contains an operation type field, whose interpretation is
36 * dependent on what type of device lies on the other end of the
37 * connection. Response messages are distinguished from request
38 * messages by setting the high bit (0x80) in the operation type
39 * value.
40 *
41 * The wire format for all numeric fields in the header is little
42 * endian. Any operation-specific data begins immediately after the
43 * header, and is 64-bit aligned.
44 */
/*
 * On-the-wire message header; layout is fixed (little-endian fields,
 * padded to a 64-bit boundary) and must not be changed.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
51
52/* XXX Could be per-host device, per-module, or even per-connection */
53static DEFINE_SPINLOCK(gb_operations_lock);
54
Alex Elder84d148b2014-10-16 06:35:32 -050055static void gb_operation_insert(struct gb_operation *operation)
56{
57 struct gb_connection *connection = operation->connection;
58 struct rb_root *root = &connection->pending;
59 struct rb_node *node = &operation->node;
60 struct rb_node **link = &root->rb_node;
61 struct rb_node *above = NULL;
62 struct gb_operation_msg_hdr *header;
63 __le16 wire_id;
64
65 /*
66 * Assign the operation's id, and store it in the header of
67 * both request and response message headers.
68 */
69 operation->id = gb_connection_operation_id(connection);
70 wire_id = cpu_to_le16(operation->id);
71 header = operation->request->transfer_buffer;
72 header->id = wire_id;
73
74 /* OK, insert the operation into its connection's tree */
75 spin_lock_irq(&gb_operations_lock);
76
77 while (*link) {
78 struct gb_operation *other;
79
80 above = *link;
81 other = rb_entry(above, struct gb_operation, node);
82 header = other->request->transfer_buffer;
83 if (other->id > operation->id)
84 link = &above->rb_left;
85 else if (other->id < operation->id)
86 link = &above->rb_right;
87 }
88 rb_link_node(node, above, link);
89 rb_insert_color(node, root);
90
91 spin_unlock_irq(&gb_operations_lock);
92}
93
/*
 * Remove an operation from its connection's tree of pending
 * (in-flight) operations, under the global operations lock.
 */
static void gb_operation_remove(struct gb_operation *operation)
{
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &operation->connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}
100
101static struct gb_operation *
102gb_operation_find(struct gb_connection *connection, u16 id)
103{
Alex Elderd90c25b2014-10-16 06:35:33 -0500104 struct gb_operation *operation = NULL;
Alex Elder84d148b2014-10-16 06:35:32 -0500105 struct rb_node *node;
106 bool found = false;
107
108 spin_lock_irq(&gb_operations_lock);
109 node = connection->pending.rb_node;
110 while (node && !found) {
111 operation = rb_entry(node, struct gb_operation, node);
112 if (operation->id > id)
113 node = node->rb_left;
114 else if (operation->id < id)
115 node = node->rb_right;
116 else
117 found = true;
118 }
119 spin_unlock_irq(&gb_operations_lock);
120
121 return found ? operation : NULL;
122}
123
Alex Eldere88afa52014-10-01 21:54:15 -0500124/*
125 * An operation's response message has arrived. If no callback was
126 * supplied it was submitted for asynchronous completion, so we notify
127 * any waiters. Otherwise we assume calling the completion is enough
128 * and nobody else will be waiting.
129 */
130void gb_operation_complete(struct gb_operation *operation)
131{
132 if (operation->callback)
133 operation->callback(operation);
134 else
135 complete_all(&operation->completion);
136}
137
Alex Elder2eb585f2014-10-16 06:35:34 -0500138/* Wait for a submitted operation to complete */
Alex Eldere88afa52014-10-01 21:54:15 -0500139int gb_operation_wait(struct gb_operation *operation)
140{
141 int ret;
142
143 ret = wait_for_completion_interruptible(&operation->completion);
144 /* If interrupted, cancel the in-flight buffer */
145 if (ret < 0)
Alex Elder22b320f2014-10-16 06:35:31 -0500146 ret = greybus_kill_gbuf(operation->request);
Alex Eldere88afa52014-10-01 21:54:15 -0500147 return ret;
148
149}
150
Alex Eldered8800d2014-10-16 06:35:38 -0500151/*
152 * This handler is used if no operation response messages are ever
153 * expected for a given protocol.
154 */
static void gb_operation_recv_none(struct gb_operation *operation)
{
	/* Intentionally empty: this protocol's requests need no handling */
}
Alex Elder2eb585f2014-10-16 06:35:34 -0500159
typedef void (*gb_operation_recv_handler)(struct gb_operation *operation);
/*
 * Table of incoming-request handlers, indexed by the connection's
 * protocol number.  A NULL entry means requests for that protocol
 * are not (yet) supported and will be failed with GB_OP_PROTOCOL_BAD.
 */
static gb_operation_recv_handler gb_operation_recv_handlers[] = {
	[GREYBUS_PROTOCOL_CONTROL]	= NULL,
	[GREYBUS_PROTOCOL_AP]		= NULL,
	[GREYBUS_PROTOCOL_GPIO]		= NULL,
	[GREYBUS_PROTOCOL_I2C]		= gb_operation_recv_none,
	[GREYBUS_PROTOCOL_UART]		= NULL,
	[GREYBUS_PROTOCOL_HID]		= NULL,
	[GREYBUS_PROTOCOL_VENDOR]	= NULL,
};
170
171static void gb_operation_request_handle(struct gb_operation *operation)
172{
173 u8 protocol = operation->connection->protocol;
174
175 /* Subtract one from array size to stay within u8 range */
176 if (protocol <= (u8)(ARRAY_SIZE(gb_operation_recv_handlers) - 1)) {
177 gb_operation_recv_handler handler;
178
179 handler = gb_operation_recv_handlers[protocol];
180 if (handler) {
181 handler(operation); /* Handle the request */
182 return;
183 }
184 }
185
186 gb_connection_err(operation->connection, "unrecognized protocol %u\n",
187 (unsigned int)protocol);
188 operation->result = GB_OP_PROTOCOL_BAD;
189 gb_operation_complete(operation);
190}
191
Alex Eldere88afa52014-10-01 21:54:15 -0500192/*
Alex Elder2eb585f2014-10-16 06:35:34 -0500193 * Either this operation contains an incoming request, or its
194 * response has arrived. An incoming request will have a null
195 * response buffer pointer (it is the responsibility of the request
196 * handler to allocate and fill in the response buffer).
197 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	/* Incoming requests are created with no response buffer allocated */
	incoming_request = operation->response == NULL;
	if (incoming_request)
		gb_operation_request_handle(operation);
	/* Wakes a synchronous waiter or invokes the async callback */
	gb_operation_complete(operation);

	/* We're finished with the buffer we read into */
	if (incoming_request)
		greybus_gbuf_finished(operation->request);
	else
		greybus_gbuf_finished(operation->response);
}
215
216/*
217 * Buffer completion function. We get notified whenever any buffer
218 * completes. For outbound messages, this tells us that the message
219 * has been sent. For inbound messages, it means the data has
220 * landed in the buffer and is ready to be processed.
221 *
222 * Either way, we don't do anything. We don't really care when an
223 * outbound message has been sent, and for incoming messages
224 * we'll be done with everything we need to do before we mark it
225 * finished.
226 *
Alex Elderf012a522014-10-17 21:03:49 -0500227 * XXX We may want to record that a request is (or is no longer) in flight.
Alex Eldere88afa52014-10-01 21:54:15 -0500228 */
Alex Elder22b320f2014-10-16 06:35:31 -0500229static void gb_operation_gbuf_complete(struct gbuf *gbuf)
Alex Eldere88afa52014-10-01 21:54:15 -0500230{
Alex Elderf012a522014-10-17 21:03:49 -0500231 if (gbuf->status) {
232 struct gb_operation *operation = gbuf->context;
233 struct gb_operation_msg_hdr *header;
234 int id;
235 int type;
236
237 if (gbuf == operation->request)
238 header = operation->request_payload;
239 else if (gbuf == operation->response)
240 header = operation->response_payload;
241 else
242 header = NULL;
243 id = header ? (int)header->id : -1;
244 type = header ? (int)header->type : -1;
245
246 gb_connection_err(operation->connection,
247 "operation %d type %d gbuf error %d",
248 id, type, gbuf->status);
249 }
Alex Elder2eb585f2014-10-16 06:35:34 -0500250 return;
Alex Eldere88afa52014-10-01 21:54:15 -0500251}
252
253/*
Alex Elder22b320f2014-10-16 06:35:31 -0500254 * Allocate a buffer to be used for an operation request or response
Alex Elder2eb585f2014-10-16 06:35:34 -0500255 * message. For outgoing messages, both types of message contain a
256 * common header, which is filled in here. Incoming requests or
257 * responses also contain the same header, but there's no need to
258 * initialize it here (it'll be overwritten by the incoming
259 * message).
Alex Eldere88afa52014-10-01 21:54:15 -0500260 */
Alex Elder22b320f2014-10-16 06:35:31 -0500261struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
Alex Elder2eb585f2014-10-16 06:35:34 -0500262 u8 type, size_t size, bool data_out)
Alex Eldere88afa52014-10-01 21:54:15 -0500263{
Alex Elder22b320f2014-10-16 06:35:31 -0500264 struct gb_connection *connection = operation->connection;
Alex Eldere88afa52014-10-01 21:54:15 -0500265 struct gb_operation_msg_hdr *header;
266 struct gbuf *gbuf;
Alex Elder2eb585f2014-10-16 06:35:34 -0500267 gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;
Alex Eldere88afa52014-10-01 21:54:15 -0500268
Alex Eldere88afa52014-10-01 21:54:15 -0500269 size += sizeof(*header);
Alex Elder22b320f2014-10-16 06:35:31 -0500270 gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
Alex Elder2eb585f2014-10-16 06:35:34 -0500271 size, data_out, gfp_flags, operation);
Alex Elder22b320f2014-10-16 06:35:31 -0500272 if (!gbuf)
Alex Eldere88afa52014-10-01 21:54:15 -0500273 return NULL;
Alex Eldere88afa52014-10-01 21:54:15 -0500274
Alex Elder22b320f2014-10-16 06:35:31 -0500275 /* Fill in the header structure */
276 header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
Greg Kroah-Hartman322543a2014-10-02 21:25:21 -0700277 header->size = cpu_to_le16(size);
Alex Elderb0b65752014-10-03 15:05:20 -0500278 header->id = 0; /* Filled in when submitted */
279 header->type = type;
Alex Elder22b320f2014-10-16 06:35:31 -0500280
281 return gbuf;
282}
283
284/*
285 * Create a Greybus operation to be sent over the given connection.
286 * The request buffer will be big enough for a payload of the given
287 * size. Outgoing requests must specify the size of the response
288 * buffer, which must be sufficient to hold all expected
289 * response data.
290 *
291 * Incoming requests will supply a response size of 0, and in that
292 * case no response buffer is allocated. (A response always
293 * includes a status byte, so 0 is not a valid size.) Whatever
294 * handles the operation request is responsible for allocating the
295 * response buffer.
296 *
297 * Returns a pointer to the new operation or a null pointer if an
298 * error occurs.
299 */
300struct gb_operation *gb_operation_create(struct gb_connection *connection,
301 u8 type, size_t request_size,
302 size_t response_size)
303{
304 struct gb_operation *operation;
305 gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
Alex Elder2eb585f2014-10-16 06:35:34 -0500306 bool outgoing = response_size != 0;
Alex Elder22b320f2014-10-16 06:35:31 -0500307
308 /* XXX Use a slab cache */
309 operation = kzalloc(sizeof(*operation), gfp_flags);
310 if (!operation)
311 return NULL;
312 operation->connection = connection; /* XXX refcount? */
313
314 operation->request = gb_operation_gbuf_create(operation, type,
Alex Elder2eb585f2014-10-16 06:35:34 -0500315 request_size,
316 outgoing);
Alex Elder22b320f2014-10-16 06:35:31 -0500317 if (!operation->request) {
318 kfree(operation);
319 return NULL;
320 }
321 operation->request_payload = operation->request->transfer_buffer +
322 sizeof(struct gb_operation_msg_hdr);
323 /* We always use the full request buffer */
324 operation->request->actual_length = request_size;
325
Alex Elder2eb585f2014-10-16 06:35:34 -0500326 if (outgoing) {
Alex Elder22b320f2014-10-16 06:35:31 -0500327 type |= GB_OPERATION_TYPE_RESPONSE;
328 operation->response = gb_operation_gbuf_create(operation,
Alex Elder2eb585f2014-10-16 06:35:34 -0500329 type, response_size,
330 false);
Alex Elder22b320f2014-10-16 06:35:31 -0500331 if (!operation->response) {
332 greybus_free_gbuf(operation->request);
333 kfree(operation);
334 return NULL;
335 }
336 operation->response_payload =
337 operation->response->transfer_buffer +
338 sizeof(struct gb_operation_msg_hdr);
339 }
Alex Eldere88afa52014-10-01 21:54:15 -0500340
Alex Elder2eb585f2014-10-16 06:35:34 -0500341 INIT_WORK(&operation->recv_work, gb_operation_recv_work);
Alex Eldere88afa52014-10-01 21:54:15 -0500342 operation->callback = NULL; /* set at submit time */
343 init_completion(&operation->completion);
344
345 spin_lock_irq(&gb_operations_lock);
346 list_add_tail(&operation->links, &connection->operations);
347 spin_unlock_irq(&gb_operations_lock);
348
349 return operation;
350}
351
352/*
353 * Destroy a previously created operation.
354 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	/*
	 * response is NULL for incoming requests; assumes
	 * greybus_free_gbuf() accepts a NULL gbuf -- TODO confirm.
	 */
	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kfree(operation);
}
Alex Elderd90c25b2014-10-16 06:35:33 -0500370
371/*
372 * Send an operation request message. The caller has filled in
373 * any payload so the request message is ready to go. If non-null,
374 * the callback function supplied will be called when the response
375 * message has arrived indicating the operation is complete. A null
376 * callback function is used for a synchronous request; return from
377 * this function won't occur until the operation is complete (or an
378 * interrupt occurs).
379 */
380int gb_operation_request_send(struct gb_operation *operation,
381 gb_operation_callback callback)
382{
383 int ret;
384
385 /*
386 * XXX
387 * I think the order of operations is going to be
388 * significant, and if so, we may need a mutex to surround
389 * setting the operation id and submitting the gbuf.
390 */
391 operation->callback = callback;
392 gb_operation_insert(operation);
393 ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
394 if (ret)
395 return ret;
396 if (!callback)
397 ret = gb_operation_wait(operation);
398
399 return ret;
400}
401
402/*
403 * Send a response for an incoming operation request.
404 */
int gb_operation_response_send(struct gb_operation *operation)
{
	/* XXX
	 * Caller needs to have set operation->response->actual_length
	 *
	 * NOTE(review): no response message is actually transmitted
	 * here yet; the operation is just torn down.  Also, incoming
	 * requests are never inserted into the pending rb-tree (only
	 * gb_operation_insert(), on the send path, does that), so
	 * erasing the node here looks suspect -- verify before use.
	 */
	gb_operation_remove(operation);
	gb_operation_destroy(operation);

	return 0;
}
415
Alex Elder2eb585f2014-10-16 06:35:34 -0500416/*
417 * Handle data arriving on a connection. This is called in
418 * interrupt context, so just copy the incoming data into a buffer
419 * and do remaining handling via a work queue.
420 */
Alex Elderd90c25b2014-10-16 06:35:33 -0500421void gb_connection_operation_recv(struct gb_connection *connection,
422 void *data, size_t size)
423{
424 struct gb_operation_msg_hdr *header;
425 struct gb_operation *operation;
426 struct gbuf *gbuf;
427 u16 msg_size;
428
429 if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
430 gb_connection_err(connection, "message too big");
431 return;
432 }
433
434 header = data;
435 msg_size = le16_to_cpu(header->size);
436 if (header->type & GB_OPERATION_TYPE_RESPONSE) {
437 u16 id = le16_to_cpu(header->id);
438
439 operation = gb_operation_find(connection, id);
440 if (!operation) {
441 gb_connection_err(connection, "operation not found");
Alex Elder2eb585f2014-10-16 06:35:34 -0500442 return;
Alex Elderd90c25b2014-10-16 06:35:33 -0500443 }
444 gb_operation_remove(operation);
445 gbuf = operation->response;
Alex Elderbedfdf32014-10-17 05:18:22 -0500446 gbuf->status = GB_OP_SUCCESS; /* If we got here we're good */
Alex Elderd90c25b2014-10-16 06:35:33 -0500447 if (size > gbuf->transfer_buffer_length) {
448 gb_connection_err(connection, "recv buffer too small");
449 return;
450 }
451 } else {
452 WARN_ON(msg_size != size);
453 operation = gb_operation_create(connection, header->type,
454 msg_size, 0);
455 if (!operation) {
456 gb_connection_err(connection, "can't create operation");
457 return;
458 }
459 gbuf = operation->request;
460 }
461
462 memcpy(gbuf->transfer_buffer, data, msg_size);
463 gbuf->actual_length = msg_size;
464
Alex Elder2eb585f2014-10-16 06:35:34 -0500465 /* The rest will be handled in work queue context */
466 queue_work(gb_operation_recv_workqueue, &operation->recv_work);
467}
468
469int gb_operation_init(void)
470{
471 gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
472 if (!gb_operation_recv_workqueue)
473 return -ENOMEM;
474
475 return 0;
476}
477
void gb_operation_exit(void)
{
	/* destroy_workqueue() drains any remaining work items first */
	destroy_workqueue(gb_operation_recv_workqueue);
}