/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or a response (bit set).
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included). This header also contains a unique identifier, which
 * is used to keep track of in-flight operations. Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection. Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian. Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
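
/*
 * For illustration only: a sketch of how a receiver might pick apart
 * a raw message using the header above. These helpers are
 * hypothetical and are not used anywhere in this file.
 *
 *	static bool example_msg_is_response(const void *buf)
 *	{
 *		const struct gb_operation_msg_hdr *hdr = buf;
 *
 *		return hdr->type & GB_OPERATION_TYPE_RESPONSE;
 *	}
 *
 *	static size_t example_msg_payload_size(const void *buf)
 *	{
 *		const struct gb_operation_msg_hdr *hdr = buf;
 *
 *		return le16_to_cpu(hdr->size) - sizeof(*hdr);
 *	}
 */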

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct rb_root *root = &connection->pending;
	struct rb_node *node = &operation->node;
	struct rb_node **link = &root->rb_node;
	struct rb_node *above = NULL;
	struct gb_operation_msg_hdr *header;
	unsigned long timeout;
	bool start_timer;
	__le16 wire_id;

	/*
	 * Assign the operation's id, and store it in the request
	 * message header.
	 */
	operation->id = gb_connection_operation_id(connection);
	wire_id = cpu_to_le16(operation->id);
	header = operation->request->transfer_buffer;
	header->id = wire_id;

	/* OK, insert the operation into its connection's tree */
	spin_lock_irq(&gb_operations_lock);

	/*
	 * We impose a time limit for requests to complete. If
	 * there are no requests pending there is no need for a
	 * timer. So if this will be the only one in flight we'll
	 * need to start the timer. Otherwise we just update the
	 * existing one to give this request a full timeout period
	 * to complete.
	 */
	start_timer = RB_EMPTY_ROOT(root);

	while (*link) {
		struct gb_operation *other;

		above = *link;
		other = rb_entry(above, struct gb_operation, node);
		header = other->request->transfer_buffer;
		if (other->id > operation->id)
			link = &above->rb_left;
		else if (other->id < operation->id)
			link = &above->rb_right;
	}
	rb_link_node(node, above, link);
	rb_insert_color(node, root);
	spin_unlock_irq(&gb_operations_lock);

	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
	if (start_timer)
		schedule_delayed_work(&operation->timeout_work, timeout);
	else
		mod_delayed_work(system_wq, &operation->timeout_work, timeout);
}

static void gb_operation_remove(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;

	/* Shut down our timeout timer */
	cancel_delayed_work(&operation->timeout_work);

	/* Take us off the list of pending operations */
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation = NULL;
	struct rb_node *node;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	node = connection->pending.rb_node;
	while (node && !found) {
		operation = rb_entry(node, struct gb_operation, node);
		if (operation->id > id)
			node = node->rb_left;
		else if (operation->id < id)
			node = node->rb_right;
		else
			found = true;
	}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}

/*
 * An operation's response message has arrived. If no callback was
 * supplied, the submitter is waiting for the operation to complete,
 * so we notify any waiters. Otherwise we assume calling the
 * callback is enough and nobody else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}
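
/*
 * A minimal sketch (illustrative only, not part of this file) of an
 * asynchronous completion callback as it might be passed to
 * gb_operation_request_send() below. The function names and result
 * handling are hypothetical; the point is that a non-null callback
 * runs in place of the completion being signaled.
 *
 *	static void example_operation_done(struct gb_operation *operation)
 *	{
 *		if (operation->result == GB_OP_SUCCESS)
 *			example_decode(operation->response_payload);
 *		else
 *			pr_warn("example operation failed: %d\n",
 *				operation->result);
 *	}
 *
 * The submitter remains responsible for eventually destroying the
 * operation with gb_operation_destroy().
 */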

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		greybus_kill_gbuf(operation->request);

	return ret;
}

/*
 * This handler is used if no operation response messages are ever
 * expected for a given protocol.
 */
static void gb_operation_recv_none(struct gb_operation *operation)
{
	/* Nothing to do! */
}

typedef void (*gb_operation_recv_handler)(struct gb_operation *operation);
static gb_operation_recv_handler gb_operation_recv_handlers[] = {
	[GREYBUS_PROTOCOL_CONTROL] = NULL,
	[GREYBUS_PROTOCOL_AP] = NULL,
	[GREYBUS_PROTOCOL_GPIO] = NULL,
	[GREYBUS_PROTOCOL_I2C] = gb_operation_recv_none,
	[GREYBUS_PROTOCOL_UART] = NULL,
	[GREYBUS_PROTOCOL_HID] = NULL,
	[GREYBUS_PROTOCOL_BATTERY] = gb_operation_recv_none,
	[GREYBUS_PROTOCOL_LED] = NULL,
	[GREYBUS_PROTOCOL_VENDOR] = NULL,
};
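
/*
 * A protocol that expects incoming requests would install a real
 * handler in the table above. A hypothetical sketch, assuming a GPIO
 * handler (gb_operation_recv_gpio does not exist in this file; it
 * stands in for protocol-specific request handling):
 *
 *	static void gb_operation_recv_gpio(struct gb_operation *operation)
 *	{
 *		struct gb_operation_msg_hdr *header;
 *
 *		header = operation->request->transfer_buffer;
 *		// Decode the request based on header->type, allocate
 *		// and fill in a response buffer, then send it with
 *		// gb_operation_response_send().
 *	}
 *
 * and in the table:
 *
 *	[GREYBUS_PROTOCOL_GPIO] = gb_operation_recv_gpio,
 */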

static void gb_operation_request_handle(struct gb_operation *operation)
{
	u8 protocol_id = operation->connection->protocol_id;

	/* Subtract one from array size to stay within u8 range */
	if (protocol_id <= (u8)(ARRAY_SIZE(gb_operation_recv_handlers) - 1)) {
		gb_operation_recv_handler handler;

		handler = gb_operation_recv_handlers[protocol_id];
		if (handler) {
			handler(operation);	/* Handle the request */
			return;
		}
	}

	gb_connection_err(operation->connection,
		"unrecognized protocol id %hhu\n", protocol_id);
	operation->result = GB_OP_PROTOCOL_BAD;
	gb_operation_complete(operation);
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived. An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	incoming_request = operation->response == NULL;
	if (incoming_request)
		gb_operation_request_handle(operation);
	gb_operation_complete(operation);

	/* We're finished with the buffer we read into */
	if (incoming_request)
		greybus_gbuf_finished(operation->request);
	else
		greybus_gbuf_finished(operation->response);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the operation
 * properly.
 */
static void operation_timeout(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, timeout_work.work);
	pr_warn("operation timed out\n");

	operation->result = GB_OP_TIMEOUT;
	gb_operation_complete(operation);
}

/*
 * Buffer completion function. We get notified whenever any buffer
 * completes. For outbound messages, this tells us that the message
 * has been sent. For inbound messages, it means the data has
 * landed in the buffer and is ready to be processed.
 *
 * Either way, we don't do anything. We don't really care when an
 * outbound message has been sent, and for incoming messages we'll
 * be done with everything we need to do before we mark the buffer
 * finished.
 *
 * XXX We may want to record that a request is (or is no longer) in flight.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	if (gbuf->status) {
		struct gb_operation *operation = gbuf->context;
		struct gb_operation_msg_hdr *header;
		int id;
		int type;

		if (gbuf == operation->request)
			header = operation->request->transfer_buffer;
		else if (gbuf == operation->response)
			header = operation->response->transfer_buffer;
		else
			header = NULL;

		if (header) {
			id = le16_to_cpu(header->id);
			type = header->type;
		} else {
			id = -1;
			type = -1;
		}

		gb_connection_err(operation->connection,
			"operation %d type %d gbuf error %d",
			id, type, gbuf->status);
	}
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message. For outgoing messages, both types of message contain a
 * common header, which is filled in here. Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
					     u8 type, size_t size,
					     bool data_out)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;

	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
				  size, data_out, gfp_flags, operation);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;	/* Filled in when submitted */
	header->type = type;

	return gbuf;
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size. Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated. (A response always
 * includes a status byte, so 0 is not a valid size.) Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					 u8 type, size_t request_size,
					 size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
	bool outgoing = response_size != 0;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_gbuf_create(operation, type,
						      request_size,
						      outgoing);
	if (!operation->request)
		goto err_cache;
	operation->request_payload = operation->request->transfer_buffer +
					sizeof(struct gb_operation_msg_hdr);
	/* We always use the full request buffer */
	operation->request->actual_length = request_size;

	if (outgoing) {
		type |= GB_OPERATION_TYPE_RESPONSE;
		operation->response = gb_operation_gbuf_create(operation,
						type, response_size,
						false);
		if (!operation->response)
			goto err_request;
		operation->response_payload =
			operation->response->transfer_buffer +
			sizeof(struct gb_operation_msg_hdr);
	}

	INIT_WORK(&operation->recv_work, gb_operation_recv_work);
	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);
	INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;

err_request:
	greybus_free_gbuf(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Send an operation request message. The caller has filled in
 * any payload so the request message is ready to go. If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete. A null
 * callback function is used for a synchronous request; return from
 * this function won't occur until the operation is complete (or an
 * interrupt occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback)
{
	int ret;

	if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
		return -ENOTCONN;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}
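
/*
 * A minimal usage sketch for a synchronous request, assuming a
 * hypothetical protocol operation (type 0x02) that takes a one-byte
 * request payload and returns a two-byte response. The type value
 * and payload layout are invented for illustration only:
 *
 *	struct gb_operation *operation;
 *	int ret;
 *
 *	operation = gb_operation_create(connection, 0x02, 1, 2);
 *	if (!operation)
 *		return -ENOMEM;
 *	*(u8 *)operation->request_payload = 0x01;
 *	ret = gb_operation_request_send(operation, NULL);	// blocks
 *	if (!ret && operation->result != GB_OP_SUCCESS)
 *		ret = -EIO;
 *	gb_operation_destroy(operation);
 *	return ret;
 */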

/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
	/* XXX
	 * Caller needs to have set operation->response->actual_length
	 */
	gb_operation_remove(operation);
	gb_operation_destroy(operation);

	return 0;
}

/*
 * Handle data arriving on a connection. This is called in
 * interrupt context, so just copy the incoming data into a buffer
 * and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
				  void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gbuf *gbuf;
	u16 msg_size;

	if (connection->state != GB_CONNECTION_STATE_ENABLED)
		return;

	if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
		gb_connection_err(connection, "message too big");
		return;
	}

	header = data;
	msg_size = le16_to_cpu(header->size);
	if (header->type & GB_OPERATION_TYPE_RESPONSE) {
		u16 id = le16_to_cpu(header->id);

		operation = gb_operation_find(connection, id);
		if (!operation) {
			gb_connection_err(connection, "operation not found");
			return;
		}
		gb_operation_remove(operation);
		gbuf = operation->response;
		gbuf->status = GB_OP_SUCCESS;	/* If we got here we're good */
		if (size > gbuf->transfer_buffer_length) {
			gb_connection_err(connection, "recv buffer too small");
			return;
		}
	} else {
		WARN_ON(msg_size != size);
		operation = gb_operation_create(connection, header->type,
						msg_size, 0);
		if (!operation) {
			gb_connection_err(connection, "can't create operation");
			return;
		}
		gbuf = operation->request;
	}

	memcpy(gbuf->transfer_buffer, data, msg_size);
	gbuf->actual_length = msg_size;

	/* The rest will be handled in work queue context */
	queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
	operation->canceled = true;
	greybus_kill_gbuf(operation->request);
	if (operation->response)
		greybus_kill_gbuf(operation->response);
}

int gb_operation_init(void)
{
	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		return -ENOMEM;

	gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
	if (!gb_operation_recv_workqueue) {
		kmem_cache_destroy(gb_operation_cache);
		gb_operation_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

void gb_operation_exit(void)
{
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	destroy_workqueue(gb_operation_recv_workqueue);
	gb_operation_recv_workqueue = NULL;
}