/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

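/*
 * Sketch (illustrative; this helper is not part of the original file):
 * how the response bit is tested.  A request of type 0x02, for
 * example, is answered with a message of type 0x82.
 */
static inline bool gb_operation_msg_is_response(u8 type)
{
	return (type & GB_OPERATION_TYPE_RESPONSE) != 0;
}
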
#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 * XXX May need to reduce to allow for message header within a page
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included). This header also contains a unique identifier, which
 * is used to keep track of in-flight operations. Finally, the
 * header contains an operation type field, whose interpretation
 * depends on what type of device lies on the other end of the
 * connection. Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian. Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));

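/*
 * Worked example of the wire format (illustrative): a request of type
 * 0x02 carrying a two-byte payload has size 8 (header) + 2 = 10, so
 * the message begins with these bytes, all fields little endian:
 *
 *	0a 00		size = 10
 *	01 00		id (assigned when the request is submitted)
 *	02		type (request; the response would use 0x82)
 *	00 00 00	pad, must be zero
 *
 * The two payload bytes follow immediately after the header.
 */
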
/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_pending_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;

	/*
	 * Assign the operation's id and move it into its
	 * connection's pending list.
	 */
	spin_lock_irq(&gb_operations_lock);
	operation->id = ++connection->op_cycle;
	list_move_tail(&operation->links, &connection->pending);
	spin_unlock_irq(&gb_operations_lock);

	/* Store the operation id in the request header */
	header = operation->request.gbuf.transfer_buffer;
	header->id = cpu_to_le16(operation->id);
}

static void gb_pending_operation_remove(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;

	/* Take us off of the list of pending operations */
	spin_lock_irq(&gb_operations_lock);
	list_move_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_pending_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	list_for_each_entry(operation, &connection->pending, links)
		if (operation->id == id) {
			found = true;
			break;
		}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}

static int greybus_submit_gbuf(struct gbuf *gbuf, gfp_t gfp_mask)
{
	gbuf->status = -EINPROGRESS;

	return gbuf->hd->driver->submit_gbuf(gbuf, gfp_mask);
}

static void greybus_kill_gbuf(struct gbuf *gbuf)
{
	if (gbuf->status != -EINPROGRESS)
		return;

	gbuf->hd->driver->kill_gbuf(gbuf);
}

/*
 * An operation's response message has arrived. If no callback was
 * supplied the submitter is waiting on the operation's completion,
 * so we notify any waiters. Otherwise we assume calling the
 * callback is enough and nobody else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		greybus_kill_gbuf(&operation->request.gbuf);

	return ret;
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_protocol *protocol = operation->connection->protocol;
	struct gb_operation_msg_hdr *header;

	header = operation->request.gbuf.transfer_buffer;

	/*
	 * If the protocol has no incoming request handler, report
	 * an error and mark the request bad.
	 */
	if (protocol->request_recv) {
		protocol->request_recv(header->type, operation);
		goto out;
	}

	gb_connection_err(operation->connection,
		"unexpected incoming request type 0x%02hhx\n", header->type);
	operation->result = GB_OP_PROTOCOL_BAD;
out:
	gb_operation_complete(operation);
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived. An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	incoming_request = operation->response.gbuf.transfer_buffer == NULL;

	/*
	 * The request handler completes the operation itself, so only
	 * complete it here when a response has arrived.
	 */
	if (incoming_request)
		gb_operation_request_handle(operation);
	else
		gb_operation_complete(operation);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the operation
 * properly.
 */
static void operation_timeout(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, timeout_work.work);
	pr_debug("%s: timeout!\n", __func__);

	operation->result = GB_OP_TIMEOUT;
	gb_operation_complete(operation);
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message. For outgoing messages, both types of message contain a
 * common header, which is filled in here. Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static int gb_operation_message_init(struct gb_operation *operation,
					u8 type, size_t size,
					bool request, bool outbound)
{
	struct gb_connection *connection = operation->connection;
	struct greybus_host_device *hd = connection->hd;
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = request && !outbound ? GFP_ATOMIC : GFP_KERNEL;
	u16 dest_cport_id;

	if (size > GB_OPERATION_MESSAGE_SIZE_MAX)
		return -E2BIG;
	size += sizeof(*header);

	if (request) {
		message = &operation->request;
	} else {
		message = &operation->response;
		type |= GB_OPERATION_TYPE_RESPONSE;
	}
	gbuf = &message->gbuf;

	if (outbound)
		dest_cport_id = connection->interface_cport_id;
	else
		dest_cport_id = CPORT_ID_BAD;

	gbuf->transfer_buffer = hd->driver->buffer_alloc(size, gfp_flags);
	if (!gbuf->transfer_buffer)
		return -ENOMEM;
	gbuf->transfer_buffer_length = size;
	gbuf->hd = hd;
	gbuf->dest_cport_id = dest_cport_id;
	gbuf->status = -EBADR;	/* Initial value--means "never set" */

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;		/* Filled in when submitted */
	header->type = type;

	message->payload = header + 1;
	message->operation = operation;

	return 0;
}

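/*
 * Resulting message layout (illustrative):
 *
 *	gbuf->transfer_buffer --> +---------------------------+
 *	                          | header (8 bytes)          |
 *	message->payload -------> +---------------------------+
 *	                          | payload (size - 8 bytes)  |
 *	                          +---------------------------+
 *
 * The assignment message->payload = header + 1 relies on the header
 * being exactly 8 bytes: __aligned(sizeof(u64)) plus the 3 pad bytes.
 */
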
static void gb_operation_message_exit(struct gb_message *message)
{
	message->operation = NULL;
	message->payload = NULL;
	message->gbuf.hd->driver->free_gbuf_data(&message->gbuf);
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size. Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated. (A response always
 * includes a status byte, so 0 is not a valid size.) Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					u8 type, size_t request_size,
					size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
	bool outgoing = response_size != 0;
	int ret;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	ret = gb_operation_message_init(operation, type, request_size,
					true, outgoing);
	if (ret)
		goto err_cache;

	if (outgoing) {
		ret = gb_operation_message_init(operation, type, response_size,
						false, false);
		if (ret)
			goto err_request;
	}

	INIT_WORK(&operation->recv_work, gb_operation_recv_work);
	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);
	INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);
	kref_init(&operation->kref);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;

err_request:
	gb_operation_message_exit(&operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	gb_operation_message_exit(&operation->response);
	gb_operation_message_exit(&operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

void gb_operation_put(struct gb_operation *operation)
{
	if (!WARN_ON(!operation))
		kref_put(&operation->kref, _gb_operation_destroy);
}

/*
 * Send an operation request message. The caller has filled in
 * any payload so the request message is ready to go. If non-null,
 * the callback function supplied will be called when the response
 * message has arrived indicating the operation is complete. A null
 * callback function is used for a synchronous request; return from
 * this function won't occur until the operation is complete (or an
 * interrupt occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback)
{
	unsigned long timeout;
	int ret;

	if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
		return -ENOTCONN;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_pending_operation_insert(operation);
	ret = greybus_submit_gbuf(&operation->request.gbuf, GFP_KERNEL);
	if (ret)
		return ret;

	/* We impose a time limit for requests to complete. */
	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
	schedule_delayed_work(&operation->timeout_work, timeout);
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}

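/*
 * Usage sketch (illustrative only; the operation type 0x02 and the
 * two-byte sizes are made up for this example):
 *
 *	struct gb_operation *operation;
 *	int ret;
 *
 *	operation = gb_operation_create(connection, 0x02, 2, 2);
 *	if (!operation)
 *		return -ENOMEM;
 *	memcpy(operation->request.payload, data, 2);
 *
 *	ret = gb_operation_request_send(operation, NULL);
 *
 * A null callback blocks until the response (or the timeout) completes
 * the operation; on success the reply is in operation->response.payload.
 * Passing a non-null gb_operation_callback instead returns immediately,
 * and the callback runs when the response is processed.
 */
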
/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
	/*
	 * XXX Sending the response message is not yet implemented;
	 * for now just drop our reference to the operation.
	 */
	gb_operation_put(operation);

	return 0;
}

/*
 * Handle data arriving on a connection. As soon as we return, the
 * incoming data buffer will be reused, so we need to copy the data
 * into one of our own operation message buffers.
 *
 * If the incoming data is an operation response message, look up
 * the operation and copy the incoming data into its response
 * buffer. Otherwise allocate a new operation and copy the incoming
 * data into its request buffer.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the buffer and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gbuf *gbuf;
	u16 msg_size;

	if (connection->state != GB_CONNECTION_STATE_ENABLED)
		return;

	if (size < sizeof(*header)) {
		gb_connection_err(connection, "message too small");
		return;
	}

	header = data;
	msg_size = le16_to_cpu(header->size);
	if (header->type & GB_OPERATION_TYPE_RESPONSE) {
		u16 id = le16_to_cpu(header->id);

		operation = gb_pending_operation_find(connection, id);
		if (!operation) {
			gb_connection_err(connection, "operation not found");
			return;
		}
		cancel_delayed_work(&operation->timeout_work);
		gb_pending_operation_remove(operation);
		gbuf = &operation->response.gbuf;
		if (size > gbuf->transfer_buffer_length) {
			operation->result = GB_OP_OVERFLOW;
			gb_connection_err(connection, "recv buffer too small");
			return;
		}
		operation->result = GB_OP_SUCCESS;
	} else {
		WARN_ON(msg_size != size);
		operation = gb_operation_create(connection, header->type,
						msg_size, 0);
		if (!operation) {
			gb_connection_err(connection, "can't create operation");
			return;
		}
		gbuf = &operation->request.gbuf;
	}

	memcpy(gbuf->transfer_buffer, data, msg_size);

	/* The rest will be handled in work queue context */
	queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
	operation->canceled = true;
	greybus_kill_gbuf(&operation->request.gbuf);
	if (operation->response.gbuf.transfer_buffer)
		greybus_kill_gbuf(&operation->response.gbuf);
}

int gb_operation_init(void)
{
	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		return -ENOMEM;

	gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
	if (!gb_operation_recv_workqueue) {
		kmem_cache_destroy(gb_operation_cache);
		gb_operation_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

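/*
 * Typical call order (illustrative; the actual callers live in the
 * Greybus core, not in this file): call gb_operation_init() from the
 * module init path and fail the load if it returns an error; call
 * gb_operation_exit() from the module exit path to tear down the
 * workqueue and cache.
 *
 *	ret = gb_operation_init();
 *	if (ret)
 *		return ret;
 *	...
 *	gb_operation_exit();
 */
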
void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_recv_workqueue);
	gb_operation_recv_workqueue = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
}