/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	if (connection->state != GB_CONNECTION_STATE_ENABLED &&
	    connection->state != GB_CONNECTION_STATE_ENABLED_TX &&
	    !gb_operation_is_incoming(operation)) {
		spin_unlock_irqrestore(&connection->lock, flags);
		return -ENOTCONN;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

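/* Returns true while the operation is still on its connection's list. */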
static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

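/*
 * Fetch an operation's final result.  Only meaningful once the
 * operation has completed; the WARN_ONs below catch reads of a result
 * that was never set or is still in progress.
 */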
int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

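/* Hand a message to the host-device driver for transmission. */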
static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

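/*
 * Pass an incoming request to the connection's registered handler and
 * send back a response carrying its status.  Requests arriving on a
 * connection without a handler are rejected with -EPROTONOSUPPORT.
 */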
static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation))
		gb_operation_request_handle(operation);
	else
		operation->callback(operation);

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

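/*
 * Initialize a message's header and payload pointers within its
 * preallocated buffer.  Outbound messages also get their header fields
 * filled in here; inbound buffers are simply overwritten by arriving
 * data, so their headers are left untouched.
 */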
static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * 0x00.  Such buffers will be overwritten by arriving data
	 * so there's no need to initialize the message header.
	 */
	if (type != GB_OPERATION_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

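/*
 * Allocate a response message for an operation.  The response_size is
 * the payload size only; space for the message header is added
 * internally.  Incoming-request handlers use this to attach response
 * data before the response is sent.
 */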
bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol, so we don't allocate one here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					 u8 type, size_t request_size,
					 size_t response_size,
					 gfp_t gfp)
{
	if (WARN_ON_ONCE(type == GB_OPERATION_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	return gb_operation_create_common(connection, type,
					  request_size, response_size,
					  0, gfp);
}
EXPORT_SYMBOL_GPL(gb_operation_create);

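/*
 * Largest payload a single operation message can carry on this
 * connection: the host device's maximum buffer size less the
 * operation message header.
 */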
size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

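/*
 * Create an operation for an incoming request.  An operation id of
 * zero marks the request as unidirectional: no response will be sent
 * for it.
 */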
static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size, 0, flags,
					       GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/*
 * Send an operation request message.  The caller has filled in any payload so
 * the request message is ready to go.  The callback function supplied will be
 * called when the response message has arrived indicating the operation is
 * complete.  In that case, the callback function is responsible for fetching
 * the result of the operation using gb_operation_result() if desired, and
 * dropping the initial reference to the operation.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id.
	 */
	cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
	operation->id = (u16)(cycle % U16_MAX + 1);
	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation.  It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived or an
 * error is detected.  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;
	unsigned long timeout_jiffies;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (timeout)
		timeout_jiffies = msecs_to_jiffies(timeout);
	else
		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	ret = wait_for_completion_interruptible_timeout(&operation->completion,
							timeout_jiffies);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if it timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it can
 * simply supply the result errno; this function will allocate the
 * response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error, there's nothing more
	 * to do until the response arrives.  If an error occurred
	 * attempting to send it, record that as the result of
	 * the operation and schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}
		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       u16 operation_id, u8 type,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
			u16 operation_id, u8 result, void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	int errno = gb_operation_status_map(result);
	size_t message_size;

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: unexpected response id 0x%04x received\n",
			connection->name, operation_id);
		return;
	}

	message = operation->response;
	message_size = sizeof(*message->header) + message->payload_size;
	if (!errno && size != message_size) {
		dev_err(&connection->hd->dev,
			"%s: malformed response 0x%02x received (%zu != %zu)\n",
			connection->name, message->header->type, size,
			message_size);
		errno = -EMSGSIZE;
	}
	trace_gb_message_recv_response(operation->response);

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*message->header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->header, data, size);
		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something with
 * it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;
	u16 operation_id;

	if (connection->state != GB_CONNECTION_STATE_ENABLED &&
	    connection->state != GB_CONNECTION_STATE_ENABLED_TX) {
		dev_warn(dev, "%s: dropping %zu received bytes\n",
			 connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err(dev, "%s: short message received\n", connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err(dev,
			"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
			connection->name, le16_to_cpu(header.operation_id),
			header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	operation_id = le16_to_cpu(header.operation_id);
	if (header.type & GB_MESSAGE_TYPE_RESPONSE)
		gb_connection_recv_response(connection, operation_id,
					    header.result, data, msg_size);
	else
		gb_connection_recv_request(connection, operation_id,
					   header.type, data, msg_size);
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously.  Called during connection
 * tear down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout: implement a "simple" synchronous gb operation.
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
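
/*
 * Typical caller usage, as a sketch only: the operation type and the
 * request/response structures below are hypothetical, not defined in
 * this file.
 *
 *	struct example_request req = { .value = cpu_to_le32(42) };
 *	struct example_response resp;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_SET,
 *					&req, sizeof(req),
 *					&resp, sizeof(resp),
 *					1000);
 *	if (ret)
 *		return ret;
 */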
Greg Kroah-Hartman10aa8012014-11-24 11:19:13 -08001052
Alex Elder47ed2c92015-06-09 17:42:50 -05001053int __init gb_operation_init(void)
Alex Elder2eb585f2014-10-16 06:35:34 -05001054{
Johan Hovold1e5613b2015-04-07 11:27:17 +02001055 gb_message_cache = kmem_cache_create("gb_message_cache",
1056 sizeof(struct gb_message), 0, 0, NULL);
1057 if (!gb_message_cache)
Alex Elder0cffcac2014-12-02 08:30:35 -06001058 return -ENOMEM;
1059
Alex Elder5b3db0d2014-10-20 10:27:56 -05001060 gb_operation_cache = kmem_cache_create("gb_operation_cache",
1061 sizeof(struct gb_operation), 0, 0, NULL);
1062 if (!gb_operation_cache)
Johan Hovold1e5613b2015-04-07 11:27:17 +02001063 goto err_destroy_message_cache;
Alex Elder2eb585f2014-10-16 06:35:34 -05001064
Johan Hovold701615f2015-07-23 10:50:03 +02001065 gb_operation_completion_wq = alloc_workqueue("greybus_completion",
1066 0, 0);
1067 if (!gb_operation_completion_wq)
1068 goto err_destroy_operation_cache;
1069
Alex Elder2eb585f2014-10-16 06:35:34 -05001070 return 0;
Johan Hovold5a5bc352015-07-23 10:50:02 +02001071
Johan Hovold701615f2015-07-23 10:50:03 +02001072err_destroy_operation_cache:
1073 kmem_cache_destroy(gb_operation_cache);
1074 gb_operation_cache = NULL;
Johan Hovold1e5613b2015-04-07 11:27:17 +02001075err_destroy_message_cache:
1076 kmem_cache_destroy(gb_message_cache);
1077 gb_message_cache = NULL;
Alex Elder0cffcac2014-12-02 08:30:35 -06001078
1079 return -ENOMEM;
Alex Elder2eb585f2014-10-16 06:35:34 -05001080}
1081
Alex Elderf35ab902015-06-09 17:42:51 -05001082void gb_operation_exit(void)
Alex Elder2eb585f2014-10-16 06:35:34 -05001083{
Johan Hovold701615f2015-07-23 10:50:03 +02001084 destroy_workqueue(gb_operation_completion_wq);
1085 gb_operation_completion_wq = NULL;
Viresh Kumar837b3b72014-11-14 17:25:00 +05301086 kmem_cache_destroy(gb_operation_cache);
1087 gb_operation_cache = NULL;
Johan Hovold1e5613b2015-04-07 11:27:17 +02001088 kmem_cache_destroy(gb_message_cache);
1089 gb_message_cache = NULL;
Alex Elderd90c25b2014-10-16 06:35:33 -05001090}