blob: 43ad24424d37c14f92c58eee65b5305ffafda260 [file] [log] [blame]
/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */
8
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "greybus.h"
15
/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set).
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80
21
22/*
Alex Eldere88afa52014-10-01 21:54:15 -050023 * All operation messages (both requests and responses) begin with
24 * a common header that encodes the size of the data (header
25 * included). This header also contains a unique identifier, which
26 * is used to keep track of in-flight operations. Finally, the
27 * header contains a operation type field, whose interpretation is
28 * dependent on what type of device lies on the other end of the
29 * connection. Response messages are distinguished from request
30 * messages by setting the high bit (0x80) in the operation type
31 * value.
32 *
33 * The wire format for all numeric fields in the header is little
34 * endian. Any operation-specific data begins immediately after the
35 * header, and is 64-bit aligned.
36 */
/* Wire header that begins every operation message; all fields little endian */
struct gb_operation_msg_hdr {
	__le16 size;	/* Size in bytes of header + payload */
	__le16 id;	/* Operation unique id */
	__u8 type;	/* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
43
/*
 * Protects every connection's operations list (see gb_operation_create()
 * and gb_operation_destroy()).
 * XXX Could be per-host device, per-module, or even per-connection
 */
static DEFINE_SPINLOCK(gb_operations_lock);
46
47/*
48 * An operations's response message has arrived. If no callback was
49 * supplied it was submitted for asynchronous completion, so we notify
50 * any waiters. Otherwise we assume calling the completion is enough
51 * and nobody else will be waiting.
52 */
53void gb_operation_complete(struct gb_operation *operation)
54{
55 if (operation->callback)
56 operation->callback(operation);
57 else
58 complete_all(&operation->completion);
59}
60
61/*
62 * Wait for a submitted operatnoi to complete */
63int gb_operation_wait(struct gb_operation *operation)
64{
65 int ret;
66
67 ret = wait_for_completion_interruptible(&operation->completion);
68 /* If interrupted, cancel the in-flight buffer */
69 if (ret < 0)
Alex Elder22b320f2014-10-16 06:35:31 -050070 ret = greybus_kill_gbuf(operation->request);
Alex Eldere88afa52014-10-01 21:54:15 -050071 return ret;
72
73}
74
75/*
76 * Submit an outbound operation. The caller has filled in any
77 * payload so the request message is ready to go. If non-null,
78 * the callback function supplied will be called when the response
79 * message has arrived indicating the operation is complete. A null
80 * callback function is used for a synchronous request; return from
81 * this function won't occur until the operation is complete (or an
82 * interrupt occurs).
83 */
84int gb_operation_submit(struct gb_operation *operation,
85 gb_operation_callback callback)
86{
87 int ret;
88
Alex Elder22b320f2014-10-16 06:35:31 -050089 /*
90 * XXX
91 * I think the order of operations is going to be
92 * significant, and if so, we may need a mutex to surround
93 * setting the operation id and submitting the gbuf.
Alex Eldere88afa52014-10-01 21:54:15 -050094 */
95 operation->callback = callback;
Alex Elder22b320f2014-10-16 06:35:31 -050096 ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
Alex Eldere88afa52014-10-01 21:54:15 -050097 if (ret)
98 return ret;
99 if (!callback)
100 ret = gb_operation_wait(operation);
101
102 return ret;
103}
104
/*
 * Called when an operation buffer completes.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	/* TODO: route the completed buffer back to its owning operation */
}
112
113/*
Alex Elder22b320f2014-10-16 06:35:31 -0500114 * Allocate a buffer to be used for an operation request or response
115 * message. Both types of message contain a header, which is filled
116 * in here. W
Alex Eldere88afa52014-10-01 21:54:15 -0500117 */
Alex Elder22b320f2014-10-16 06:35:31 -0500118struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
119 u8 type, size_t size, bool outbound)
Alex Eldere88afa52014-10-01 21:54:15 -0500120{
Alex Elder22b320f2014-10-16 06:35:31 -0500121 struct gb_connection *connection = operation->connection;
Alex Eldere88afa52014-10-01 21:54:15 -0500122 struct gb_operation_msg_hdr *header;
123 struct gbuf *gbuf;
Alex Elder22b320f2014-10-16 06:35:31 -0500124 gfp_t gfp_flags = outbound ? GFP_KERNEL : GFP_ATOMIC;
Alex Eldere88afa52014-10-01 21:54:15 -0500125
Alex Elder22b320f2014-10-16 06:35:31 -0500126 /* Operation buffers hold a header in addition to their payload */
Alex Eldere88afa52014-10-01 21:54:15 -0500127 size += sizeof(*header);
Alex Elder22b320f2014-10-16 06:35:31 -0500128 gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
129 size, outbound, gfp_flags, operation);
130 if (!gbuf)
Alex Eldere88afa52014-10-01 21:54:15 -0500131 return NULL;
Alex Eldere88afa52014-10-01 21:54:15 -0500132
Alex Elder22b320f2014-10-16 06:35:31 -0500133 /* Fill in the header structure */
134 header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
Greg Kroah-Hartman322543a2014-10-02 21:25:21 -0700135 header->size = cpu_to_le16(size);
Alex Elderb0b65752014-10-03 15:05:20 -0500136 header->id = 0; /* Filled in when submitted */
137 header->type = type;
Alex Elder22b320f2014-10-16 06:35:31 -0500138
139 return gbuf;
140}
141
142/*
143 * Create a Greybus operation to be sent over the given connection.
144 * The request buffer will big enough for a payload of the given
145 * size. Outgoing requests must specify the size of the response
146 * buffer size, which must be sufficient to hold all expected
147 * response data.
148 *
149 * Incoming requests will supply a response size of 0, and in that
150 * case no response buffer is allocated. (A response always
151 * includes a status byte, so 0 is not a valid size.) Whatever
152 * handles the operation request is responsible for allocating the
153 * response buffer.
154 *
155 * Returns a pointer to the new operation or a null pointer if an
156 * error occurs.
157 */
158struct gb_operation *gb_operation_create(struct gb_connection *connection,
159 u8 type, size_t request_size,
160 size_t response_size)
161{
162 struct gb_operation *operation;
163 gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
164
165 if (!request_size) {
166 gb_connection_err(connection, "zero-sized request");
167 return NULL;
168 }
169
170 /* XXX Use a slab cache */
171 operation = kzalloc(sizeof(*operation), gfp_flags);
172 if (!operation)
173 return NULL;
174 operation->connection = connection; /* XXX refcount? */
175
176 operation->request = gb_operation_gbuf_create(operation, type,
177 request_size, true);
178 if (!operation->request) {
179 kfree(operation);
180 return NULL;
181 }
182 operation->request_payload = operation->request->transfer_buffer +
183 sizeof(struct gb_operation_msg_hdr);
184 /* We always use the full request buffer */
185 operation->request->actual_length = request_size;
186
187 if (response_size) {
188 type |= GB_OPERATION_TYPE_RESPONSE;
189 operation->response = gb_operation_gbuf_create(operation,
190 type, response_size, false);
191 if (!operation->response) {
192 greybus_free_gbuf(operation->request);
193 kfree(operation);
194 return NULL;
195 }
196 operation->response_payload =
197 operation->response->transfer_buffer +
198 sizeof(struct gb_operation_msg_hdr);
199 }
Alex Eldere88afa52014-10-01 21:54:15 -0500200
201 operation->callback = NULL; /* set at submit time */
202 init_completion(&operation->completion);
203
204 spin_lock_irq(&gb_operations_lock);
205 list_add_tail(&operation->links, &connection->operations);
206 spin_unlock_irq(&gb_operations_lock);
207
208 return operation;
209}
210
211/*
212 * Destroy a previously created operation.
213 */
214void gb_operation_destroy(struct gb_operation *operation)
215{
216 if (WARN_ON(!operation))
217 return;
218
219 /* XXX Make sure it's not in flight */
220 spin_lock_irq(&gb_operations_lock);
221 list_del(&operation->links);
222 spin_unlock_irq(&gb_operations_lock);
223
Alex Elder22b320f2014-10-16 06:35:31 -0500224 greybus_free_gbuf(operation->response);
225 greybus_free_gbuf(operation->request);
Alex Eldere88afa52014-10-01 21:54:15 -0500226
227 kfree(operation);
228}