/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set).
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included). This header also contains a unique identifier, which
 * is used to keep track of in-flight operations. Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection. Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian. Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));

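/*
 * Illustrative example (not taken from any protocol definition): a
 * request carrying a two-byte payload occupies 10 bytes on the wire,
 * namely the 8-byte header (size = 0x000a stored little endian, id
 * assigned at send time, type = 0x02, say) followed by the two
 * payload bytes. The matching response reuses the same id and
 * carries type 0x82 (0x02 | 0x80).
 */
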
/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

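/*
 * Assign an outgoing operation an id and track it on its connection's
 * pending list while its request is in flight. The id copied into the
 * request header is what lets us match up the response when it arrives.
 */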
static void gb_pending_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;

	/*
	 * Assign the operation's id and move it into its
	 * connection's pending list.
	 */
	spin_lock_irq(&gb_operations_lock);
	operation->id = ++connection->op_cycle;
	list_move_tail(&operation->links, &connection->pending);
	spin_unlock_irq(&gb_operations_lock);

	/* Store the operation id in the request header */
	header = operation->request->transfer_buffer;
	header->id = cpu_to_le16(operation->id);
}

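/*
 * An operation is no longer pending; move it back to its connection's
 * general operations list.
 */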
static void gb_pending_operation_remove(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;

	/* Take us off of the list of pending operations */
	spin_lock_irq(&gb_operations_lock);
	list_move_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);
}

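/*
 * Look up the operation with the given id on a connection's pending
 * list. This is a linear scan under the operations lock; returns NULL
 * if no pending operation has that id.
 */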
static struct gb_operation *
gb_pending_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	list_for_each_entry(operation, &connection->pending, links)
		if (operation->id == id) {
			found = true;
			break;
		}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}

/*
 * An operation's response message has arrived. If no callback was
 * supplied the submitter is waiting on the operation's completion,
 * so we notify any waiters. Otherwise we assume calling the callback
 * is enough and nobody else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		greybus_kill_gbuf(operation->request);

	return ret;
}

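/*
 * An incoming request has arrived. Dispatch it to the connection's
 * protocol handler if one is registered; otherwise complain and mark
 * the operation's result as a protocol error.
 */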
static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_protocol *protocol = operation->connection->protocol;
	struct gb_operation_msg_hdr *header;

	header = operation->request->transfer_buffer;

	/*
	 * If the protocol has no incoming request handler, report
	 * an error and mark the request bad.
	 */
	if (protocol->request_recv) {
		protocol->request_recv(header->type, operation);
		goto out;
	}

	gb_connection_err(operation->connection,
		"unexpected incoming request type 0x%02hhx\n", header->type);
	operation->result = GB_OP_PROTOCOL_BAD;
out:
	gb_operation_complete(operation);
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived. An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	incoming_request = operation->response == NULL;
	if (incoming_request)
		gb_operation_request_handle(operation);
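	/*
	 * XXX gb_operation_request_handle() above already calls
	 * gb_operation_complete(), so for an incoming request this
	 * next call is redundant (though harmless: requests have no
	 * callback, and completing an already-completed completion
	 * has no further effect on waiters).
	 */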
	gb_operation_complete(operation);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out,
 * and run the completion handler, which (hopefully) should clean up the
 * operation properly.
 */
static void operation_timeout(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, timeout_work.work);
	pr_debug("%s: timeout!\n", __func__);

	operation->result = GB_OP_TIMEOUT;
	gb_operation_complete(operation);
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message. For outgoing messages, both types of message contain a
 * common header, which is filled in here. Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
					u8 type, size_t size, bool data_out)
{
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;
	u16 dest_cport_id;

	if (size > GB_OPERATION_MESSAGE_SIZE_MAX)
		return NULL;	/* Message too big */

	if (data_out)
		dest_cport_id = operation->connection->interface_cport_id;
	else
		dest_cport_id = CPORT_ID_BAD;
	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(operation, dest_cport_id, size, gfp_flags);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;		/* Filled in when submitted */
	header->type = type;

	return gbuf;
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size. Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated. (A response always
 * includes a status byte, so 0 is not a valid size.) Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					u8 type, size_t request_size,
					size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
	bool outgoing = response_size != 0;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_gbuf_create(operation, type,
							request_size,
							outgoing);
	if (!operation->request)
		goto err_cache;
	operation->request_payload = operation->request->transfer_buffer +
					sizeof(struct gb_operation_msg_hdr);

	if (outgoing) {
		type |= GB_OPERATION_TYPE_RESPONSE;
		operation->response = gb_operation_gbuf_create(operation,
						type, response_size,
						false);
		if (!operation->response)
			goto err_request;
		operation->response_payload =
				operation->response->transfer_buffer +
				sizeof(struct gb_operation_msg_hdr);
	}

	INIT_WORK(&operation->recv_work, gb_operation_recv_work);
	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);
	INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);
	kref_init(&operation->kref);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;

err_request:
	greybus_free_gbuf(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

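/*
 * Sketch of a typical synchronous caller (illustrative only; the
 * operation type and payload sizes are stand-ins for whatever a
 * protocol driver defines):
 *
 *	operation = gb_operation_create(connection, type,
 *					sizeof(*request), sizeof(*response));
 *	if (!operation)
 *		return -ENOMEM;
 *	(fill in operation->request_payload, then...)
 *	ret = gb_operation_request_send(operation, NULL);
 *	(on success, examine operation->response_payload, then...)
 *	gb_operation_destroy(operation);
 */
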
/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

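/* Drop a reference to an operation; the last reference destroys it. */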
void gb_operation_put(struct gb_operation *operation)
{
	if (!WARN_ON(!operation))
		kref_put(&operation->kref, _gb_operation_destroy);
}

/*
 * Send an operation request message. The caller has filled in
 * any payload so the request message is ready to go. If non-null,
 * the callback function supplied will be called when the response
 * message has arrived indicating the operation is complete. A null
 * callback function is used for a synchronous request; return from
 * this function won't occur until the operation is complete (or an
 * interrupt occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback)
{
	unsigned long timeout;
	int ret;

	if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
		return -ENOTCONN;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_pending_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;

	/* We impose a time limit for requests to complete. */
	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
	schedule_delayed_work(&operation->timeout_work, timeout);
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}

/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
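	/*
	 * XXX No response message is actually queued here yet; for
	 * now this just tears down the operation and reports success.
	 */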
	gb_operation_destroy(operation);

	return 0;
}

/*
 * Handle data arriving on a connection. As soon as we return, the
 * incoming data buffer will be reused, so we need to copy the data
 * into one of our own operation message buffers.
 *
 * If the incoming data is an operation response message, look up
 * the operation and copy the incoming data into its response
 * buffer. Otherwise allocate a new operation and copy the incoming
 * data into its request buffer.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the buffer and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gbuf *gbuf;
	u16 msg_size;

	if (connection->state != GB_CONNECTION_STATE_ENABLED)
		return;

	if (size < sizeof(*header)) {
		gb_connection_err(connection, "message too small");
		return;
	}

	header = data;
	msg_size = le16_to_cpu(header->size);
	if (header->type & GB_OPERATION_TYPE_RESPONSE) {
		u16 id = le16_to_cpu(header->id);

		operation = gb_pending_operation_find(connection, id);
		if (!operation) {
			gb_connection_err(connection, "operation not found");
			return;
		}
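		/* The response arrived in time; stop the timeout timer */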
		cancel_delayed_work(&operation->timeout_work);
		gb_pending_operation_remove(operation);
		gbuf = operation->response;
		if (size > gbuf->transfer_buffer_length) {
			operation->result = GB_OP_OVERFLOW;
			gb_connection_err(connection, "recv buffer too small");
			return;
		}
		operation->result = GB_OP_SUCCESS;
	} else {
		WARN_ON(msg_size != size);
		operation = gb_operation_create(connection, header->type,
						msg_size, 0);
		if (!operation) {
			gb_connection_err(connection, "can't create operation");
			return;
		}
		gbuf = operation->request;
	}

	memcpy(gbuf->transfer_buffer, data, msg_size);

	/* The rest will be handled in work queue context */
	queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
	operation->canceled = true;
	greybus_kill_gbuf(operation->request);
	if (operation->response)
		greybus_kill_gbuf(operation->response);
}

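/*
 * One-time setup: create the slab cache used to allocate operations
 * and the workqueue on which incoming messages are processed.
 */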
int gb_operation_init(void)
{
	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		return -ENOMEM;

	gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
	if (!gb_operation_recv_workqueue) {
		kmem_cache_destroy(gb_operation_cache);
		gb_operation_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_recv_workqueue);
	gb_operation_recv_workqueue = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
}