/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included). This header also contains a unique identifier, which
 * is used to keep track of in-flight operations. Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection. Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian. Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
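
/*
 * A worked example of the layout described above, for illustration
 * only (the type value here is hypothetical, not one defined by this
 * driver): a request carrying a 4-byte payload occupies 12 bytes on
 * the wire, an 8-byte header followed immediately by the payload,
 * with all multi-byte fields little-endian:
 *
 *	offset 0: size = cpu_to_le16(12)	(header + payload)
 *	offset 2: id   = cpu_to_le16(1)		(assigned at submit time)
 *	offset 4: type = 0x02			(request)
 *	offset 5: three pad bytes, zero
 *	offset 8: 4-byte payload
 *
 * The matching response carries the same id and sets the high bit in
 * the type field (0x82 here), per GB_OPERATION_TYPE_RESPONSE.
 */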

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct rb_root *root = &connection->pending;
	struct rb_node *node = &operation->node;
	struct rb_node **link = &root->rb_node;
	struct rb_node *above = NULL;
	struct gb_operation_msg_hdr *header;
	__le16 wire_id;

	/*
	 * Assign the operation's id, and store it in the request and
	 * response message headers.
	 */
	operation->id = gb_connection_operation_id(connection);
	wire_id = cpu_to_le16(operation->id);
	header = operation->request->transfer_buffer;
	header->id = wire_id;

	/* OK, insert the operation into its connection's tree */
	spin_lock_irq(&gb_operations_lock);

	while (*link) {
		struct gb_operation *other;

		above = *link;
		other = rb_entry(above, struct gb_operation, node);
		header = other->request->transfer_buffer;
		if (other->id > operation->id)
			link = &above->rb_left;
		else if (other->id < operation->id)
			link = &above->rb_right;
	}
	rb_link_node(node, above, link);
	rb_insert_color(node, root);

	spin_unlock_irq(&gb_operations_lock);
}

static void gb_operation_remove(struct gb_operation *operation)
{
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &operation->connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation;
	struct rb_node *node;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	node = connection->pending.rb_node;
	while (node && !found) {
		operation = rb_entry(node, struct gb_operation, node);
		if (operation->id > id)
			node = node->rb_left;
		else if (operation->id < id)
			node = node->rb_right;
		else
			found = true;
	}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}

/*
 * An operation's response message has arrived. If no callback was
 * supplied the operation was submitted synchronously, so we complete
 * it to wake any waiter. Otherwise we assume calling the callback is
 * enough and nobody else will be waiting.
 */
void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}

/*
 * Wait for a submitted operation to complete.
 */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		ret = greybus_kill_gbuf(operation->request);

	return ret;
}

/*
 * Submit an outbound operation. The caller has filled in any
 * payload so the request message is ready to go. If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete. A null
 * callback function is used for a synchronous request; this function
 * will not return until the operation is complete (or an interrupt
 * occurs).
 */
int gb_operation_submit(struct gb_operation *operation,
			gb_operation_callback callback)
{
	int ret;

	/*
	 * XXX
	 * I think the order of operations is going to be
	 * significant, and if so, we may need a mutex to surround
	 * setting the operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}
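
/*
 * Usage sketch, for illustration only (FOO_TYPE_BAR and the payload
 * structures are hypothetical, not defined by this driver). A
 * synchronous caller would do roughly:
 *
 *	operation = gb_operation_create(connection, FOO_TYPE_BAR,
 *					sizeof(struct foo_request),
 *					sizeof(struct foo_response));
 *	if (!operation)
 *		return -ENOMEM;
 *	(fill in operation->request_payload)
 *	ret = gb_operation_submit(operation, NULL);
 *	(on success, examine operation->response_payload)
 *	gb_operation_destroy(operation);
 *
 * An asynchronous caller instead passes a non-null callback, which
 * gb_operation_complete() invokes rather than signalling the
 * completion; the operation must not be destroyed before that
 * callback has run.
 */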

/*
 * Called when an operation buffer completes.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	struct gb_operation *operation;
	struct gb_operation_msg_hdr *header;
	u16 id;

	/*
	 * This isn't right, but it keeps things balanced until we
	 * can set up operation response handling.
	 */
	header = gbuf->transfer_buffer;
	id = le16_to_cpu(header->id);
	operation = gb_operation_find(gbuf->connection, id);
	if (operation)
		gb_operation_remove(operation);
	else
		gb_connection_err(gbuf->connection, "operation not found");
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message. Both types of message contain a header, which is filled
 * in here.
 */
struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
				      u8 type, size_t size, bool outbound)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = outbound ? GFP_KERNEL : GFP_ATOMIC;

	/* Operation buffers hold a header in addition to their payload */
	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
				  size, outbound, gfp_flags, operation);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;		/* Filled in when submitted */
	header->type = type;

	return gbuf;
}


/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size. Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated. (A response always
 * includes a status byte, so 0 is not a valid size.) Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					 u8 type, size_t request_size,
					 size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;

	if (!request_size) {
		gb_connection_err(connection, "zero-sized request");
		return NULL;
	}

	/* XXX Use a slab cache */
	operation = kzalloc(sizeof(*operation), gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;	/* XXX refcount? */

	operation->request = gb_operation_gbuf_create(operation, type,
						      request_size, true);
	if (!operation->request) {
		kfree(operation);
		return NULL;
	}
	operation->request_payload = operation->request->transfer_buffer +
					sizeof(struct gb_operation_msg_hdr);
	/* We always use the full request buffer */
	operation->request->actual_length = request_size;

	if (response_size) {
		type |= GB_OPERATION_TYPE_RESPONSE;
		operation->response = gb_operation_gbuf_create(operation,
						type, response_size, false);
		if (!operation->response) {
			greybus_free_gbuf(operation->request);
			kfree(operation);
			return NULL;
		}
		operation->response_payload =
				operation->response->transfer_buffer +
				sizeof(struct gb_operation_msg_hdr);
	}

	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;
}
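
/*
 * Worked example of the sizing above, for illustration only (the type
 * and payload sizes are hypothetical):
 *
 *	op = gb_operation_create(connection, 0x02, 2, 4);
 *
 * allocates a 10-byte request gbuf and a 12-byte response gbuf (each
 * payload is preceded by the 8-byte message header), records type
 * 0x02 in the request header and 0x82 in the response header (the
 * high bit marking a response), and leaves the ids 0; an id is
 * assigned later, at submit time.
 */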

/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kfree(operation);
}