Greg Kroah-Hartman | de536e3 | 2014-08-31 16:17:04 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Greybus "AP" message loop handling |
| 3 | * |
| 4 | * Copyright 2014 Google Inc. |
| 5 | * |
| 6 | * Released under the GPLv2 only. |
| 7 | */ |
| 8 | |
| 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 10 | |
| 11 | #include <linux/types.h> |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/moduleparam.h> |
| 14 | #include <linux/kernel.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/uaccess.h> |
| 17 | #include <linux/kthread.h> |
| 18 | #include <linux/device.h> |
| 19 | #include "greybus.h" |
| 20 | |
/*
 * AP <-> SVC message structure format:
 *
 * A struct svc_msg_header followed by a function-specific payload of
 * header.payload_length bytes (see struct svc_msg below).
 */
/*
 * Functions multiplexed over the AP <-> SVC link; selects which payload
 * structure follows the message header.
 */
enum svc_function_type {
	SVC_FUNCTION_HANDSHAKE = 0x00,
	SVC_FUNCTION_UNIPRO_NETWORK_MANAGEMENT = 0x01,
	SVC_FUNCTION_HOTPLUG = 0x02,
	SVC_FUNCTION_DDB = 0x03,
	SVC_FUNCTION_POWER = 0x04,
	SVC_FUNCTION_EPM = 0x05,
	SVC_FUNCTION_SUSPEND = 0x06,
};

/*
 * Header preceding every AP <-> SVC message.
 *
 * NOTE(review): 'type' is tagged as enum svc_function_type while the
 * role of 'function' is undocumented -- confirm which field actually
 * selects the payload.  payload_length is a plain u16 (not __le16 or
 * __be16) and the struct is not __packed; verify both against the wire
 * protocol specification.
 */
struct svc_msg_header {
	u8 function;
	u8 type; /* enum svc_function_type */
	u8 version_major;
	u8 version_minor;
	u16 payload_length; /* bytes of payload following this header */
};
| 44 | |
/* Sub-types of an SVC_FUNCTION_HANDSHAKE message. */
enum svc_function_handshake_type {
	SVC_HANDSHAKE_SVC_HELLO = 0x00,
	SVC_HANDSHAKE_AP_HELLO = 0x01,
	SVC_HANDSHAKE_MODULE_HELLO = 0x02,
};

/* Payload of a handshake message. */
struct svc_function_handshake {
	u8 handshake_type; /* enum svc_function_handshake_type */
};

/* Routes a source device/CPort pair to a destination pair. */
struct svc_function_unipro_set_route {
	u8 source_device_id;
	u8 source_cport_id;
	u8 destination_device_id;
	u8 destination_cport_id;
};

/* Identifies the device a link-up event refers to. */
struct svc_function_unipro_link_up {
	u8 device_id;
};

/* Sub-types of an SVC_FUNCTION_UNIPRO_NETWORK_MANAGEMENT message. */
enum svc_function_management_event {
	SVC_MANAGEMENT_SET_ROUTE = 0x00,
	SVC_MANAGEMENT_LINK_UP = 0x01,
};

/*
 * UniPro network management payload; the anonymous union is
 * discriminated by management_packet_type.
 */
struct svc_function_unipro_management {
	u8 management_packet_type; /* enum svc_function_management_event */
	union {
		struct svc_function_unipro_set_route set_route;
		struct svc_function_unipro_link_up link_up;
	};
};
| 78 | |
| 79 | enum svc_function_hotplug_event { |
| 80 | SVC_HOTPLUG_EVENT = 0x00, |
| 81 | SVC_HOTUNPLUG_EVENT = 0x01, |
| 82 | }; |
| 83 | |
| 84 | struct svc_function_hotplug { |
| 85 | u8 hotplug_event; /* enum svc_function_hotplug_event */ |
| 86 | u8 device_id; |
| 87 | }; |
| 88 | |
| 89 | enum svc_function_ddb_type { |
| 90 | SVC_DDB_GET = 0x00, |
| 91 | SVC_DDB_RESPONSE = 0x01, |
| 92 | }; |
| 93 | |
| 94 | struct svc_function_ddb_get { |
| 95 | u8 device_id; |
| 96 | u8 message_id; |
| 97 | }; |
| 98 | |
| 99 | struct svc_function_ddb_response { |
| 100 | u8 device_id; |
| 101 | u8 message_id; |
| 102 | u16 descriptor_length; |
| 103 | u8 ddb[0]; |
| 104 | }; |
| 105 | |
| 106 | struct svc_function_ddb { |
| 107 | u8 ddb_type; /* enum svc_function_ddb_type */ |
| 108 | union { |
| 109 | struct svc_function_ddb_get ddb_get; |
| 110 | struct svc_function_ddb_response ddb_response; |
| 111 | }; |
| 112 | }; |
| 113 | |
| 114 | enum svc_function_power_type { |
| 115 | SVC_POWER_BATTERY_STATUS = 0x00, |
| 116 | SVC_POWER_BATTERY_STATUS_REQUEST = 0x01, |
| 117 | }; |
| 118 | |
| 119 | enum svc_function_battery_status { |
| 120 | SVC_BATTERY_UNKNOWN = 0x00, |
| 121 | SVC_BATTERY_CHARGING = 0x01, |
| 122 | SVC_BATTERY_DISCHARGING = 0x02, |
| 123 | SVC_BATTERY_NOT_CHARGING = 0x03, |
| 124 | SVC_BATTERY_FULL = 0x04, |
| 125 | }; |
| 126 | |
| 127 | struct svc_function_power_battery_status { |
| 128 | u16 charge_full; |
| 129 | u16 charge_now; |
| 130 | u8 status; /* enum svc_function_battery_status */ |
| 131 | }; |
| 132 | |
| 133 | struct svc_function_power_battery_status_request { |
| 134 | |
| 135 | }; |
| 136 | |
| 137 | struct svc_function_power { |
| 138 | u8 power_type; /* enum svc_function_power_type */ |
| 139 | union { |
| 140 | struct svc_function_power_battery_status status; |
| 141 | struct svc_function_power_battery_status_request request; |
| 142 | }; |
| 143 | }; |
| 144 | |
| 145 | struct svc_msg { |
| 146 | struct svc_msg_header header; |
| 147 | union { |
| 148 | struct svc_function_handshake handshake; |
| 149 | struct svc_function_unipro_management management; |
| 150 | struct svc_function_hotplug hotplug; |
| 151 | struct svc_function_ddb ddb; |
| 152 | u8 data[0]; |
| 153 | }; |
| 154 | }; |
| 155 | |
Greg Kroah-Hartman | be1e2e9 | 2014-08-31 16:21:33 -0700 | [diff] [blame] | 156 | |
/*
 * One message queued for the AP processing thread.  'data' is a
 * kmalloc'ed copy of 'size' bytes owned by this struct; both the copy
 * and the struct itself are freed by the consumer in ap_process_loop().
 */
struct ap_msg {
	u8 *data;
	int size;
	struct list_head list;
};
| 162 | |
/*
 * Messages queued by gb_new_ap_msg() and drained by the "greybus_ap"
 * kthread.  The lock and waitqueue are initialized at compile time
 * (DEFINE_SPINLOCK / DECLARE_WAIT_QUEUE_HEAD) so they are valid even if
 * gb_new_ap_msg() runs before gb_thread_init() has initialized anything
 * -- previously both were zero-filled statics until gb_thread_init().
 */
static LIST_HEAD(ap_msg_list);
static DEFINE_SPINLOCK(ap_msg_list_lock);
static struct task_struct *ap_thread;
static DECLARE_WAIT_QUEUE_HEAD(ap_wait);
| 167 | |
| 168 | static struct ap_msg *get_ap_msg(void) |
| 169 | { |
| 170 | struct ap_msg *ap_msg; |
| 171 | unsigned long flags; |
| 172 | |
| 173 | spin_lock_irqsave(&ap_msg_list_lock, flags); |
| 174 | |
| 175 | ap_msg = list_first_entry_or_null(&ap_msg_list, struct ap_msg, list); |
| 176 | if (ap_msg != NULL) |
| 177 | list_del(&ap_msg->list); |
| 178 | spin_unlock_irqrestore(&ap_msg_list_lock, flags); |
| 179 | |
| 180 | return ap_msg; |
| 181 | } |
| 182 | |
| 183 | static int ap_process_loop(void *data) |
| 184 | { |
| 185 | struct ap_msg *ap_msg; |
| 186 | |
| 187 | while (!kthread_should_stop()) { |
| 188 | wait_event_interruptible(ap_wait, kthread_should_stop()); |
| 189 | |
| 190 | if (kthread_should_stop()) |
| 191 | break; |
| 192 | |
| 193 | /* Get some data off of the ap list and process it */ |
| 194 | ap_msg = get_ap_msg(); |
| 195 | if (!ap_msg) |
| 196 | continue; |
| 197 | |
| 198 | // FIXME - process the message |
| 199 | |
| 200 | /* clean the message up */ |
| 201 | kfree(ap_msg->data); |
| 202 | kfree(ap_msg); |
| 203 | } |
| 204 | return 0; |
| 205 | } |
| 206 | |
| 207 | int gb_new_ap_msg(u8 *data, int size) |
| 208 | { |
| 209 | struct ap_msg *ap_msg; |
| 210 | unsigned long flags; |
| 211 | |
| 212 | /* |
| 213 | * Totally naive copy the message into a new structure that we slowly |
| 214 | * create and add it to the list. Let's get this working, the odds of |
| 215 | * this being any "slow path" for AP messages is really low at this |
| 216 | * point in time, but you never know, so this comment is here to point |
| 217 | * out that maybe we should use a slab allocator, or even just not copy |
| 218 | * the data, but use it directly and force the urbs to be "new" each |
| 219 | * time. |
| 220 | */ |
| 221 | |
| 222 | /* Note - this can, and will, be called in interrupt context. */ |
| 223 | ap_msg = kmalloc(sizeof(*ap_msg), GFP_ATOMIC); |
| 224 | if (!ap_msg) |
| 225 | return -ENOMEM; |
| 226 | ap_msg->data = kmalloc(size, GFP_ATOMIC); |
| 227 | if (!ap_msg->data) { |
| 228 | kfree(ap_msg); |
| 229 | return -ENOMEM; |
| 230 | } |
| 231 | memcpy(ap_msg->data, data, size); |
| 232 | ap_msg->size = size; |
| 233 | |
| 234 | spin_lock_irqsave(&ap_msg_list_lock, flags); |
| 235 | list_add(&ap_msg->list, &ap_msg_list); |
| 236 | spin_unlock_irqrestore(&ap_msg_list_lock, flags); |
| 237 | |
| 238 | /* kick our thread to handle the message */ |
| 239 | wake_up_interruptible(&ap_wait); |
| 240 | |
| 241 | return 0; |
| 242 | } |
| 243 | |
| 244 | int gb_thread_init(void) |
| 245 | { |
| 246 | init_waitqueue_head(&ap_wait); |
| 247 | spin_lock_init(&ap_msg_list_lock); |
| 248 | |
| 249 | ap_thread = kthread_run(ap_process_loop, NULL, "greybus_ap"); |
| 250 | if (IS_ERR(ap_thread)) |
| 251 | return PTR_ERR(ap_thread); |
| 252 | |
| 253 | return 0; |
| 254 | } |
| 255 | |
/*
 * Stop the AP processing thread started by gb_thread_init().
 * kthread_stop() wakes the thread and blocks until it exits; any
 * messages still on ap_msg_list at that point are not freed here.
 */
void gb_thread_destroy(void)
{
	kthread_stop(ap_thread);
}
| 260 | |
| 261 | |