blob: eeb251e2844113c5474032380a67213872be3466 [file] [log] [blame]
Alex Elder30c6d9d2015-05-22 13:02:08 -05001/*
2 * SVC Greybus driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +000010#include <linux/input.h>
Viresh Kumar067906f2015-08-06 12:44:55 +053011#include <linux/workqueue.h>
Alex Elder30c6d9d2015-05-22 13:02:08 -050012
Viresh Kumarf66427a2015-09-02 21:27:13 +053013#include "greybus.h"
14
Johan Hovoldf6c6c132015-11-11 10:07:08 +010015#define CPORT_FLAGS_E2EFC BIT(0)
16#define CPORT_FLAGS_CSD_N BIT(1)
17#define CPORT_FLAGS_CSV_N BIT(2)
Perry Hung0b226492015-07-24 19:02:34 -040018
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +000019#define SVC_KEY_ARA_BUTTON KEY_A
Viresh Kumarb45864d2015-07-24 15:32:21 +053020
/*
 * A deferred SVC request: holds a reference to an incoming operation
 * (hotplug/hot-unplug) so it can be processed later on the svc workqueue
 * instead of in the receive path.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* queued on svc->wq */
	struct gb_operation *operation;	/* ref held via gb_operation_get() */
};
25
Viresh Kumaread35462015-07-21 17:44:19 +053026
Johan Hovold66069fb2015-11-25 15:59:09 +010027static ssize_t endo_id_show(struct device *dev,
28 struct device_attribute *attr, char *buf)
29{
30 struct gb_svc *svc = to_gb_svc(dev);
31
32 return sprintf(buf, "0x%04x\n", svc->endo_id);
33}
34static DEVICE_ATTR_RO(endo_id);
35
36static ssize_t ap_intf_id_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38{
39 struct gb_svc *svc = to_gb_svc(dev);
40
41 return sprintf(buf, "%u\n", svc->ap_intf_id);
42}
43static DEVICE_ATTR_RO(ap_intf_id);
44
Rui Miguel Silva2c92bd52016-01-11 13:46:33 +000045
46// FIXME
47// This is a hack, we need to do this "right" and clean the interface up
48// properly, not just forcibly yank the thing out of the system and hope for the
49// best. But for now, people want their modules to come out without having to
50// throw the thing to the ground or get out a screwdriver.
51static ssize_t intf_eject_store(struct device *dev,
52 struct device_attribute *attr, const char *buf,
53 size_t len)
54{
55 struct gb_svc *svc = to_gb_svc(dev);
56 unsigned short intf_id;
57 int ret;
58
59 ret = kstrtou16(buf, 10, &intf_id);
60 if (ret < 0)
61 return ret;
62
63 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
64
65 ret = gb_svc_intf_eject(svc, intf_id);
66 if (ret < 0)
67 return ret;
68
69 return len;
70}
71static DEVICE_ATTR_WO(intf_eject);
72
Greg Kroah-Hartmand5628532016-01-26 15:17:08 -080073static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
74 char *buf)
75{
76 struct gb_svc *svc = to_gb_svc(dev);
77
78 return sprintf(buf, "%s\n",
79 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
80}
81
82static ssize_t watchdog_store(struct device *dev,
83 struct device_attribute *attr, const char *buf,
84 size_t len)
85{
86 struct gb_svc *svc = to_gb_svc(dev);
87 int retval;
88 bool user_request;
89
90 retval = strtobool(buf, &user_request);
91 if (retval)
92 return retval;
93
94 if (user_request)
95 retval = gb_svc_watchdog_enable(svc);
96 else
97 retval = gb_svc_watchdog_disable(svc);
98 if (retval)
99 return retval;
100 return len;
101}
102static DEVICE_ATTR_RW(watchdog);
103
/* Attributes exposed under the svc device's sysfs directory. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
112
Viresh Kumar505f16c2015-08-31 17:21:07 +0530113static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500114{
115 struct gb_svc_intf_device_id_request request;
116
117 request.intf_id = intf_id;
118 request.device_id = device_id;
119
120 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
121 &request, sizeof(request), NULL, 0);
122}
123
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530124int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500125{
126 struct gb_svc_intf_reset_request request;
127
128 request.intf_id = intf_id;
129
130 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
131 &request, sizeof(request), NULL, 0);
132}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530133EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500134
/*
 * Ask the SVC to physically eject the module on the given interface.
 * Returns 0 on success or a negative errno from the operation layer.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	return gb_operation_sync_timeout(svc->connection,
					 GB_SVC_TYPE_INTF_EJECT, &request,
					 sizeof(request), NULL, 0,
					 GB_SVC_EJECT_TIME);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_eject);
151
Viresh Kumar19151c32015-09-09 21:08:29 +0530152int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
153 u32 *value)
154{
155 struct gb_svc_dme_peer_get_request request;
156 struct gb_svc_dme_peer_get_response response;
157 u16 result;
158 int ret;
159
160 request.intf_id = intf_id;
161 request.attr = cpu_to_le16(attr);
162 request.selector = cpu_to_le16(selector);
163
164 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
165 &request, sizeof(request),
166 &response, sizeof(response));
167 if (ret) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530168 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100169 intf_id, attr, selector, ret);
Viresh Kumar19151c32015-09-09 21:08:29 +0530170 return ret;
171 }
172
173 result = le16_to_cpu(response.result_code);
174 if (result) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530175 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100176 intf_id, attr, selector, result);
Viresh Kumar4aac6c52015-12-04 21:30:08 +0530177 return -EIO;
Viresh Kumar19151c32015-09-09 21:08:29 +0530178 }
179
180 if (value)
181 *value = le32_to_cpu(response.attr_value);
182
183 return 0;
184}
185EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
186
187int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
188 u32 value)
189{
190 struct gb_svc_dme_peer_set_request request;
191 struct gb_svc_dme_peer_set_response response;
192 u16 result;
193 int ret;
194
195 request.intf_id = intf_id;
196 request.attr = cpu_to_le16(attr);
197 request.selector = cpu_to_le16(selector);
198 request.value = cpu_to_le32(value);
199
200 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
201 &request, sizeof(request),
202 &response, sizeof(response));
203 if (ret) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530204 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100205 intf_id, attr, selector, value, ret);
Viresh Kumar19151c32015-09-09 21:08:29 +0530206 return ret;
207 }
208
209 result = le16_to_cpu(response.result_code);
210 if (result) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530211 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100212 intf_id, attr, selector, value, result);
Viresh Kumar4aac6c52015-12-04 21:30:08 +0530213 return -EIO;
Viresh Kumar19151c32015-09-09 21:08:29 +0530214 }
215
216 return 0;
217}
218EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
219
/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
 * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
 * reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 *
 * Returns 0 on success, -ENODEV if the module has not finished booting, or
 * a negative errno from the DME peer get/set operations.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * Check if the module is ES2 or ES3, and choose attr number
	 * appropriately.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		attr = DME_ATTR_T_TST_SRC_INCREMENT;
	else
		attr = DME_ATTR_ES3_INIT_STATUS;

	/* Read and clear boot status in ES3_INIT_STATUS */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_ATTR_SELECTOR_INDEX, &value);

	if (ret)
		return ret;

	/*
	 * A nonzero boot status indicates the module has finished
	 * booting. Clear it.
	 */
	if (!value) {
		dev_err(&intf->dev, "Module not ready yet\n");
		return -ENODEV;
	}

	/*
	 * Check if the module needs to boot from UniPro.
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		init_status = value;
	else
		init_status = value >> 24;

	if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
	    init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
		intf->boot_over_unipro = true;

	/* Write zero back to acknowledge/clear the boot status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_ATTR_SELECTOR_INDEX, 0);
}
282
/*
 * Ask the SVC to create a connection between two (interface, cport) pairs.
 * @boot_over_unipro selects reduced CPort flags for interfaces that still
 * need to download firmware over UniPro.
 */
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     bool boot_over_unipro)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	/*
	 * XXX: fix connection parameters to TC0 and all CPort flags
	 * for now.
	 */
	request.tc = 0;

	/*
	 * We need to skip setting E2EFC and other flags to the connection
	 * create request, for all cports, on an interface that need to boot
	 * over unipro, i.e. interfaces required to download firmware.
	 */
	if (boot_over_unipro)
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
	else
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500314
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530315void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
316 u8 intf2_id, u16 cport2_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500317{
318 struct gb_svc_conn_destroy_request request;
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530319 struct gb_connection *connection = svc->connection;
320 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500321
322 request.intf1_id = intf1_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100323 request.cport1_id = cpu_to_le16(cport1_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500324 request.intf2_id = intf2_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100325 request.cport2_id = cpu_to_le16(cport2_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500326
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530327 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
328 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100329 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530330 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100331 intf1_id, cport1_id, intf2_id, cport2_id, ret);
332 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500333}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530334EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500335
Viresh Kumarbb106852015-09-07 16:01:25 +0530336/* Creates bi-directional routes between the devices */
Viresh Kumar505f16c2015-08-31 17:21:07 +0530337static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
338 u8 intf2_id, u8 dev2_id)
Perry Hunge08aaa42015-07-24 19:02:31 -0400339{
340 struct gb_svc_route_create_request request;
341
342 request.intf1_id = intf1_id;
343 request.dev1_id = dev1_id;
344 request.intf2_id = intf2_id;
345 request.dev2_id = dev2_id;
346
347 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
348 &request, sizeof(request), NULL, 0);
349}
Perry Hunge08aaa42015-07-24 19:02:31 -0400350
Viresh Kumar0a020572015-09-07 18:05:26 +0530351/* Destroys bi-directional routes between the devices */
352static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
353{
354 struct gb_svc_route_destroy_request request;
355 int ret;
356
357 request.intf1_id = intf1_id;
358 request.intf2_id = intf2_id;
359
360 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
361 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100362 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530363 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100364 intf1_id, intf2_id, ret);
365 }
Viresh Kumar0a020572015-09-07 18:05:26 +0530366}
367
/*
 * Ask the SVC to reconfigure the UniPro link power mode of an interface.
 * Returns a negative errno on operation failure, else the (non-negative)
 * UniPro result code from the SVC — callers must treat any non-zero return
 * as failure.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
Laurent Pinchart784f8762015-12-18 21:23:22 +0200397
Greg Kroah-Hartman55ec09e2016-01-19 23:30:42 -0800398int gb_svc_ping(struct gb_svc *svc)
399{
Greg Kroah-Hartman839ac5b2016-01-26 08:57:50 -0800400 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
401 NULL, 0, NULL, 0,
402 GB_OPERATION_TIMEOUT_DEFAULT * 2);
Greg Kroah-Hartman55ec09e2016-01-19 23:30:42 -0800403}
404EXPORT_SYMBOL_GPL(gb_svc_ping);
405
/*
 * Handle an incoming SVC protocol-version request: validate the request
 * size, reject unsupported major versions, record the negotiated version
 * on the svc, and echo it back in the response.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	/* Remember what the SVC speaks so later requests can adapt. */
	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
440
/*
 * Handle the SVC hello request: record the Endo id and the AP's interface
 * id, then register the svc device, its input device and its watchdog.
 * Each later registration unwinds the earlier ones on failure, in reverse
 * order.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	return 0;
}
482
/*
 * Tear down an interface: mark it disconnected, remove it, destroy its
 * AP route and release its device id. The id and interface id are cached
 * first because gb_interface_remove() frees the interface.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id = intf->device_id;

	intf->disconnected = true;

	gb_interface_remove(intf);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);
}
499
Johan Hovold9ae41092015-12-02 18:23:29 +0100500static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500501{
Johan Hovold24456a092015-12-02 18:23:27 +0100502 struct gb_svc_intf_hotplug_request *request;
Johan Hovold9ae41092015-12-02 18:23:29 +0100503 struct gb_connection *connection = operation->connection;
Viresh Kumar067906f2015-08-06 12:44:55 +0530504 struct gb_svc *svc = connection->private;
Johan Hovold25376362015-11-03 18:03:23 +0100505 struct gb_host_device *hd = connection->hd;
Viresh Kumaread35462015-07-21 17:44:19 +0530506 struct gb_interface *intf;
507 u8 intf_id, device_id;
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530508 u32 vendor_id = 0;
509 u32 product_id = 0;
Viresh Kumaread35462015-07-21 17:44:19 +0530510 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500511
Johan Hovold24456a092015-12-02 18:23:27 +0100512 /* The request message size has already been verified. */
Johan Hovold9ae41092015-12-02 18:23:29 +0100513 request = operation->request->payload;
Johan Hovold24456a092015-12-02 18:23:27 +0100514 intf_id = request->intf_id;
515
516 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500517
Viresh Kumarbbaca712015-09-23 16:48:08 -0700518 intf = gb_interface_find(hd, intf_id);
519 if (intf) {
520 /*
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530521 * For ES2, we need to maintain the same vendor/product ids we
522 * got from bootrom, otherwise userspace can't distinguish
523 * between modules.
524 */
525 vendor_id = intf->vendor_id;
526 product_id = intf->product_id;
527
528 /*
Viresh Kumarbbaca712015-09-23 16:48:08 -0700529 * We have received a hotplug request for an interface that
530 * already exists.
531 *
532 * This can happen in cases like:
533 * - bootrom loading the firmware image and booting into that,
534 * which only generates a hotplug event. i.e. no hot-unplug
535 * event.
536 * - Or the firmware on the module crashed and sent hotplug
537 * request again to the SVC, which got propagated to AP.
538 *
539 * Remove the interface and add it again, and let user know
540 * about this with a print message.
541 */
Viresh Kumar2f3db922015-12-04 21:30:09 +0530542 dev_info(&svc->dev, "removing interface %u to add it again\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100543 intf_id);
Johan Hovoldb4ee82e2015-12-02 18:23:28 +0100544 gb_svc_intf_remove(svc, intf);
Viresh Kumarbbaca712015-09-23 16:48:08 -0700545 }
546
Viresh Kumaread35462015-07-21 17:44:19 +0530547 intf = gb_interface_create(hd, intf_id);
548 if (!intf) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530549 dev_err(&svc->dev, "failed to create interface %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100550 intf_id);
Johan Hovold9ae41092015-12-02 18:23:29 +0100551 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530552 }
553
Viresh Kumar63d742b2015-12-23 09:07:42 +0530554 intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
555 intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
556 intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
557 intf->product_id = le32_to_cpu(request->data.ara_prod_id);
Viresh Kumar57c6bcc2015-12-28 11:59:00 +0530558 intf->serial_number = le64_to_cpu(request->data.serial_number);
Viresh Kumar63d742b2015-12-23 09:07:42 +0530559
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530560 /*
561 * Use VID/PID specified at hotplug if:
562 * - Bridge ASIC chip isn't ES2
563 * - Received non-zero Vendor/Product ids
564 *
565 * Otherwise, use the ids we received from bootrom.
566 */
567 if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
568 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID &&
569 intf->vendor_id == 0 && intf->product_id == 0) {
570 intf->vendor_id = vendor_id;
571 intf->product_id = product_id;
572 }
573
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700574 ret = gb_svc_read_and_clear_module_boot_status(intf);
Johan Hovoldb395754a2015-12-07 15:05:28 +0100575 if (ret) {
576 dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
577 intf_id, ret);
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700578 goto destroy_interface;
Johan Hovoldb395754a2015-12-07 15:05:28 +0100579 }
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700580
Viresh Kumaread35462015-07-21 17:44:19 +0530581 /*
582 * Create a device id for the interface:
583 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
584 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
585 *
586 * XXX Do we need to allocate device ID for SVC or the AP here? And what
587 * XXX about an AP with multiple interface blocks?
588 */
Johan Hovoldc09db182015-09-15 09:18:08 +0200589 device_id = ida_simple_get(&svc->device_id_map,
Johan Hovold89f637f2015-09-01 12:25:25 +0200590 GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
Viresh Kumaread35462015-07-21 17:44:19 +0530591 if (device_id < 0) {
592 ret = device_id;
Viresh Kumar2f3db922015-12-04 21:30:09 +0530593 dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100594 intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530595 goto destroy_interface;
596 }
597
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530598 ret = gb_svc_intf_device_id(svc, intf_id, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530599 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530600 dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100601 device_id, intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530602 goto ida_put;
603 }
604
Perry Hung7e275462015-07-24 19:02:32 -0400605 /*
606 * Create a two-way route between the AP and the new interface
607 */
Johan Hovold66069fb2015-11-25 15:59:09 +0100608 ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530609 intf_id, device_id);
Perry Hung7e275462015-07-24 19:02:32 -0400610 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530611 dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100612 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530613 goto svc_id_free;
Perry Hung7e275462015-07-24 19:02:32 -0400614 }
615
Viresh Kumaread35462015-07-21 17:44:19 +0530616 ret = gb_interface_init(intf, device_id);
617 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530618 dev_err(&svc->dev, "failed to initialize interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100619 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530620 goto destroy_route;
Viresh Kumaread35462015-07-21 17:44:19 +0530621 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500622
Johan Hovold9ae41092015-12-02 18:23:29 +0100623 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530624
Viresh Kumar0a020572015-09-07 18:05:26 +0530625destroy_route:
Johan Hovold66069fb2015-11-25 15:59:09 +0100626 gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530627svc_id_free:
628 /*
629 * XXX Should we tell SVC that this id doesn't belong to interface
630 * XXX anymore.
631 */
632ida_put:
Johan Hovoldc09db182015-09-15 09:18:08 +0200633 ida_simple_remove(&svc->device_id_map, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530634destroy_interface:
Viresh Kumar80d1ede2015-09-23 16:48:10 -0700635 gb_interface_remove(intf);
Johan Hovold9ae41092015-12-02 18:23:29 +0100636}
637
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100638static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
639{
640 struct gb_svc *svc = operation->connection->private;
641 struct gb_svc_intf_hot_unplug_request *request;
642 struct gb_host_device *hd = operation->connection->hd;
643 struct gb_interface *intf;
644 u8 intf_id;
645
646 /* The request message size has already been verified. */
647 request = operation->request->payload;
648 intf_id = request->intf_id;
649
650 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
651
652 intf = gb_interface_find(hd, intf_id);
653 if (!intf) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530654 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100655 intf_id);
656 return;
657 }
658
659 gb_svc_intf_remove(svc, intf);
660}
661
Johan Hovold9ae41092015-12-02 18:23:29 +0100662static void gb_svc_process_deferred_request(struct work_struct *work)
663{
664 struct gb_svc_deferred_request *dr;
665 struct gb_operation *operation;
666 struct gb_svc *svc;
667 u8 type;
668
669 dr = container_of(work, struct gb_svc_deferred_request, work);
670 operation = dr->operation;
671 svc = operation->connection->private;
672 type = operation->request->header->type;
673
674 switch (type) {
675 case GB_SVC_TYPE_INTF_HOTPLUG:
676 gb_svc_process_intf_hotplug(operation);
677 break;
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100678 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
679 gb_svc_process_intf_hot_unplug(operation);
680 break;
Johan Hovold9ae41092015-12-02 18:23:29 +0100681 default:
Viresh Kumarb933fa42015-12-04 21:30:10 +0530682 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
Johan Hovold9ae41092015-12-02 18:23:29 +0100683 }
684
685 gb_operation_put(operation);
686 kfree(dr);
687}
688
689static int gb_svc_queue_deferred_request(struct gb_operation *operation)
690{
Johan Hovold3e48aca2015-12-02 18:23:31 +0100691 struct gb_svc *svc = operation->connection->private;
Johan Hovold9ae41092015-12-02 18:23:29 +0100692 struct gb_svc_deferred_request *dr;
693
694 dr = kmalloc(sizeof(*dr), GFP_KERNEL);
695 if (!dr)
696 return -ENOMEM;
697
698 gb_operation_get(operation);
699
700 dr->operation = operation;
701 INIT_WORK(&dr->work, gb_svc_process_deferred_request);
702
Johan Hovold3e48aca2015-12-02 18:23:31 +0100703 queue_work(svc->wq, &dr->work);
Johan Hovold9ae41092015-12-02 18:23:29 +0100704
705 return 0;
Viresh Kumar067906f2015-08-06 12:44:55 +0530706}
Viresh Kumaread35462015-07-21 17:44:19 +0530707
Viresh Kumar067906f2015-08-06 12:44:55 +0530708/*
709 * Bringing up a module can be time consuming, as that may require lots of
710 * initialization on the module side. Over that, we may also need to download
711 * the firmware first and flash that on the module.
712 *
Johan Hovold3e48aca2015-12-02 18:23:31 +0100713 * In order not to make other svc events wait for all this to finish,
Viresh Kumar067906f2015-08-06 12:44:55 +0530714 * handle most of module hotplug stuff outside of the hotplug callback, with
715 * help of a workqueue.
716 */
717static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
718{
Johan Hovold684156a2015-11-25 15:59:19 +0100719 struct gb_svc *svc = op->connection->private;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100720 struct gb_svc_intf_hotplug_request *request;
Viresh Kumar067906f2015-08-06 12:44:55 +0530721
Johan Hovoldd34a3642015-12-02 18:23:26 +0100722 if (op->request->payload_size < sizeof(*request)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100723 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
Johan Hovoldd34a3642015-12-02 18:23:26 +0100724 op->request->payload_size, sizeof(*request));
Viresh Kumar067906f2015-08-06 12:44:55 +0530725 return -EINVAL;
726 }
727
Johan Hovoldd34a3642015-12-02 18:23:26 +0100728 request = op->request->payload;
729
730 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
731
Johan Hovold9ae41092015-12-02 18:23:29 +0100732 return gb_svc_queue_deferred_request(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500733}
734
735static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
736{
Johan Hovold684156a2015-11-25 15:59:19 +0100737 struct gb_svc *svc = op->connection->private;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100738 struct gb_svc_intf_hot_unplug_request *request;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500739
Johan Hovoldd34a3642015-12-02 18:23:26 +0100740 if (op->request->payload_size < sizeof(*request)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100741 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
Johan Hovoldd34a3642015-12-02 18:23:26 +0100742 op->request->payload_size, sizeof(*request));
Alex Elder30c6d9d2015-05-22 13:02:08 -0500743 return -EINVAL;
744 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500745
Johan Hovoldd34a3642015-12-02 18:23:26 +0100746 request = op->request->payload;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100747
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100748 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500749
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100750 return gb_svc_queue_deferred_request(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500751}
752
/*
 * Handle an interface reset request from the SVC.
 *
 * Currently only validates the request payload and extracts the interface
 * id; the actual reset is not yet implemented (see FIXME below), so
 * intf_id is read but unused for now.
 *
 * Returns 0 on success or -EINVAL on a short request.
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	/* Reject requests with a truncated payload. */
	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
773
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000774static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
775{
776 switch (key_code) {
777 case GB_KEYCODE_ARA:
778 *code = SVC_KEY_ARA_BUTTON;
779 break;
780 default:
781 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
782 return -EINVAL;
783 }
784
785 return 0;
786}
787
788static int gb_svc_key_event_recv(struct gb_operation *op)
789{
790 struct gb_svc *svc = op->connection->private;
791 struct gb_message *request = op->request;
792 struct gb_svc_key_event_request *key;
793 u16 code;
794 u8 event;
795 int ret;
796
797 if (request->payload_size < sizeof(*key)) {
798 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
799 request->payload_size, sizeof(*key));
800 return -EINVAL;
801 }
802
803 key = request->payload;
804
805 ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
806 if (ret < 0)
807 return ret;
808
809 event = key->key_event;
810 if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
811 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
812 return -EINVAL;
813 }
814
815 input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
816 input_sync(svc->input);
817
818 return 0;
819}
820
Johan Hovold84427942016-01-19 12:51:15 +0100821static int gb_svc_request_handler(struct gb_operation *op)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500822{
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530823 struct gb_connection *connection = op->connection;
824 struct gb_svc *svc = connection->private;
Johan Hovold84427942016-01-19 12:51:15 +0100825 u8 type = op->type;
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530826 int ret = 0;
827
828 /*
829 * SVC requests need to follow a specific order (at least initially) and
830 * below code takes care of enforcing that. The expected order is:
831 * - PROTOCOL_VERSION
832 * - SVC_HELLO
833 * - Any other request, but the earlier two.
834 *
835 * Incoming requests are guaranteed to be serialized and so we don't
836 * need to protect 'state' for any races.
837 */
Alex Elder30c6d9d2015-05-22 13:02:08 -0500838 switch (type) {
Viresh Kumar0e2462d2015-08-14 07:57:38 +0530839 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530840 if (svc->state != GB_SVC_STATE_RESET)
841 ret = -EINVAL;
842 break;
Viresh Kumaread35462015-07-21 17:44:19 +0530843 case GB_SVC_TYPE_SVC_HELLO:
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530844 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
845 ret = -EINVAL;
846 break;
847 default:
848 if (svc->state != GB_SVC_STATE_SVC_HELLO)
849 ret = -EINVAL;
850 break;
851 }
852
853 if (ret) {
Johan Hovold684156a2015-11-25 15:59:19 +0100854 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
855 type, svc->state);
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530856 return ret;
857 }
858
859 switch (type) {
860 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
861 ret = gb_svc_version_request(op);
862 if (!ret)
863 svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
864 return ret;
865 case GB_SVC_TYPE_SVC_HELLO:
866 ret = gb_svc_hello(op);
867 if (!ret)
868 svc->state = GB_SVC_STATE_SVC_HELLO;
869 return ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500870 case GB_SVC_TYPE_INTF_HOTPLUG:
871 return gb_svc_intf_hotplug_recv(op);
872 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
873 return gb_svc_intf_hot_unplug_recv(op);
874 case GB_SVC_TYPE_INTF_RESET:
875 return gb_svc_intf_reset_recv(op);
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000876 case GB_SVC_TYPE_KEY_EVENT:
877 return gb_svc_key_event_recv(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500878 default:
Johan Hovold684156a2015-11-25 15:59:19 +0100879 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500880 return -EINVAL;
881 }
882}
883
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000884static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
885{
886 struct input_dev *input_dev;
887
888 input_dev = input_allocate_device();
889 if (!input_dev)
890 return ERR_PTR(-ENOMEM);
891
892 input_dev->name = dev_name(&svc->dev);
893 svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
894 input_dev->name);
895 if (!svc->input_phys)
896 goto err_free_input;
897
898 input_dev->phys = svc->input_phys;
899 input_dev->dev.parent = &svc->dev;
900
901 input_set_drvdata(input_dev, svc);
902
903 input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
904
905 return input_dev;
906
907err_free_input:
908 input_free_device(svc->input);
909 return ERR_PTR(-ENOMEM);
910}
911
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100912static void gb_svc_release(struct device *dev)
913{
Johan Hovold88f7b962015-11-25 15:59:08 +0100914 struct gb_svc *svc = to_gb_svc(dev);
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100915
Johan Hovold7adeaae72015-12-07 15:05:37 +0100916 if (svc->connection)
917 gb_connection_destroy(svc->connection);
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100918 ida_destroy(&svc->device_id_map);
Johan Hovold3e48aca2015-12-02 18:23:31 +0100919 destroy_workqueue(svc->wq);
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000920 kfree(svc->input_phys);
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100921 kfree(svc);
922}
923
/* Device type for SVC devices; ties device teardown to gb_svc_release(). */
struct device_type greybus_svc_type = {
	.name = "greybus_svc",
	.release = gb_svc_release,
};
928
Johan Hovold7adeaae72015-12-07 15:05:37 +0100929struct gb_svc *gb_svc_create(struct gb_host_device *hd)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500930{
931 struct gb_svc *svc;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500932
933 svc = kzalloc(sizeof(*svc), GFP_KERNEL);
934 if (!svc)
Johan Hovold7adeaae72015-12-07 15:05:37 +0100935 return NULL;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500936
Johan Hovold3e48aca2015-12-02 18:23:31 +0100937 svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
938 if (!svc->wq) {
939 kfree(svc);
Johan Hovold7adeaae72015-12-07 15:05:37 +0100940 return NULL;
Johan Hovold3e48aca2015-12-02 18:23:31 +0100941 }
942
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100943 svc->dev.parent = &hd->dev;
944 svc->dev.bus = &greybus_bus_type;
945 svc->dev.type = &greybus_svc_type;
Johan Hovold66069fb2015-11-25 15:59:09 +0100946 svc->dev.groups = svc_groups;
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100947 svc->dev.dma_mask = svc->dev.parent->dma_mask;
948 device_initialize(&svc->dev);
949
950 dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
951
Johan Hovold6106e512015-11-25 15:59:07 +0100952 ida_init(&svc->device_id_map);
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530953 svc->state = GB_SVC_STATE_RESET;
Johan Hovoldf0960d02015-12-03 19:18:02 +0100954 svc->hd = hd;
Viresh Kumard3d44842015-07-21 17:44:18 +0530955
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000956 svc->input = gb_svc_input_create(svc);
957 if (IS_ERR(svc->input)) {
958 dev_err(&svc->dev, "failed to create input device: %ld\n",
959 PTR_ERR(svc->input));
960 goto err_put_device;
961 }
962
Johan Hovoldf7ee0812016-01-21 17:34:21 +0100963 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
964 gb_svc_request_handler);
Johan Hovold24e094d2016-01-21 17:34:16 +0100965 if (IS_ERR(svc->connection)) {
966 dev_err(&svc->dev, "failed to create connection: %ld\n",
967 PTR_ERR(svc->connection));
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000968 goto err_free_input;
Johan Hovold7adeaae72015-12-07 15:05:37 +0100969 }
970
971 svc->connection->private = svc;
972
973 return svc;
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000974
975err_free_input:
976 input_free_device(svc->input);
977err_put_device:
978 put_device(&svc->dev);
979 return NULL;
Johan Hovold7adeaae72015-12-07 15:05:37 +0100980}
981
982int gb_svc_add(struct gb_svc *svc)
983{
984 int ret;
985
986 /*
987 * The SVC protocol is currently driven by the SVC, so the SVC device
988 * is added from the connection request handler when enough
989 * information has been received.
990 */
Johan Hovoldf7ee0812016-01-21 17:34:21 +0100991 ret = gb_connection_enable(svc->connection);
Johan Hovold7adeaae72015-12-07 15:05:37 +0100992 if (ret)
993 return ret;
994
995 return 0;
996}
997
/*
 * Tear down a previously added SVC: disable the connection first so no new
 * requests arrive, unregister the (possibly registered) device and input
 * device, and finally drain the workqueue of any deferred requests still
 * queued.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop incoming requests before unregistering anything. */
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Wait for any still-queued deferred requests to finish. */
	flush_workqueue(svc->wq);
}
1014
/*
 * Drop a reference on the SVC device; the last put triggers
 * gb_svc_release() which frees all SVC resources.
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}